def add_example_layers(state):
    a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
    ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]], indexing='ij')
    a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
    a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
    a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255

    b = np.floor(np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10).astype(np.uint32)
    b = np.pad(b, 1, 'constant')
    dimensions = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                              units='nm',
                                              scales=[10, 10, 10])

    state.dimensions = dimensions
    state.layers.append(
        name='a',
        layer=neuroglancer.LocalVolume(
            data=a,
            dimensions=neuroglancer.CoordinateSpace(
                names=['c', 'x', 'y', 'z'],
                units=['', 'nm', 'nm', 'nm'],
                scales=[1, 10, 10, 10],
                coordinate_arrays=[
                    neuroglancer.CoordinateArray(labels=['red', 'green', 'blue']), None, None, None
                ]),
            voxel_offset=(0, 20, 30, 15),
        ))
    state.layers.append(
        name='b',
        layer=neuroglancer.LocalVolume(
            data=b,
            dimensions=dimensions,
        ),
    )
    return a, b
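A minimal driver for the example above (a sketch, assuming neuroglancer and numpy are installed as the snippet implies): create a viewer, call add_example_layers inside a state transaction, and open the printed URL.

import neuroglancer

viewer = neuroglancer.Viewer()
with viewer.txn() as s:          # s is a mutable copy of the viewer state
    a, b = add_example_layers(s)  # adds the 'a' and 'b' layers defined above
print(viewer)                    # prints the URL to open in a browser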
Example #2
def glance(viewer, raw, labels=None):
    with viewer.txn() as s:
        s.voxel_size = [600, 600, 600]
        s.layers.append(name='image',
                        layer=neuroglancer.LocalVolume(
                            data=raw,
                            offset=(0, 0, 0),
                            voxel_size=s.voxel_size,
                        ),
                        shader="""
    void main() {
    emitRGB(vec3(toNormalized(getDataValue(0)),
                toNormalized(getDataValue(1)),
                toNormalized(getDataValue(2))));
    }
    """),
        if labels is not None:
            s.layers.append(
                name='labels',
                layer=neuroglancer.LocalVolume(
                    data=labels,
                    offset=(0, 0, 0),
                    voxel_size=s.voxel_size,
                ),
            )
    return viewer.get_viewer_url()
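A usage sketch for glance (it relies on the older voxel_size/offset LocalVolume keywords, so an older neuroglancer release is assumed). Because the shader reads channels 0-2, raw is assumed to be a channel-first volume with at least three channels; labels is optional.

import numpy as np
import neuroglancer

viewer = neuroglancer.Viewer()
raw = np.random.randint(0, 255, size=(3, 64, 64, 64), dtype=np.uint8)
labels = np.zeros((64, 64, 64), dtype=np.uint32)
print(glance(viewer, raw, labels))  # prints the viewer URL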
Example #3
def show_skeleton(filepath,
                  name='skeletons',
                  resolution=None,
                  show_ends=False):
    global viewer
    global res
    r = resolution if resolution else res

    vertices = {}
    edges = {}
    if filepath.endswith('.h5'):
        with h5py.File(filepath, 'r') as hf:
            print(f"Skeleton Ids are: {[int(k) for k in hf.keys()]}")
            for g in hf.keys():
                vertices[g] = np.asarray(
                    hf.get(g)['vertices'])[:, ::-1] * np.array(r)
                if 'edges' in hf.get(g).keys():
                    edges[g] = np.asarray(hf.get(g)['edges'])
                else:
                    edges[g] = np.array([], dtype=np.uint16)
    else:
        with open(filepath, 'rb') as phandle:
            paths = pickle.load(phandle)

        for g, data in paths.items():
            vertices[str(g)] = data['vertices'][:, ::-1] * np.array(r)
            edges[str(g)] = (data['edges'] if 'edges' in data
                             else np.array([], dtype=np.uint16))

    skeletons = SkeletonSource(vertices, edges)

    if show_ends is True:
        skeletonEndpoints = SkeletonEndPointSource(vertices, edges)

    with viewer.txn() as s:
        s.layers.append(name=name,
                        layer=neuroglancer.SegmentationLayer(
                            source=neuroglancer.LocalVolume(
                                data=np.zeros((1, 1, 1)),
                                voxel_size=r,
                                skeletons=skeletons),
                            skeleton_shader=
                            'void main() { emitRGB(colormapJet(color[0])); }',
                            selected_alpha=0,
                            not_selected_alpha=0,
                        ))

        if show_ends is True:
            s.layers.append(
                name=name + 'Ends',
                layer=neuroglancer.SegmentationLayer(
                    source=neuroglancer.LocalVolume(
                        data=np.zeros((1, 1, 1)),
                        voxel_size=r,
                        skeletons=skeletonEndpoints),
                    skeleton_shader=
                    'void main() { emitRGB(colormapJet(color[0])); }',
                    selected_alpha=0,
                    not_selected_alpha=0,
                ))
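A hypothetical call, assuming this module's globals viewer (a running neuroglancer.Viewer) and res (z, y, x voxel size), plus the SkeletonSource/SkeletonEndPointSource classes, are already defined as the function expects.

# Hypothetical usage; 'skeletons.h5' is a placeholder path with one group per
# skeleton id containing 'vertices' and optionally 'edges' datasets.
show_skeleton('skeletons.h5', name='skeletons', resolution=[30, 6, 6], show_ends=True)
print(viewer)  # the module-level viewer URL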
Example #4
    def addLayer(self,
                 path,
                 fileType,
                 res=None,
                 isLabel=False,
                 name="Image",
                 verbose=False):
        '''Add a new visualization layer to the instance.

        Args:
            path (string): Path to the file (for loading the file), or variable name of preloaded data
            fileType (string, None): Extension of the file to be loaded (None for preloaded data)
            res (tuple): Override the resolution set for the whole object
            isLabel (bool): Cast the data to the label dtype before display
            name (string): Name to be displayed in the layer tab of neuroglancer
            verbose (bool): Print additional information about the layer

        Examples:

        >>> ng.addLayer('./inputs/test/seg.h5', 'h5py', None, True, "Segmentation Map")
        >>> ng.addLayer('./inputs/train/img.npz', 'npArray', None, False, "Input Image")
        >>> ng.addLayer(a, None, None, False, name='Image')
        '''

        assert fileType in ['h5py', 'npArray', None]

        if res is None:
            res = self.res
        if verbose:
            print(f"Load {name}")

        if fileType == 'h5py':
            temp = np.array(h5py.File(path, 'r')['main'])
        elif fileType == 'npArray':
            temp = np.load(path)['arr_0']
        elif fileType is None:
            temp = path  # path is already the loaded array

        if isLabel:
            with self.viewer.txn() as s:
                s.layers.append(name=name,
                                layer=neuroglancer.LocalVolume(
                                    data=temp.astype(self.label_dtype),
                                    voxel_size=res))
        else:
            with self.viewer.txn() as s:
                s.layers.append(name=name,
                                layer=neuroglancer.LocalVolume(data=temp,
                                                               voxel_size=res))

        del temp
Example #5
    def useLayer(self, numLayers):
        # Store as ProjectionArray
        self.img2 = ProjectionArray(self.img, numLayers)

        dimensions = neuroglancer.CoordinateSpace(
            names=['z', 'y', 'x'],
            units='nm',
            scales=[10, 10, 10])

        with self.viewer.txn() as s:
            s.dimensions = dimensions
            s.layers.clear()
            s.layers.append(
                name='Original Image',
                layer=neuroglancer.LocalVolume(
                    data=self.img,
                    dimensions=neuroglancer.CoordinateSpace(
                        names=['c^', 'x', 'y', 'z'],
                        units=['', 'nm', 'nm', 'nm'],
                        scales=[1, 10, 10, 10]),
                    voxel_offset=(0, 0, 0, 0),
                ),
                shader='''
            void main() {
            emitRGB(vec3(toNormalized(getDataValue(0)),
                        toNormalized(getDataValue(1)),
                        toNormalized(getDataValue(2))));
            }        
            ''')
            s.layers.append(
                name='Z-Projection Image',
                layer=neuroglancer.LocalVolume(
                    data=self.img2,
                    dimensions=neuroglancer.CoordinateSpace(
                        names=['c^', 'x', 'y', 'z'],
                        units=['', 'nm', 'nm', 'nm'],
                        scales=[1, 10, 10, 10]),
                    voxel_offset=(0, 0, 0, 0),
                ),
                shader='''
            void main() {
            emitRGB(vec3(toNormalized(getDataValue(0)),
                        toNormalized(getDataValue(1)),
                        toNormalized(getDataValue(2))));
            }        
            ''')
            s.layers['Original Image'] = s.layers[0]
            s.layers['Z-Projection Image'] = s.layers[1]
            s.layout = neuroglancer.row_layout([
                    neuroglancer.LayerGroupViewer(layers=['Original Image']),
                    neuroglancer.LayerGroupViewer(layers=['Z-Projection Image']),
            ])
Example #6
    def _start_flood_fill(self, pos):
        self._stop_flood_fill()
        inf_results = zarr.zeros(self.gt_vol.bounds.to_list()[3:],
                                 chunks=(64, 64, 64),
                                 dtype=np.uint8)
        inf_volume = neuroglancer.LocalVolume(data=inf_results,
                                              dimensions=self.dimensions)

        with self.viewer.txn() as s:
            s.layers['points'] = neuroglancer.LocalAnnotationLayer(
                self.dimensions)
            s.layers['inference'] = neuroglancer.ImageLayer(
                source=inf_volume,
                shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
            )
        self.flood_fill_event = threading.Event()
        t = threading.Thread(target=self._do_flood_fill,
                             kwargs=dict(
                                 initial_pos=pos,
                                 inf_results=inf_results,
                                 inf_volume=inf_volume,
                                 event=self.flood_fill_event,
                             ))
        t.daemon = True
        t.start()
Example #7
    def __call__(self, chunks: dict):
        """
        Parameters:
            chunks: dict mapping layer name to chunk array
        """
        ng.set_static_content_source(
            url='https://neuromancer-seung-import.appspot.com')
        ng.set_server_bind_address(bind_port=self.port)
        viewer = ng.Viewer()

        with viewer.txn() as s:
            for chunk_name, chunk in chunks.items():
                global_offset = chunk.global_offset
                chunk = np.ascontiguousarray(chunk)

                s.layers.append(
                    name=chunk_name,
                    layer=ng.LocalVolume(
                        data=chunk,
                        dimensions=ng.CoordinateSpace(
                            scales=[1, *self.voxel_size[::-1]],
                            units=['', 'nm', 'nm', 'nm'],
                            names=['c^', 'x', 'y', 'z']),
                        # offset is in nm, not voxels
                        offset=list(o * v for o, v in zip(
                            global_offset[::-1][-3:], self.voxel_size[::-1]))))
        print('Open this url in browser: ')
        print(viewer)
        input('Press Enter to exit neuroglancer.')
Example #8
    def add_volume(self, volume, layer_name=None, clear_layer=False):
        if self.viewer is None:
            self.viewer = neuroglancer.Viewer()

        if layer_name is None:
            layer_name = f'{self.layer_type}_{self.scales}'

        source = neuroglancer.LocalVolume(
            data=volume,
            dimensions=neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                                    units='nm',
                                                    scales=self.scales),
            voxel_offset=self.offset)

        if self.layer_type == 'segmentation':
            layer = neuroglancer.SegmentationLayer(source=source)
        else:
            layer = neuroglancer.ImageLayer(source=source)

        with self.viewer.txn() as s:
            if clear_layer:
                s.layers.clear()
            s.layers[layer_name] = layer

        print(f'A new layer named {layer_name} is added to:')
        print(self.viewer)
Example #9
File: ngutils.py, Project: richardqiu/nuggt
def layer(txn,
          name,
          img,
          shader,
          multiplier,
          offx=0,
          offy=0,
          offz=0,
          voxel_size=default_voxel_size):
    """Add an image layer to Neuroglancer

    :param txn: The transaction context of the viewer

    :param name: The name of the layer as displayed in Neuroglancer

    :param img: The image to display

    :param shader: the shader to use when displaying, e.g. gray_shader

    :param multiplier: the multiplier to apply to the normalized data value.
    This can be used to brighten or dim the image.
    """
    if isinstance(img, str):
        frac = multiplier
        source = img
    else:
        frac = multiplier / np.percentile(img, 99.9)
        if img.dtype.kind in ("i", "u"):
            frac = frac * np.iinfo(img.dtype).max
        source = neuroglancer.LocalVolume(img,
                                          voxel_offset=(offx, offy, offz),
                                          voxel_size=voxel_size)
    txn.layers[name] = neuroglancer.ImageLayer(source=source,
                                               shader=shader % frac)
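A hypothetical usage sketch for layer(). The gray_shader below is a stand-in for the module's shader format string (a single %f placeholder for the brightness factor), and the voxel size is passed explicitly so default_voxel_size is not needed.

import numpy as np
import neuroglancer

# Stand-in grayscale shader; the real module supplies its own gray_shader.
gray_shader = """
void main() {
  emitGrayscale(%f * toNormalized(getDataValue()));
}
"""

viewer = neuroglancer.Viewer()
img = np.random.rand(64, 64, 64).astype(np.float32)
with viewer.txn() as txn:
    layer(txn, "image", img, gray_shader, 1.0, voxel_size=(1.0, 1.0, 1.0))
print(viewer)  # URL to open in a browser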
Example #10
def test_title(webdriver):
    if webdriver.browser == 'firefox':
        pytest.skip('test can hang under firefox')
    a = np.array([[[255]]], dtype=np.uint8)
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name="a",
            layer=neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(data=a,
                                                                          dimensions=s.dimensions),
                                          ),
        )

    webdriver.sync()

    assert webdriver.driver.title == 'neuroglancer'

    with webdriver.viewer.txn() as s:
        s.title = 'the title'

    webdriver.sync()

    assert webdriver.driver.title == 'the title - neuroglancer'

    with webdriver.viewer.txn() as s:
        s.title = None

    webdriver.sync()

    assert webdriver.driver.title == 'neuroglancer'
Example #11
def seglayer(txn,
             name,
             seg,
             dimensions=None,
             offx=0,
             offy=0,
             offz=0,
             voxel_size=default_voxel_size):
    """Add a segmentation layer

    :param txn: the neuroglancer transaction
    :param name: the display name of the segmentation
    :param seg: the segmentation to display
    """
    if isinstance(seg, str):
        source = seg

    else:
        if dimensions is None:
            dim_names = ["xyzct"[d] for d in range(seg.ndim)]
            dim_units = ["µm"] * seg.ndim
            dim_scales = [1.0] * seg.ndim

            dimensions = neuroglancer.CoordinateSpace(names=dim_names,
                                                      units=dim_units,
                                                      scales=dim_scales)

        source = neuroglancer.LocalVolume(data=reverse_dimensions(
            seg.astype(np.uint16)),
                                          dimensions=dimensions,
                                          voxel_offset=(offx, offy, offz))

    txn.layers[name] = neuroglancer.SegmentationLayer(source=source)
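A usage sketch for seglayer; reverse_dimensions and default_voxel_size are assumed to be provided by the same module (as in nuggt's ngutils).

import numpy as np
import neuroglancer

viewer = neuroglancer.Viewer()
seg = np.zeros((32, 32, 32), dtype=np.uint16)
seg[8:24, 8:24, 8:24] = 1  # a single labeled cube
with viewer.txn() as txn:
    seglayer(txn, "segmentation", seg)
print(viewer)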
Example #12
File: align.py, Project: richardqiu/nuggt
    def refresh(self):
        """Refresh both views"""
        with self.moving_viewer.txn() as s:
            s.voxel_size = self.moving_voxel_size
            layer(s,
                  self.IMAGE,
                  self.moving_image,
                  gray_shader,
                  self.moving_brightness,
                  voxel_size=s.voxel_size)
        with self.reference_viewer.txn() as s:
            s.voxel_size = self.reference_voxel_size
            layer(s,
                  self.REFERENCE,
                  self.reference_image,
                  red_shader,
                  self.reference_brightness,
                  voxel_size=s.voxel_size)
            layer(s,
                  self.ALIGNMENT,
                  self.alignment_image,
                  green_shader,
                  self.moving_brightness,
                  voxel_size=s.voxel_size)
            if self.segmentation is not None:
                s.layers[self.SEGMENTATION] = neuroglancer.SegmentationLayer(
                    source=neuroglancer.LocalVolume(self.segmentation,
                                                    voxel_size=s.voxel_size))
Example #13
def show_grad_as_color(path, name, bounds=None, resolution=None):
    global viewer
    global res

    if resolution is None:
        r = res
    else:
        r = resolution
    print('Loading: ' + path)
    print('as: ' + name)
    with h5py.File(path, 'r') as hf:
        hf_keys = hf.keys()
        print(list(hf_keys))
        for key in list(hf_keys):

            if bounds is not None:
                data = np.array(hf[key][bounds[0][0]:bounds[0][1],
                                        bounds[1][0]:bounds[1][1],
                                        bounds[2][0]:bounds[2][1]])
            else:
                data = np.array(hf[key])

            data = grad_to_RGB(data)

            with viewer.txn() as s:
                s.layers[name] = neuroglancer.ImageLayer(
                    source=neuroglancer.LocalVolume(data, voxel_size=r),
                    shader="""
void main() {
  emitRGB(vec3(toNormalized(getDataValue(0)),
               toNormalized(getDataValue(1)),
               toNormalized(getDataValue(2))));
}
""",
                )
Example #14
    def add_volume(self, colored=True):
        volume_filepath = os.path.join(self.local_volume_fp_root, self.stack,
                                       'human_annotation/solid_volume_5um')
        if colored:
            volume_fn = 'volume_colored.npy'
            color_segments = list(range(1, 50))
        else:
            volume_fn = 'volume.npy'
            color_segments = []

        xy_ng_resolution_um = 5
        volume_data = np.load(os.path.join(volume_filepath, volume_fn))
        # deprecated voxel_size = [xy_ng_resolution_um * 1000, xy_ng_resolution_um * 1000, 20000],  # X Y Z
        dims = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                            units=['nm', 'nm', 'nm'],
                                            scales=[10, 10, 10])

        with self.viewer.txn() as s:
            s.layers[self.stack +
                     "_Annotations"] = neuroglancer.SegmentationLayer(
                         source=neuroglancer.LocalVolume(
                             data=volume_data,  # Z,Y,X
                             dimensions=dims,
                             voxel_offset=[0, 0, 0]  # X Y Z
                         ),
                         segments=color_segments)
Example #15
def test_segment_colors(webdriver):
    a = np.array([[[42]]], dtype=np.uint8)
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name="a",
            layer=neuroglancer.SegmentationLayer(
                source=neuroglancer.LocalVolume(data=a,
                                                dimensions=s.dimensions),
                segment_colors={42: '#f00'},
            ),
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False
        assert list(s.layers[0].segment_colors.keys()) == [42]
        assert s.layers[0].segment_colors[42] == '#f00'
    screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
    assert screenshot_response.viewer_state.layers[0].segment_colors[
        42] == '#ff0000'
    screenshot = screenshot_response.screenshot
    np.testing.assert_array_equal(
        screenshot.image_pixels,
        np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
    with webdriver.viewer.txn() as s:
        s.layers[0].segment_colors[42] = '#0f0'
    screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
    assert screenshot_response.viewer_state.layers[0].segment_colors[
        42] == '#00ff00'
    screenshot = screenshot_response.screenshot
    np.testing.assert_array_equal(
        screenshot.image_pixels,
        np.tile(np.array([0, 255, 0, 255], dtype=np.uint8), (10, 10, 1)))
Example #16
def ngLayer(data, res, oo=[0, 0, 0], tt='segmentation'):
    # tt: image or segmentation
    import neuroglancer
    return neuroglancer.LocalVolume(data,
                                    volume_type=tt,
                                    voxel_size=res,
                                    offset=oo)
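A usage sketch for ngLayer; it relies on the older LocalVolume keywords (voxel_size, offset, volume_type), so an older neuroglancer release is assumed.

import numpy as np
import neuroglancer

viewer = neuroglancer.Viewer()
seg = np.zeros((64, 64, 64), dtype=np.uint32)
seg[16:48, 16:48, 16:48] = 1  # a single labeled cube
with viewer.txn() as s:
    s.layers.append(name='seg', layer=ngLayer(seg, res=[30, 6, 6]))
print(viewer)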
Example #17
def add_layer(context, array, name, visible=True, **kwargs):
    array_dims = len(array.shape)
    voxel_size = array.voxel_size[-3:]
    attrs = {
        2: {
            "names": ["y", "x"],
            "units": "nm",
            "scales": voxel_size
        },
        3: {
            "names": ["z", "y", "x"],
            "units": "nm",
            "scales": voxel_size
        },
        4: {
            "names": ["c^", "z", "y", "x"],
            "units": ["", "nm", "nm", "nm"],
            "scales": [1, *voxel_size],
        },
    }
    dimensions = neuroglancer.CoordinateSpace(**attrs[array_dims])
    print(array_dims, array.roi.get_offset())
    offset = np.array((0, ) * (array_dims - 3) + array.roi.get_offset()[-3:])
    offset = offset // attrs[array_dims]["scales"]
    # if len(offset) == 2:
    #     offset = (0,) + tuple(offset)

    d = np.asarray(array.data)
    if array.data.dtype == np.dtype(bool):
        array.data = np.array(d, dtype=np.float32)

    channels = ",".join([
        f"toNormalized(getDataValue({i}))" if i < array.shape[0] else "0"
        for i in range(3)
    ])
    shader_4d = ("""
void main() {
  emitRGB(vec3(%s));
}
""" % channels)
    shader_3d = """
void main () {
  emitGrayscale(toNormalized(getDataValue()));
}"""
    print(offset)
    layer = neuroglancer.LocalVolume(data=array.data,
                                     dimensions=dimensions,
                                     voxel_offset=tuple(offset))

    if array.data.dtype == np.dtype(np.uint64):
        context.layers.append(name=name, layer=layer, visible=visible)
    else:
        context.layers.append(
            name=name,
            layer=layer,
            visible=visible,
            shader=shader_4d if array_dims == 4 else shader_3d,
            **kwargs,
        )
Example #18
def add_stack(vol, viewer, name):
    volume_layer = neuroglancer.LocalVolume(
        data=vol,
        dimensions=neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                                units='um',
                                                scales=[10.4, 10.4, 20]),
        voxel_offset=(0, 0, 0))
    with viewer.txn() as s:
        s.layers.append(name=name, layer=volume_layer)
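A usage sketch (hedged): vol is assumed to be ordered x, y, z to match the CoordinateSpace hard-coded in add_stack (10.4 x 10.4 x 20 um scales).

import numpy as np
import neuroglancer

viewer = neuroglancer.Viewer()
vol = np.random.randint(0, 255, size=(128, 128, 32), dtype=np.uint8)
add_stack(vol, viewer, name='stack')
print(viewer)  # open the printed URL in a browser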
Example #19
    def __init__(self, raw, embedding, mst, classifier):

        self.raw = raw
        self.embedding = embedding
        self.classifier = classifier
        self.mst = mst

        self.points = []

        self.mst_graph = nx.Graph()
        self.mst_graph.add_weighted_edges_from(mst)

        self.threshold = 0.5

        self.raw_dimensions = neuroglancer.CoordinateSpace(
            names=['z', 'y', 'x'],
            units='nm',
            scales=raw.voxel_size)

        self.dimensions = neuroglancer.CoordinateSpace(
            names=['c^', 'z', 'y', 'x'],
            units=[''] + 3*['nm'],
            scales=raw.voxel_size)

        # if len(raw.shape) > 3:
        #     volume_shape = raw.shape[1:]
        # else:
        volume_shape = raw.shape

        print(f"Creating segmentation layer with shape {volume_shape}")
        #self.segmentation = np.arange(np.product(volume_shape),dtype=np.uint32)
        #self.segmentation = self.segmentation.reshape(volume_shape)
        self.segmentation = np.zeros(volume_shape, dtype=np.uint32)
        
        self.segmentation_volume = neuroglancer.LocalVolume(
            data=self.segmentation,
            dimensions=self.raw_dimensions)

        self.viewer = neuroglancer.Viewer()
        self.viewer.actions.add('label_fg', self._label_fg)
        self.viewer.actions.add('label_bg', self._label_bg)
        self.viewer.actions.add('update_seg', self._update_segmentation)

        with self.viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['shift+mousedown0'] = 'label_fg'
            s.input_event_bindings.data_view['shift+mousedown1'] = 'label_bg'
            s.input_event_bindings.data_view['keyu'] = 'update_seg'

        with self.viewer.txn() as s:
            
            add_layer(s, self.raw, 'raw')
            add_layer(s, self.embedding, 'embedding')
            s.layers['embedding'].visible = False
            s.layers['points'] = neuroglancer.LocalAnnotationLayer(
                self.dimensions)
            s.layers['segmentation'] = neuroglancer.SegmentationLayer(
                source=self.segmentation_volume)
Example #20
def test_linked_segmentation_group(webdriver):
    a = np.array([[[42]]], dtype=np.uint8)
    b = np.array([[[43]]], dtype=np.uint8)
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name="a",
            layer=neuroglancer.SegmentationLayer(
                source=neuroglancer.LocalVolume(data=a,
                                                dimensions=s.dimensions),
                segment_default_color='#f00',
                segments=[43],
            ),
            visible=False,
        )
        s.layers.append(
            name="b",
            layer=neuroglancer.SegmentationLayer(
                source=neuroglancer.LocalVolume(data=b,
                                                dimensions=s.dimensions),
                linked_segmentation_group='a',
            ),
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False
    screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
    assert screenshot_response.viewer_state.layers[
        0].segment_default_color == '#ff0000'
    screenshot = screenshot_response.screenshot
    np.testing.assert_array_equal(
        screenshot.image_pixels,
        np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
    with webdriver.viewer.txn() as s:
        s.layers[1].linked_segmentation_color_group = False
        s.layers[1].segment_default_color = '#0f0'
    screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
    screenshot = screenshot_response.screenshot
    np.testing.assert_array_equal(
        screenshot.image_pixels,
        np.tile(np.array([0, 255, 0, 255], dtype=np.uint8), (10, 10, 1)))
Example #21
def add_match_layers(
    context,
    mst,
    gt,
    label_matchings,
    node_matchings,
    node_labels_mst,
    node_labels_gt,
    name="matching",
):
    dimensions, voxel_offset, voxel_shape = dims_from_guess(mst)
    graph_dims = dimensions_from_graph_guess()
    context.layers.append(
        name="{}".format(name),
        layer=neuroglancer.SegmentationLayer(
            source=[
                neuroglancer.LocalVolume(
                    data=np.ones(voxel_shape, dtype=np.uint32),
                    dimensions=dimensions,
                    voxel_offset=voxel_offset,
                ),
                MatchSource(
                    mst,
                    gt,
                    graph_dims,
                    label_matchings,
                    node_matchings,
                    node_labels_mst,
                    node_labels_gt,
                ),
            ],
            skeleton_shader="""
// coloring options:
// source
// distance
// matching
// false_match
// merge
// split

void main() {
  if (error_vis_edge < 0.4) discard;
  emitRGB(vec3(error_vis));
  
  //if (source_edge < 0.5) discard;
  //emitRGB(vec3(matching));

  //if (abs(source_edge-1.0) > 0.5) discard;
  //emitRGB(colormapJet(distance));
}
""",
            selected_alpha=0,
            not_selected_alpha=0,
        ),
    )
Example #22
def add_layer(context, array, name, visible=True, **kwargs):
    array_dims = len(array.shape)
    voxel_size = array.voxel_size
    attrs = {
        3: {"names": ["z", "y", "x"], "units": "nm", "scales": voxel_size},
        4: {
            "names": ["c^", "z", "y", "x"],
            "units": ["", "nm", "nm", "nm"],
            "scales": [1, *voxel_size],
        },
    }
    dimensions = neuroglancer.CoordinateSpace(**attrs[array_dims])
    offset = np.array((0,) * (array_dims - 3) + array.roi.get_offset())
    offset = offset // attrs[array_dims]["scales"]

    if len(array.shape) > 3 and array.shape[0] > 3:
        pca = PCA(n_components=3)
        flattened = array.to_ndarray().reshape(array.shape[0], -1).T
        fitted = pca.fit_transform(flattened).T
        array.data = fitted.reshape((3,) + array.shape[1:])

    # d = np.asarray(array.data)
    # if array.data.dtype == np.dtype(bool):
    #     array.data = np.array(d, dtype=np.float32)

    channels = ",".join(
        [
            f"toNormalized(getDataValue({i}))" if i < array.shape[0] else "0"
            for i in range(3)
        ]
    )
    shader_4d = (
        """
void main() {
  emitRGB(vec3(%s));
}
"""
        % channels
    )
    shader_3d = None

    layer = neuroglancer.LocalVolume(
        data=array.data, dimensions=dimensions, voxel_offset=tuple(offset)
    )

    if array.data.dtype == np.dtype(np.uint64):
        context.layers.append(name=name, layer=layer, visible=visible)
    else:
        context.layers.append(
            name=name,
            layer=layer,
            visible=visible,
            shader=shader_4d if array_dims == 4 else shader_3d,
            **kwargs,
        )
Example #23
def add_example_layers(state):
    a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
    ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]],
                             indexing='ij')
    a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
    a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
    a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255

    b = np.floor(
        np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10).astype(np.uint32)
    b = np.pad(b, 1, 'constant')
    dimensions = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                              units='nm',
                                              scales=[10, 10, 10])

    state.dimensions = dimensions
    state.layers.append(
        name='a',
        layer=neuroglancer.LocalVolume(
            data=a,
            dimensions=neuroglancer.CoordinateSpace(
                names=['c^', 'x', 'y', 'z'],
                units=['', 'nm', 'nm', 'nm'],
                scales=[1, 10, 10, 10]),
            voxel_offset=(0, 20, 30, 15),
        ),
        shader="""
void main() {
  emitRGB(vec3(toNormalized(getDataValue(0)),
               toNormalized(getDataValue(1)),
               toNormalized(getDataValue(2))));
}
""",
    )
    state.layers.append(
        name='b',
        layer=neuroglancer.LocalVolume(
            data=b,
            dimensions=dimensions,
        ),
    )
    return a, b
Example #24
def show_with_junctions(data_path, name, resolution=None):
    global viewer
    global res
    if resolution is None:
        r = res
    else:
        r = resolution

    h5file = h5py.File(data_path, 'r')
    seg_vol = (np.array(h5file['vol'])).astype(np.uint32)
    seg_sz = seg_vol.shape
    junctions = (np.array(h5file['junctions'])).astype(np.uint32)
    junctions_vol = np.zeros_like(seg_vol, dtype=np.uint32)
    print(junctions)
    ww = (2, 2, 2)
    for j in range(junctions.shape[0]):
        pt = junctions[j]
        junctions_vol[max(0, pt[0] - ww[0]): min(seg_sz[0], pt[0] + ww[0] + 1), \
        max(0, pt[1] - ww[1]): min(seg_sz[1], pt[1] + ww[1] + 1), \
        max(0, pt[2] - ww[2]): min(seg_sz[2], pt[2] + ww[2] + 1)] = 1

    with viewer.txn() as s:
        s.layers.append(name=name,
                        layer=neuroglancer.LocalVolume(
                            data=seg_vol,
                            voxel_size=r,
                        ))

        s.layers.append(name=name + '_junctions',
                        layer=neuroglancer.LocalVolume(
                            data=junctions_vol,
                            voxel_size=r,
                        ))

        s.layers.append(name=name + '_merged',
                        layer=neuroglancer.LocalVolume(
                            data=(seg_vol > 0).astype(np.uint32),
                            voxel_size=r,
                        ))
Example #25
def visualize_trees(graphs: Dict[str, nx.DiGraph]):

    viewer = neuroglancer.Viewer()
    with viewer.txn() as s:
        s.layers["blegh"] = neuroglancer.ImageLayer(
            source=neuroglancer.LocalVolume(data=np.zeros([1, 1, 1]).transpose(
                [2, 1, 0]),
                                            voxel_size=[1, 1, 1]))
        node_id = itertools.count(start=1)
        for name, graph in graphs.items():
            add_trees(s, graph, node_id, name=name, visible=True)
    print(viewer)
    input("Hit ENTER to quit!")
Example #26
def setup_viewer(viewer, dtype, value, layer_type):
    a = np.array([[[value]]], dtype=dtype)
    with viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name="a",
            layer=layer_type(source=neuroglancer.LocalVolume(
                data=a, dimensions=s.dimensions), ),
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False
Example #27
def show(path,
         name,
         bounds=None,
         is_image=False,
         resolution=None,
         normalize=False,
         bb=False):
    global viewer
    global res
    if resolution is None:
        r = res
    else:
        r = resolution
    print('Loading: ' + path)
    print('as: ' + name)
    hf = h5py.File(path, 'r')
    hf_keys = hf.keys()
    print(list(hf_keys))
    for key in list(hf_keys):

        if bounds is not None:
            data = np.array(hf[key][bounds[0][0]:bounds[0][1],
                                    bounds[1][0]:bounds[1][1],
                                    bounds[2][0]:bounds[2][1]])
        else:
            data = np.array(hf[key])

        if not is_image:
            volume_type = 'segmentation'
        else:
            if normalize:
                data = (data - data.min()) / (
                    (data.max() - data.min()) + np.finfo(np.float32).eps)
            volume_type = 'image'
            # if(isinstance(data, np.floating)):
            #     data = (data*255).astype(np.uint8)
            # else:
            #     data = data.astype(np.uint8)

        with viewer.txn() as s:
            s.layers.append(name=name,
                            layer=neuroglancer.LocalVolume(
                                data=data,
                                voxel_size=r,
                                volume_type=volume_type))

        if bb:
            show_bbox([0, 0, 0], data.shape, name)
Example #28
def test_mesh_silhouette(webdriver):
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name='a',
            layer=neuroglancer.SegmentationLayer(
                source=neuroglancer.LocalVolume(data=np.zeros((10, 10, 10),
                                                              dtype=np.uint8),
                                                dimensions=s.dimensions),
                mesh_silhouette_rendering=2),
        )

    state = webdriver.sync()
    assert state.layers['a'].mesh_silhouette_rendering == 2
Example #29
def add_example_layer(state):
    ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in [100, 100, 100]], indexing='ij')
    b = np.floor(np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10).astype(np.int32) - 2
    b = np.pad(b, 1, 'constant')
    dimensions = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                              units='nm',
                                              scales=[10, 10, 10])

    state.dimensions = dimensions
    state.layers.append(
        name='b',
        layer=neuroglancer.SegmentationLayer(source=neuroglancer.LocalVolume(
            data=b,
            dimensions=dimensions,
        )),
    )
    return b
Example #30
    def __init__(self):
        viewer = self.viewer = neuroglancer.Viewer()
        viewer.actions.add('inference', self._do_inference)
        self.gt_vol = cloudvolume.CloudVolume(
            'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
            mip=0,
            bounded=True,
            progress=False,
            provenance={})
        self.dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units='nm',
            scales=self.gt_vol.resolution,
        )
        self.inf_results = zarr.zeros(self.gt_vol.bounds.to_list()[3:],
                                      chunks=(64, 64, 64),
                                      dtype=np.uint8)
        self.inf_volume = neuroglancer.LocalVolume(data=self.inf_results,
                                                   dimensions=self.dimensions)
        with viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['shift+mousedown0'] = 'inference'

        with viewer.txn() as s:
            s.layers['image'] = neuroglancer.ImageLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['ground_truth'].visible = False
            s.layers['inference'] = neuroglancer.ImageLayer(
                source=self.inf_volume,
                shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
            )