def add_example_layers(state):
    a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
    ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]], indexing='ij')
    a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
    a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
    a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255

    b = np.floor(np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10).astype(np.uint32)
    b = np.pad(b, 1, 'constant')
    dimensions = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                              units='nm',
                                              scales=[10, 10, 10])

    state.dimensions = dimensions
    state.layers.append(
        name='a',
        layer=neuroglancer.LocalVolume(
            data=a,
            dimensions=neuroglancer.CoordinateSpace(
                names=['c', 'x', 'y', 'z'],
                units=['', 'nm', 'nm', 'nm'],
                scales=[1, 10, 10, 10],
                coordinate_arrays=[
                    neuroglancer.CoordinateArray(labels=['red', 'green', 'blue']), None, None, None
                ]),
            voxel_offset=(0, 20, 30, 15),
        ))
    state.layers.append(
        name='b',
        layer=neuroglancer.LocalVolume(
            data=b,
            dimensions=dimensions,
        ),
    )
    return a, b
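
A minimal usage sketch for the helper above, assuming the standard neuroglancer Python API (Viewer and txn) and that add_example_layers is importable; the helper only populates the state passed to it, so the viewer still has to be created separately:

import neuroglancer

viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    a, b = add_example_layers(s)
print(viewer)  # prints the URL of the locally served viewer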
Code Example #2
File: align.py Project: healthonrails/nuggt
 def refresh(self):
     """Refresh both views"""
     with self.moving_viewer.txn() as s:
         s.dimensions = neuroglancer.CoordinateSpace(
             names=["x", "y", "z"],
             units=["µm"],
             scales=self.moving_voxel_size)
         layer(s,
               self.IMAGE,
               self.moving_image,
               gray_shader,
               self.moving_brightness,
               voxel_size=self.moving_voxel_size)
     with self.reference_viewer.txn() as s:
         s.dimensions = neuroglancer.CoordinateSpace(
             names=["x", "y", "z"],
             units=["µm"],
             scales=self.reference_voxel_size)
         layer(s,
               self.REFERENCE,
               self.reference_image,
               red_shader,
               self.reference_brightness,
               voxel_size=self.reference_voxel_size)
         layer(s,
               self.ALIGNMENT,
               self.alignment_image,
               green_shader,
               self.moving_brightness,
               voxel_size=self.moving_voxel_size)
         if self.segmentation is not None:
             seglayer(s, self.SEGMENTATION, self.segmentation)
Code Example #3
File: segment_interactive.py Project: funkelab/lisl
    def __init__(self, raw, embedding, mst, classifier):

        self.raw = raw
        self.embedding = embedding
        self.classifier = classifier
        self.mst = mst

        self.points = []

        self.mst_graph = nx.Graph()
        self.mst_graph.add_weighted_edges_from(mst)

        self.threshold = 0.5

        self.raw_dimensions = neuroglancer.CoordinateSpace(
            names=['z', 'y', 'x'],
            units='nm',
            scales=raw.voxel_size)

        self.dimensions = neuroglancer.CoordinateSpace(
            names=['c^', 'z', 'y', 'x'],
            units=[''] + 3*['nm'],
            scales=raw.voxel_size)

        # if len(raw.shape) > 3:
        #     volume_shape = raw.shape[1:]
        # else:
        volume_shape = raw.shape

        print(f"Creating segmentation layer with shape {volume_shape}")
        #self.segmentation = np.arange(np.product(volume_shape),dtype=np.uint32)
        #self.segmentation = self.segmentation.reshape(volume_shape)
        self.segmentation = np.zeros(volume_shape, dtype=np.uint32)
        
        self.segmentation_volume = neuroglancer.LocalVolume(
            data=self.segmentation,
            dimensions=self.raw_dimensions)

        self.viewer = neuroglancer.Viewer()
        self.viewer.actions.add('label_fg', self._label_fg)
        self.viewer.actions.add('label_bg', self._label_bg)
        self.viewer.actions.add('update_seg', self._update_segmentation)

        with self.viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['shift+mousedown0'] = 'label_fg'
            s.input_event_bindings.data_view['shift+mousedown1'] = 'label_bg'
            s.input_event_bindings.data_view['keyu'] = 'update_seg'

        with self.viewer.txn() as s:
            
            add_layer(s, self.raw, 'raw')
            add_layer(s, self.embedding, 'embedding')
            s.layers['embedding'].visible = False
            s.layers['points'] = neuroglancer.LocalAnnotationLayer(
                self.dimensions)
            s.layers['segmentation'] = neuroglancer.SegmentationLayer(
                source=self.segmentation_volume)
Code Example #4
    def useLayer(self, numLayers):
        # Store as ProjectionArray
        self.img2 = ProjectionArray(self.img, numLayers)

        dimensions = neuroglancer.CoordinateSpace(
            names=['z', 'y', 'x'],
            units='nm',
            scales=[10, 10, 10])

        with self.viewer.txn() as s:
            s.dimensions = dimensions
            s.layers.clear()
            s.layers.append(
                name='Original Image',
                layer=neuroglancer.LocalVolume(
                    data=self.img,
                    dimensions=neuroglancer.CoordinateSpace(
                        names=['c^', 'x', 'y', 'z'],
                        units=['', 'nm', 'nm', 'nm'],
                        scales=[1, 10, 10, 10]),
                    voxel_offset=(0, 0, 0, 0),
                ),
                shader='''
            void main() {
            emitRGB(vec3(toNormalized(getDataValue(0)),
                        toNormalized(getDataValue(1)),
                        toNormalized(getDataValue(2))));
            }        
            ''')
            s.layers.append(
                name='Z-Projection Image',
                layer=neuroglancer.LocalVolume(
                    data=self.img2,
                    dimensions=neuroglancer.CoordinateSpace(
                        names=['c^', 'x', 'y', 'z'],
                        units=['', 'nm', 'nm', 'nm'],
                        scales=[1, 10, 10, 10]),
                    voxel_offset=(0, 0, 0, 0),
                ),
                shader='''
            void main() {
            emitRGB(vec3(toNormalized(getDataValue(0)),
                        toNormalized(getDataValue(1)),
                        toNormalized(getDataValue(2))));
            }        
            ''')
            s.layers['Original Image'] = s.layers[0]
            s.layers['Z-Projection Image'] = s.layers[1]
            s.layout = neuroglancer.row_layout([
                    neuroglancer.LayerGroupViewer(layers=['Original Image']),
                    neuroglancer.LayerGroupViewer(layers=['Z-Projection Image']),
            ])
Code Example #5
def test_segment_colors(webdriver):
    a = np.array([[[42]]], dtype=np.uint8)
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name="a",
            layer=neuroglancer.SegmentationLayer(
                source=neuroglancer.LocalVolume(data=a,
                                                dimensions=s.dimensions),
                segment_colors={42: '#f00'},
            ),
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False
        assert list(s.layers[0].segment_colors.keys()) == [42]
        assert s.layers[0].segment_colors[42] == '#f00'
    screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
    assert screenshot_response.viewer_state.layers[0].segment_colors[
        42] == '#ff0000'
    screenshot = screenshot_response.screenshot
    np.testing.assert_array_equal(
        screenshot.image_pixels,
        np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
    with webdriver.viewer.txn() as s:
        s.layers[0].segment_colors[42] = '#0f0'
    screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
    assert screenshot_response.viewer_state.layers[0].segment_colors[
        42] == '#00ff00'
    screenshot = screenshot_response.screenshot
    np.testing.assert_array_equal(
        screenshot.image_pixels,
        np.tile(np.array([0, 255, 0, 255], dtype=np.uint8), (10, 10, 1)))
Code Example #6
    def test_create_nghandledimensionslayer(self):
        """Check if the dimensions are handled."""
        dimensions = ng.CoordinateSpace(names=['x', 'y', 'z'], units='um', scales=[1, 1, 1])

        dimensions2 = _handle_ngdimensions(layer_kws={'dimensions': dimensions})

        assert dimensions2 == dimensions
Code Example #7
    def __init__(self):
        viewer = self.viewer = neuroglancer.Viewer()
        self.gt_vol = cloudvolume.CloudVolume(
            'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
            mip=0,
            bounded=True,
            progress=False,
            provenance={})
        viewer.actions.add('start-fill', self._start_fill_action)
        viewer.actions.add('stop-fill', self._stop_fill_action)
        self.dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units='nm',
            scales=[8, 8, 8],
        )
        with viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['shift+mousedown0'] = 'start-fill'
            s.input_event_bindings.data_view['keyt'] = 'stop-fill'

        with viewer.txn() as s:
            s.layers['image'] = neuroglancer.ImageLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['ground_truth'].visible = False
            self.flood_fill_event = None
Code Example #8
def test_title(webdriver):
    if webdriver.browser == 'firefox':
        pytest.skip('test can hang under firefox')
    a = np.array([[[255]]], dtype=np.uint8)
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name="a",
            layer=neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(data=a,
                                                                          dimensions=s.dimensions),
                                          ),
        )

    webdriver.sync()

    assert webdriver.driver.title == 'neuroglancer'

    with webdriver.viewer.txn() as s:
        s.title = 'the title'

    webdriver.sync()

    assert webdriver.driver.title == 'the title - neuroglancer'

    with webdriver.viewer.txn() as s:
        s.title = None

    webdriver.sync()

    assert webdriver.driver.title == 'neuroglancer'
Code Example #9
    def add_volume(self, volume, layer_name=None, clear_layer=False):
        if self.viewer is None:
            self.viewer = neuroglancer.Viewer()

        if layer_name is None:
            layer_name = f'{self.layer_type}_{self.scales}'

        source = neuroglancer.LocalVolume(
            data=volume,
            dimensions=neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                                    units='nm',
                                                    scales=self.scales),
            voxel_offset=self.offset)

        if self.layer_type == 'segmentation':
            layer = neuroglancer.SegmentationLayer(source=source)
        else:
            layer = neuroglancer.ImageLayer(source=source)

        with self.viewer.txn() as s:
            if clear_layer:
                s.layers.clear()
            s.layers[layer_name] = layer

        print(f'A new layer named {layer_name} is added to:')
        print(self.viewer)
Code Example #10
File: layers.py Project: VarunSane97/pyroglancer
def _handle_ngdimensions(layer_kws):
    """Return the dimensions of different neuroglancer spaces."""
    # return dimensions either based on already set ngspace or a string 'ngspace'.
    dimensions = None
    if 'ngspace' in sys.modules:
        layer_kws['ngspace'] = sys.modules['ngspace']

    if 'dimensions' in layer_kws:
        dimensions = layer_kws['dimensions']
    if 'ngspace' in layer_kws:
        ngspace = _get_ngspace(layer_kws)

        print('Dimensions are in:', ngspace['ngspace'])
        dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units=ngspace['dimension']['units'],
            scales=[
                ngspace['dimension']['x'], ngspace['dimension']['y'],
                ngspace['dimension']['z']
            ])

    if dimensions is None:
        raise ValueError(
            "dimensions are not set: pass either 'ngspace' or 'dimensions'")

    return dimensions
Code Example #11
def seglayer(txn,
             name,
             seg,
             dimensions=None,
             offx=0,
             offy=0,
             offz=0,
             voxel_size=default_voxel_size):
    """Add a segmentation layer

    :param txn: the neuroglancer transaction
    :param name: the display name of the segmentation
    :param seg: the segmentation to display
    """
    if isinstance(seg, str):
        source = seg

    else:
        if dimensions is None:
            dim_names = ["xyzct"[d] for d in range(seg.ndim)]
            dim_units = ["µm"] * seg.ndim
            dim_scales = [1.0] * seg.ndim

            dimensions = neuroglancer.CoordinateSpace(names=dim_names,
                                                      units=dim_units,
                                                      scales=dim_scales)

        source = neuroglancer.LocalVolume(data=reverse_dimensions(
            seg.astype(np.uint16)),
                                          dimensions=dimensions,
                                          voxel_offset=(offx, offy, offz))

    txn.layers[name] = neuroglancer.SegmentationLayer(source=source)
Code Example #12
    def add_volume(self, colored=True):
        volume_filepath = os.path.join(self.local_volume_fp_root, self.stack,
                                       'human_annotation/solid_volume_5um')
        color_segments = []
        if colored:
            volume_fn = 'volume_colored.npy'
            # Show segment IDs 1-49 in the segmentation layer.
            color_segments = list(range(1, 50))
        else:
            volume_fn = 'volume.npy'

        xy_ng_resolution_um = 5
        volume_data = np.load(os.path.join(volume_filepath, volume_fn))
        # deprecated voxel_size = [xy_ng_resolution_um * 1000, xy_ng_resolution_um * 1000, 20000],  # X Y Z
        dims = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                            units=['nm', 'nm', 'nm'],
                                            scales=[10, 10, 10])

        with self.viewer.txn() as s:
            s.layers[self.stack +
                     "_Annotations"] = neuroglancer.SegmentationLayer(
                         source=neuroglancer.LocalVolume(
                             data=volume_data,  # Z,Y,X
                             dimensions=dims,
                             voxel_offset=[0, 0, 0]  # X Y Z
                         ),
                         segments=color_segments)
Code Example #13
def add_layer(context, array, name, visible=True, **kwargs):
    array_dims = len(array.shape)
    voxel_size = array.voxel_size[-3:]
    attrs = {
        2: {
            "names": ["y", "x"],
            "units": "nm",
            "scales": voxel_size
        },
        3: {
            "names": ["z", "y", "x"],
            "units": "nm",
            "scales": voxel_size
        },
        4: {
            "names": ["c^", "z", "y", "x"],
            "units": ["", "nm", "nm", "nm"],
            "scales": [1, *voxel_size],
        },
    }
    dimensions = neuroglancer.CoordinateSpace(**attrs[array_dims])
    print(array_dims, array.roi.get_offset())
    offset = np.array((0, ) * (array_dims - 3) + array.roi.get_offset()[-3:])
    offset = offset // attrs[array_dims]["scales"]
    # if len(offset) == 2:
    #     offset = (0,) + tuple(offset)

    d = np.asarray(array.data)
    if array.data.dtype == np.dtype(bool):
        array.data = np.array(d, dtype=np.float32)

    channels = ",".join([
        f"toNormalized(getDataValue({i}))" if i < array.shape[0] else "0"
        for i in range(3)
    ])
    shader_4d = ("""
void main() {
  emitRGB(vec3(%s));
}
""" % channels)
    shader_3d = """
void main () {
  emitGrayscale(toNormalized(getDataValue()));
}"""
    print(offset)
    layer = neuroglancer.LocalVolume(data=array.data,
                                     dimensions=dimensions,
                                     voxel_offset=tuple(offset))

    if array.data.dtype == np.dtype(np.uint64):
        context.layers.append(name=name, layer=layer, visible=visible)
    else:
        context.layers.append(
            name=name,
            layer=layer,
            visible=visible,
            shader=shader_4d if array_dims == 4 else shader_3d,
            **kwargs,
        )
Code Example #14
File: neuroglancer2.py Project: pattonw/neurolight
def dimensions_from_volume(array):
    voxel_size = array.voxel_size
    spatial_dims = 3
    attrs = {"names": ["z", "y", "x"], "units": "nm", "scales": voxel_size}
    dimensions = neuroglancer.CoordinateSpace(**attrs)
    offset = np.array(array.roi.get_offset())
    offset = offset // attrs["scales"]
    return dimensions, offset, array.shape[-spatial_dims:]
Code Example #15
 def __init__(self, source, units='nm', scales=[1, 1, 1], **kwargs):
     self.dimensions = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                                    units=units,
                                                    scales=scales)
     props = copy.deepcopy(self.DEFAULTS)
     props['source'] = LocalMeshSource(source, self.dimensions, **kwargs)
     props.update(**kwargs)
     super().__init__(**props)
Code Example #16
    def __init__(self, synapse_path, top_method='min', num_top_partners=10):
        with open(synapse_path, 'r') as f:
            synapse_data = json.load(f)['data']
        self.synapses_by_id, self.synapse_partner_counts = get_synapses_by_id(
            synapse_data)
        self.top_method = top_method
        self.num_top_partners = num_top_partners

        dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units='nm',
            scales=[8, 8, 8],
        )

        viewer = self.viewer = neuroglancer.Viewer()
        viewer.actions.add('select-custom', self._handle_select)
        with viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['dblclick0'] = 'select-custom'
        with viewer.txn() as s:
            s.projection_orientation = [
                0.63240087, 0.01582051, 0.05692779, 0.77238464
            ]
            s.dimensions = dimensions
            s.position = [3000, 3000, 3000]
            s.layers['image'] = neuroglancer.ImageLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['partners'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['synapses'] = neuroglancer.LocalAnnotationLayer(
                dimensions=dimensions,
                linked_segmentation_layer='ground_truth')
            s.layout = neuroglancer.row_layout([
                neuroglancer.LayerGroupViewer(
                    layout='xy',
                    layers=['image', 'ground_truth', 'partners', 'synapses'],
                ),
                neuroglancer.LayerGroupViewer(
                    layout='3d',
                    layers=['ground_truth', 'synapses'],
                ),
                neuroglancer.LayerGroupViewer(
                    layout='3d',
                    layers=['partners', 'synapses'],
                ),
            ])

        self.selected_segments = frozenset()
        self.viewer.shared_state.add_changed_callback(
            lambda: self.viewer.defer_callback(self.on_state_changed))
Code Example #17
def add_stack(vol, viewer, name):
    volume_layer = neuroglancer.LocalVolume(
        data=vol,
        dimensions=neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                                units='um',
                                                scales=[10.4, 10.4, 20]),
        voxel_offset=(0, 0, 0))
    with viewer.txn() as s:
        s.layers.append(name=name, layer=volume_layer)
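
add_stack only wraps the array in a LocalVolume and appends it under the given name. A minimal usage sketch, assuming a running local viewer; the random placeholder volume below is illustrative and not from the original project:

import numpy as np
import neuroglancer

viewer = neuroglancer.Viewer()
# Placeholder 3D stack; add_stack interprets the axes as x, y, z with 10.4 x 10.4 x 20 um voxels.
vol = np.random.randint(0, 256, size=(64, 64, 32), dtype=np.uint8)
add_stack(vol, viewer, name='stack')
print(viewer)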
Code Example #18
File: neuroglancer2.py Project: pattonw/neurolight
def add_layer(context, array, name, visible=True, **kwargs):
    array_dims = len(array.shape)
    voxel_size = array.voxel_size
    attrs = {
        3: {"names": ["z", "y", "x"], "units": "nm", "scales": voxel_size},
        4: {
            "names": ["c^", "z", "y", "x"],
            "units": ["", "nm", "nm", "nm"],
            "scales": [1, *voxel_size],
        },
    }
    dimensions = neuroglancer.CoordinateSpace(**attrs[array_dims])
    offset = np.array((0,) * (array_dims - 3) + array.roi.get_offset())
    offset = offset // attrs[array_dims]["scales"]

    if len(array.shape) > 3 and array.shape[0] > 3:
        pca = PCA(n_components=3)
        flattened = array.to_ndarray().reshape(array.shape[0], -1).T
        fitted = pca.fit_transform(flattened).T
        array.data = fitted.reshape((3,) + array.shape[1:])

    # d = np.asarray(array.data)
    # if array.data.dtype == np.dtype(bool):
    #     array.data = np.array(d, dtype=np.float32)

    channels = ",".join(
        [
            f"toNormalized(getDataValue({i}))" if i < array.shape[0] else "0"
            for i in range(3)
        ]
    )
    shader_4d = (
        """
void main() {
  emitRGB(vec3(%s));
}
"""
        % channels
    )
    shader_3d = None

    layer = neuroglancer.LocalVolume(
        data=array.data, dimensions=dimensions, voxel_offset=tuple(offset)
    )

    if array.data.dtype == np.dtype(np.uint64):
        context.layers.append(name=name, layer=layer, visible=visible)
    else:
        context.layers.append(
            name=name,
            layer=layer,
            visible=visible,
            shader=shader_4d if array_dims == 4 else shader_3d,
            **kwargs,
        )
Code Example #19
def add_example_layers(state):
    a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
    ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]],
                             indexing='ij')
    a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
    a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
    a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255

    b = np.floor(
        np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10).astype(np.uint32)
    b = np.pad(b, 1, 'constant')
    dimensions = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                              units='nm',
                                              scales=[10, 10, 10])

    state.dimensions = dimensions
    state.layers.append(
        name='a',
        layer=neuroglancer.LocalVolume(
            data=a,
            dimensions=neuroglancer.CoordinateSpace(
                names=['c^', 'x', 'y', 'z'],
                units=['', 'nm', 'nm', 'nm'],
                scales=[1, 10, 10, 10]),
            voxel_offset=(0, 20, 30, 15),
        ),
        shader="""
void main() {
  emitRGB(vec3(toNormalized(getDataValue(0)),
               toNormalized(getDataValue(1)),
               toNormalized(getDataValue(2))));
}
""",
    )
    state.layers.append(
        name='b',
        layer=neuroglancer.LocalVolume(
            data=b,
            dimensions=dimensions,
        ),
    )
    return a, b
Code Example #20
def setup_viewer(viewer, dtype, value, layer_type):
    a = np.array([[[value]]], dtype=dtype)
    with viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name="a",
            layer=layer_type(source=neuroglancer.LocalVolume(
                data=a, dimensions=s.dimensions), ),
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False
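
Because the layer class is passed in as layer_type, the same helper covers image and segmentation cases. A minimal usage sketch; the viewer construction here is an assumption for illustration, not part of the original test helper:

import numpy as np
import neuroglancer

viewer = neuroglancer.Viewer()
# Render a single uint8 voxel with value 255 as an image layer.
setup_viewer(viewer, np.uint8, 255, neuroglancer.ImageLayer)
print(viewer)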
Code Example #21
    def _append_synapse_annotation_layer(
            self, viewer_state: ng.viewer_state.ViewerState, name: str,
            synapses: Synapses):
        annotations = []

        pre_synapses = synapses.pre_with_physical_coordinate
        self._append_point_annotation_layer(viewer_state, name + '_pre',
                                            pre_synapses)

        post_synapses = synapses.post_with_physical_coordinate
        if post_synapses is not None:
            for post_idx in range(post_synapses.shape[0]):
                pre_idx = post_synapses[post_idx, 0]
                pre_coordinate = pre_synapses[pre_idx, :]
                post_coordinate = post_synapses[post_idx, 1:]
                post_annotation = ng.LineAnnotation(
                    id=str(post_idx),
                    # note that the synapse coordinate is already in xyz order
                    # so we do not need to reverse it!
                    pointA=pre_coordinate,
                    pointB=post_coordinate,
                    props=['#0ff', 5])
                annotations.append(post_annotation)

        viewer_state.layers.append(
            name=name,
            layer=ng.LocalAnnotationLayer(
                dimensions=ng.CoordinateSpace(names=['z', 'y', 'x'],
                                              units="nm",
                                              scales=(1, 1, 1)),
                annotation_properties=[
                    ng.AnnotationPropertySpec(
                        id='color',
                        type='rgb',
                        default='red',
                    ),
                    ng.AnnotationPropertySpec(id='size',
                                              type='float32',
                                              default=5)
                ],
                annotations=annotations,
                shader='''
void main() {
  setColor(prop_color());
  setPointMarkerSize(prop_size());
}
''',
            ),
        )
Code Example #22
File: neuroglancer2.py Project: pattonw/neurolight
def dimensions_from_guess(dataset, graph):
    locations = dataset[f"{graph}-locations"][()]
    lower = locations.min(axis=0)
    upper = locations.max(axis=0)
    voxel_size = [1000, 300, 300]
    lower -= lower % voxel_size
    upper += voxel_size - upper % voxel_size
    shape = ((upper - lower) / voxel_size).astype(int)

    attrs = {"names": ["z", "y", "x"], "units": "nm", "scales": voxel_size}
    dimensions = neuroglancer.CoordinateSpace(**attrs)

    offset = np.array(lower)
    offset = offset // voxel_size
    return dimensions, offset, shape
Code Example #23
def test_mesh_silhouette(webdriver):
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name='a',
            layer=neuroglancer.SegmentationLayer(
                source=neuroglancer.LocalVolume(data=np.zeros((10, 10, 10),
                                                              dtype=np.uint8),
                                                dimensions=s.dimensions),
                mesh_silhouette_rendering=2),
        )

    state = webdriver.sync()
    assert state.layers['a'].mesh_silhouette_rendering == 2
Code Example #24
def pointlayer(txn,
               name,
               x,
               y,
               z,
               color="yellow",
               size=5,
               shader=pointlayer_shader,
               voxel_size=default_voxel_size):
    """Add a point layer.

    :param txn: the neuroglancer viewer transaction context
    :param name: the displayable name of the point layer
    :param x: the x coordinate per point
    :param y: the y coordinate per point
    :param z: the z coordinate per point
    :param color: the color of the points in the layer, e.g. "red", "yellow"
    :param size: the size of the points
    :param voxel_size: the size of a voxel (x, y, z)
    """

    dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                              units=["µm", "µm", "µm"],
                                              scales=voxel_size)
    layer = neuroglancer.LocalAnnotationLayer(
        dimensions=dimensions,
        annotation_properties=[
            neuroglancer.AnnotationPropertySpec(
                id='color',
                type='rgb',
                default=color,
            ),
            neuroglancer.AnnotationPropertySpec(
                id='size',
                type='float32',
                default=float(size),
            )
        ],
        annotations=[
            neuroglancer.PointAnnotation(
                id=i + 1, point=[zz, yy,
                                 xx])  # input points should be in zyx order
            for i, (xx, yy, zz) in enumerate(zip(x, y, z))
        ],
        shader=shader)
    txn.layers[name] = layer
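
pointlayer expects parallel x, y, z coordinate sequences and must be called inside a viewer transaction; pointlayer_shader and default_voxel_size are module-level defaults from the surrounding file. A minimal usage sketch with illustrative coordinates and layer name:

import neuroglancer

viewer = neuroglancer.Viewer()
x = [10.0, 20.0, 30.0]
y = [15.0, 25.0, 35.0]
z = [5.0, 5.0, 5.0]
with viewer.txn() as txn:
    pointlayer(txn, 'detections', x, y, z, color='red', size=8)
print(viewer)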
Code Example #25
def add_example_layer(state):
    ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in [100, 100, 100]], indexing='ij')
    b = np.floor(np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10).astype(np.int32) - 2
    b = np.pad(b, 1, 'constant')
    dimensions = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                              units='nm',
                                              scales=[10, 10, 10])

    state.dimensions = dimensions
    state.layers.append(
        name='b',
        layer=neuroglancer.SegmentationLayer(source=neuroglancer.LocalVolume(
            data=b,
            dimensions=dimensions,
        )),
    )
    return b
Code Example #26
    def __init__(self):
        viewer = self.viewer = neuroglancer.Viewer()
        viewer.actions.add('inference', self._do_inference)
        self.gt_vol = cloudvolume.CloudVolume(
            'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
            mip=0,
            bounded=True,
            progress=False,
            provenance={})
        self.dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units='nm',
            scales=self.gt_vol.resolution,
        )
        self.inf_results = zarr.zeros(self.gt_vol.bounds.to_list()[3:],
                                      chunks=(64, 64, 64),
                                      dtype=np.uint8)
        self.inf_volume = neuroglancer.LocalVolume(data=self.inf_results,
                                                   dimensions=self.dimensions)
        with viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['shift+mousedown0'] = 'inference'

        with viewer.txn() as s:
            s.layers['image'] = neuroglancer.ImageLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['ground_truth'].visible = False
            s.layers['inference'] = neuroglancer.ImageLayer(
                source=self.inf_volume,
                shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
            )
Code Example #27
File: neuroglancer2.py Project: pattonw/neurolight
def dims_from_guess(graph):
    locations = np.array([attrs["location"] for attrs in graph.nodes.values()])
    try:
        lower = locations.min(axis=0)
        upper = locations.max(axis=0)
    except ValueError:  # empty graph: locations has no rows to reduce over
        lower = np.array([0, 0, 0])
        upper = np.array([0, 0, 0])
    voxel_size = [1000, 300, 300]
    lower -= lower % voxel_size
    upper += voxel_size - upper % voxel_size
    shape = ((upper - lower) / voxel_size).astype(int)

    attrs = {"names": ["z", "y", "x"], "units": "nm", "scales": voxel_size}
    dimensions = neuroglancer.CoordinateSpace(**attrs)

    offset = np.array(lower)
    offset = offset // voxel_size
    return dimensions, offset, shape
Code Example #28
File: screenshot_test.py Project: kevinyu328/ng_v2
def test_screenshot_basic(webdriver):
    a = np.array([[[255]]], dtype=np.uint8)
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name="a",
            layer=neuroglancer.ImageLayer(
                source=neuroglancer.LocalVolume(data=a, dimensions=s.dimensions),
                shader='void main () { emitRGB(vec3(1.0, 0.0, 0.0)); }',
            ),
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False
    screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
    np.testing.assert_array_equal(screenshot.image_pixels,
                                  np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
Code Example #29
def test_linked_segmentation_group(webdriver):
    a = np.array([[[42]]], dtype=np.uint8)
    b = np.array([[[43]]], dtype=np.uint8)
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name="a",
            layer=neuroglancer.SegmentationLayer(
                source=neuroglancer.LocalVolume(data=a,
                                                dimensions=s.dimensions),
                segment_default_color='#f00',
                segments=[43],
            ),
            visible=False,
        )
        s.layers.append(
            name="b",
            layer=neuroglancer.SegmentationLayer(
                source=neuroglancer.LocalVolume(data=b,
                                                dimensions=s.dimensions),
                linked_segmentation_group='a',
            ),
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False
    screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
    assert screenshot_response.viewer_state.layers[
        0].segment_default_color == '#ff0000'
    screenshot = screenshot_response.screenshot
    np.testing.assert_array_equal(
        screenshot.image_pixels,
        np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
    with webdriver.viewer.txn() as s:
        s.layers[1].linked_segmentation_color_group = False
        s.layers[1].segment_default_color = '#0f0'
    screenshot_response = webdriver.viewer.screenshot(size=[10, 10])
    screenshot = screenshot_response.screenshot
    np.testing.assert_array_equal(
        screenshot.image_pixels,
        np.tile(np.array([0, 255, 0, 255], dtype=np.uint8), (10, 10, 1)))
Code Example #30
def test_slider(webdriver):

    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y"], units="nm", scales=[1, 1])
        s.position = [0.5, 0.5]
        s.layers.append(
            name='image',
            layer=neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(
                dimensions=s.dimensions,
                data=np.full(shape=(1, 1), dtype=np.uint32, fill_value=42),
            ),
                                          ),
            visible=True,
            shader='''
#uicontrol float color slider(min=0, max=10)

void main() {
  emitGrayscale(color);
}
''',
            shader_controls={
                'color': 1,
            },
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False

    control = webdriver.viewer.state.layers['image'].shader_controls['color']
    assert control == 1

    def expect_color(color):
        webdriver.sync()
        screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
        np.testing.assert_array_equal(screenshot.image_pixels,
                                      np.tile(np.array(color, dtype=np.uint8), (10, 10, 1)))

    expect_color([255, 255, 255, 255])
    with webdriver.viewer.txn() as s:
        s.layers['image'].shader_controls = {
            'color': 0,
        }
    expect_color([0, 0, 0, 255])