def _update_state(self, s):
        """Refresh the viewer state *s* from the cached split result.

        Recomputes the cached split, persists the editing state, pushes the
        current seeds into the viewer, updates the segment sets shown by the
        'unused'/'original'/'split-result' layers, renders the split result
        when one exists, and rebuilds the two-panel 3d layout.
        """
        cached = self.cached_split_result
        cached.update()
        self.state.save()
        _set_viewer_seeds(s, cached.seeds)

        s.layers['unused'].segments = self.state.unused_supervoxels
        s.layers['original'].segments = cached.supervoxels
        s.layers['split-result'].segments = cached.supervoxels

        result = cached.split_result
        if result is not None:
            self._show_split_result(
                s,
                cur_eqs=result['cur_eqs'],
                supervoxel_map=result['supervoxel_map'],
            )

        # Both panels show the seed layers; they differ in the segment layer.
        seed_layers = ['inclusive-seeds', 'exclusive-seeds']
        original_panel = neuroglancer.LayerGroupViewer(
            layout='3d',
            layers=['image', 'original', 'unused'] + seed_layers)
        split_panel = neuroglancer.LayerGroupViewer(
            layout='3d',
            layers=['image', 'split-result'] + seed_layers)
        s.layout = neuroglancer.row_layout([original_panel, split_panel])
Example #2
0
    def _neuroglancer_link(self):
        """Build a neuroglancer-demo URL showing train and validate datasets.

        Collects the neuroglancer layers contributed by each training dataset
        (skipping names already collected), does the same for validation
        datasets when present, appends them all to a fresh viewer, and lays
        the two groups out side by side.

        Returns:
            A neuroglancer-demo URL whose fragment encodes the viewer state.
        """
        # NOTE(review): options/store_path are never used below — retained in
        # case Options.instance() is relied on for side effects; confirm.
        options = Options.instance()
        store_path = Path(options.runs_base_dir).expanduser()

        viewer = neuroglancer.Viewer()
        with viewer.txn() as s:

            train_layers = {}
            # Each dataset only contributes layer names not already present.
            for dataset in self.train:
                train_layers.update(
                    dataset._neuroglancer_layers(
                        exclude_layers=set(train_layers.keys())))

            validate_layers = {}
            if self.validate is not None:
                for dataset in self.validate:
                    validate_layers.update(
                        dataset._neuroglancer_layers(
                            exclude_layers=set(validate_layers.keys())))

            for layer_name, (layer, kwargs) in itertools.chain(
                    train_layers.items(), validate_layers.items()):
                s.layers.append(
                    name=layer_name,
                    layer=layer,
                    **kwargs,
                )

            # Train layers on the left panel, validate layers on the right.
            s.layout = neuroglancer.row_layout([
                neuroglancer.LayerGroupViewer(
                    layers=list(train_layers.keys())),
                neuroglancer.LayerGroupViewer(
                    layers=list(validate_layers.keys())),
            ])
        return f"http://neuroglancer-demo.appspot.com/#!{json.dumps(viewer.state.to_json())}"
Example #3
0
    def __init__(self, synapse_path, top_method='min', num_top_partners=10):
        """Set up a three-panel neuroglancer viewer for browsing synapses.

        Args:
            synapse_path: path to a JSON file whose 'data' entry holds the
                synapse records handed to get_synapses_by_id.
            top_method: partner-ranking strategy label (default 'min');
                consumed by the code that reads self.top_method.
            num_top_partners: number of top partners to consider.
        """
        with open(synapse_path, 'r') as f:
            synapse_data = json.load(f)['data']
        self.synapses_by_id, self.synapse_partner_counts = get_synapses_by_id(
            synapse_data)
        self.top_method = top_method
        self.num_top_partners = num_top_partners

        # Global coordinate space: 8 nm isotropic voxels (FlyEM FIB-25 data).
        dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units='nm',
            scales=[8, 8, 8],
        )

        viewer = self.viewer = neuroglancer.Viewer()
        # Double-click in a data view triggers the custom selection handler.
        viewer.actions.add('select-custom', self._handle_select)
        with viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['dblclick0'] = 'select-custom'
        with viewer.txn() as s:
            s.projection_orientation = [
                0.63240087, 0.01582051, 0.05692779, 0.77238464
            ]
            s.dimensions = dimensions
            s.position = [3000, 3000, 3000]
            s.layers['image'] = neuroglancer.ImageLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            # 'partners' reuses the ground-truth source as a separate layer so
            # partner segments can be displayed independently.
            s.layers['partners'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['synapses'] = neuroglancer.LocalAnnotationLayer(
                dimensions=dimensions,
                linked_segmentation_layer='ground_truth')
            # Three panels: 2d overview, 3d ground truth, 3d partners.
            s.layout = neuroglancer.row_layout([
                neuroglancer.LayerGroupViewer(
                    layout='xy',
                    layers=['image', 'ground_truth', 'partners', 'synapses'],
                ),
                neuroglancer.LayerGroupViewer(
                    layout='3d',
                    layers=['ground_truth', 'synapses'],
                ),
                neuroglancer.LayerGroupViewer(
                    layout='3d',
                    layers=['partners', 'synapses'],
                ),
            ])

        self.selected_segments = frozenset()
        # Defer so rapid state changes collapse into one callback invocation.
        self.viewer.shared_state.add_changed_callback(
            lambda: self.viewer.defer_callback(self.on_state_changed))
Example #4
0
    def _visualize_training(self, run):
        """Return a neuroglancer URL visualizing a run's snapshots/validations.

        Args:
            run: run object whose .name selects the snapshot and validation
                containers.

        Returns:
            A neuroglancer-demo URL whose fragment encodes the viewer state.
        """
        snapshot_container = self.snapshot_container(run.name)
        validation_container = self.validation_container(run.name)
        snapshot_zarr = zarr.open(snapshot_container.container)
        validation_zarr = zarr.open(validation_container.container)

        snapshots = []
        validations = []

        def generate_groups(array_names):
            # Build a visititems visitor that records each zarr array's path.
            def add_element(name, obj):
                if isinstance(obj, zarr.hierarchy.Array):
                    array_names.append(name)

            return add_element

        # The visitor can be passed directly; the original wrapped each call
        # in a redundant `lambda name, obj: visitor(name, obj)`.
        snapshot_zarr.visititems(generate_groups(snapshots))
        validation_zarr.visititems(generate_groups(validations))

        viewer = neuroglancer.Viewer()
        with viewer.txn() as s:

            snapshot_layers = {}
            for snapshot in snapshots:
                snapshot_layers[snapshot] = ZarrArray.open_from_array_identifier(
                    snapshot_container.array_identifier(snapshot), name=snapshot
                )._neuroglancer_layer()

            validation_layers = {}
            for validation in validations:
                validation_layers[validation] = ZarrArray.open_from_array_identifier(
                    validation_container.array_identifier(validation), name=validation
                )._neuroglancer_layer()

            for layer_name, (layer, kwargs) in itertools.chain(
                snapshot_layers.items(), validation_layers.items()
            ):
                s.layers.append(
                    name=layer_name,
                    layer=layer,
                    **kwargs,
                )

            # Side-by-side panels: snapshots left, validations right.
            s.layout = neuroglancer.row_layout(
                [
                    neuroglancer.LayerGroupViewer(layers=list(snapshot_layers.keys())),
                    neuroglancer.LayerGroupViewer(
                        layers=list(validation_layers.keys())
                    ),
                ]
            )
        return f"http://neuroglancer-demo.appspot.com/#!{json.dumps(viewer.state.to_json())}"
Example #5
0
    def useLayer(self, numLayers):
        """Show the original image beside its z-projection in the viewer.

        Args:
            numLayers: number of layers forwarded to ProjectionArray when
                building the projected volume.
        """
        # Store as ProjectionArray
        self.img2 = ProjectionArray(self.img, numLayers)

        dimensions = neuroglancer.CoordinateSpace(
            names=['z', 'y', 'x'],
            units='nm',
            scales=[10, 10, 10])

        # Both layers share the same channel-first coordinate space and the
        # same 3-channel RGB shader; de-duplicated from two identical copies.
        layer_space = neuroglancer.CoordinateSpace(
            names=['c^', 'x', 'y', 'z'],
            units=['', 'nm', 'nm', 'nm'],
            scales=[1, 10, 10, 10])
        rgb_shader = '''
            void main() {
            emitRGB(vec3(toNormalized(getDataValue(0)),
                        toNormalized(getDataValue(1)),
                        toNormalized(getDataValue(2))));
            }        
            '''

        with self.viewer.txn() as s:
            s.dimensions = dimensions
            s.layers.clear()
            for name, data in (('Original Image', self.img),
                               ('Z-Projection Image', self.img2)):
                s.layers.append(
                    name=name,
                    layer=neuroglancer.LocalVolume(
                        data=data,
                        dimensions=layer_space,
                        voxel_offset=(0, 0, 0, 0),
                    ),
                    shader=rgb_shader)
            # The original re-assigned s.layers['...'] = s.layers[i] after
            # already appending by name — a no-op, removed.
            s.layout = neuroglancer.row_layout([
                    neuroglancer.LayerGroupViewer(layers=['Original Image']),
                    neuroglancer.LayerGroupViewer(layers=['Z-Projection Image']),
            ])
Example #6
0
# Bind the 'my-action' handler to the 't' key and show a status message.
viewer.actions.add('my-action', my_action)
with viewer.config_state.txn() as s:
    s.input_event_bindings.viewer['keyt'] = 'my-action'
    s.status_messages['hello'] = 'Welcome to this example'

# Switch to a single 3d view, zoomed out.
with viewer.txn() as s:
    s.layout = '3d'
    s.projection_scale = 3000

# Capture a screenshot and display it as an ipywidgets Image (notebook use).
from ipywidgets import Image
screenshot = viewer.screenshot(size=[1000, 1000])
screenshot_image = Image(value=screenshot.screenshot.image)
screenshot_image

# Two-panel layout: image + overlay on the left, segmentation on the right.
with viewer.txn() as s:
    s.layout = neuroglancer.row_layout([
        neuroglancer.LayerGroupViewer(layers=['image', 'overlay']),
        neuroglancer.LayerGroupViewer(layers=['segmentation'])
    ])

# Same split, but without the overlay in the left panel.
with viewer.txn() as s:
    s.layout = neuroglancer.row_layout([
        neuroglancer.LayerGroupViewer(layers=['image']),
        neuroglancer.LayerGroupViewer(layers=['segmentation'])
    ])

# Print a shareable URL and the local viewer address, then shut down.
print(neuroglancer.to_url(viewer.state))
print(viewer)

neuroglancer.stop()
    # NOTE(review): this fragment is indented — it appears to be the body of
    # a main() whose def line is not visible here.
    # Parse the standard neuroglancer server options (bind address, port).
    ap = argparse.ArgumentParser()
    neuroglancer.cli.add_server_arguments(ap)
    args = ap.parse_args()
    neuroglancer.cli.handle_server_arguments(args)
    viewer = neuroglancer.Viewer()

    with viewer.txn() as s:
        s.layers['image'] = neuroglancer.ImageLayer(
            source=
            'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image', )
        s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
            source=
            'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
        )
        print(s.layers)
        # 2x2 grid: left column shows image + ground truth twice, right
        # column shows ground truth alone twice.
        s.layout = neuroglancer.row_layout([
            neuroglancer.column_layout([
                neuroglancer.LayerGroupViewer(
                    layers=['image', 'ground_truth']),
                neuroglancer.LayerGroupViewer(
                    layers=['image', 'ground_truth']),
            ]),
            neuroglancer.column_layout([
                neuroglancer.LayerGroupViewer(layers=['ground_truth']),
                neuroglancer.LayerGroupViewer(layers=['ground_truth']),
            ]),
        ])
    # Print the state and URL, then open the viewer in a browser.
    print(viewer.state)
    print(viewer)
    webbrowser.open_new(viewer.get_viewer_url())
Example #8
0
# Bind the 'my-action' handler to the 't' key and show a status message.
viewer.actions.add('my-action', my_action)
with viewer.config_state.txn() as s:
    s.input_event_bindings.viewer['keyt'] = 'my-action'
    s.status_messages['hello'] = 'Welcome to this example'

# with viewer.txn() as s:
#    s.layout = '3d'
#    s.projection_scale = 3000
"""
from ipywidgets import Image
screenshot = viewer.screenshot(size=[1000, 1000])
screenshot_image = Image(value=screenshot.screenshot.image)
screenshot_image
"""

# Single-panel layout showing image + overlay.
with viewer.txn() as s:
    s.layout = neuroglancer.row_layout(
        [neuroglancer.LayerGroupViewer(layers=['image', 'overlay'])])
#         neuroglancer.LayerGroupViewer(layers=['segmentation'])])

# with viewer.txn() as s:
#    s.layout = neuroglancer.row_layout(
#        [neuroglancer.LayerGroupViewer(layers=['image']),
#         neuroglancer.LayerGroupViewer(layers=['segmentation'])])

# Print a shareable URL, the state, the local viewer address, then open it.
print(neuroglancer.to_url(viewer.state))
print(viewer.state)
print(viewer)
webbrowser.open_new(viewer.get_viewer_url())
def runserver():
    """Re-schedule itself every 30 s, then launch a demo neuroglancer viewer.

    Builds a synthetic RGB volume, configures an image layer (NIfTI source)
    plus an RGB-shaded overlay layer, wires a key-bound custom action, opens
    the viewer in a browser, and finally stops the neuroglancer server.
    """
    # sched.scheduler.enter expects a *callable* action; pass the function.
    event_schedule.enter(30, 1, runserver)
    # BUG FIX: the original additionally did
    #   event_schedule.enter(30, 1, runserver())
    # which invokes runserver() immediately (unbounded recursion) instead of
    # scheduling it — that duplicate, broken call is removed.
    event_schedule.run()
    viewer = neuroglancer.Viewer()

    # Synthetic 3-channel uint8 volume: sinusoidal gradients per channel.
    a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
    ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]],
                             indexing='ij')
    a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
    a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
    a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255

    with viewer.txn() as s:
        s.layers['image'] = neuroglancer.ImageLayer(
            source=
            'a.  nifti://http://127.0.0.1:9000/Users/alex/AlexBadeaMyAtlases/atlases/chass_symmetric3/chass_symmetric3_FA.nii.gz',
        )
        # s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
        #    source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
        # )
        # NOTE(review): 'overlay' has no source set (the LocalVolume line is
        # commented out), so the RGB shader has nothing to render — confirm.
        s.layers['overlay'] = neuroglancer.ImageLayer(
            # source=neuroglancer.LocalVolume(a, voxel_size=[8, 8, 8], voxel_offset=[3000, 3000, 3000]),
            shader="""

    void main() {
      emitRGB(vec3(toNormalized(getDataValue(0)),
                   toNormalized(getDataValue(1)),
                   toNormalized(getDataValue(2))));
    }
    """, )
        s.voxel_coordinates = [3000, 3000, 3000]

    num_actions = 0

    def my_action(s):
        # BUG FIX: the original declared `global num_actions`, but num_actions
        # is local to runserver(), so the first action raised NameError.
        nonlocal num_actions
        num_actions += 1
        with viewer.config_state.txn() as st:
            st.status_messages['hello'] = (
                'Got action %d: mouse position = %r' %
                (num_actions, s.mouse_voxel_coordinates))
        print('Got my-action')
        print('  Mouse position: %s' % (s.mouse_voxel_coordinates, ))
        print('  Layer selected values: %s' % (s.selected_values, ))

    # Bind the handler to the 't' key and show a welcome status message.
    viewer.actions.add('my-action', my_action)
    with viewer.config_state.txn() as s:
        s.input_event_bindings.viewer['keyt'] = 'my-action'
        s.status_messages['hello'] = 'Welcome to this example'

    # with viewer.txn() as s:
    #    s.layout = '3d'
    #    s.projection_scale = 3000
    """
    from ipywidgets import Image
    screenshot = viewer.screenshot(size=[1000, 1000])
    screenshot_image = Image(value=screenshot.screenshot.image)
    screenshot_image
    """

    # Single-panel layout showing image + overlay.
    with viewer.txn() as s:
        s.layout = neuroglancer.row_layout(
            [neuroglancer.LayerGroupViewer(layers=['image', 'overlay'])])
    #         neuroglancer.LayerGroupViewer(layers=['segmentation'])])

    # with viewer.txn() as s:
    #    s.layout = neuroglancer.row_layout(
    #        [neuroglancer.LayerGroupViewer(layers=['image']),
    #         neuroglancer.LayerGroupViewer(layers=['segmentation'])])

    print(neuroglancer.to_url(viewer.state))
    print(viewer.state)
    print(viewer)
    webbrowser.open_new(viewer.get_viewer_url())

    neuroglancer.stop()