Esempio n. 1
0
    def __init__(self, reference_image, moving_image, segmentation,
                 points_file, reference_voxel_size, moving_voxel_size):
        """Constructor

        :param reference_image: align to this image
        :param moving_image: align this image
        :param segmentation: the segmentation associated with the reference
        image or None if no segmentation.
        :param points_file: where to load and store points
        :param reference_voxel_size: a 3-tuple giving the X, Y and Z voxel
        size in nanometers.
        :param moving_voxel_size: the voxel size for the moving image
        """
        self.reference_image = reference_image
        self.moving_image = moving_image
        self.segmentation = segmentation
        # NOTE(review): moving_images / alignment_raw appear to be class-level
        # registries keyed by instance id (presumably so worker processes can
        # look the data up) -- confirm they are defined on the class.
        self.moving_images[id(self)] = moving_image
        # Roughly 5 sample points along the smallest image axis.
        self.decimation = max(1, np.min(reference_image.shape) // 5)
        n_elems = int(np.prod(self.reference_image.shape))
        # Shared float32 buffer sized to the reference image so the warped
        # alignment can be written by other processes without copying.
        alignment_raw = multiprocessing.RawArray('f', n_elems)
        self.alignment_raw[id(self)] = alignment_raw
        # One viewer per image: reference and moving are shown separately.
        self.reference_viewer = neuroglancer.Viewer()
        self.moving_viewer = neuroglancer.Viewer()
        self.points_file = points_file
        self.warper = None  # built lazily when a warp is requested
        self.reference_voxel_size = reference_voxel_size
        self.moving_voxel_size = moving_voxel_size
        self.load_points()
        self.init_state()
    def add_volume(self, volume, layer_name=None, clear_layer=False):
        """Wrap *volume* in a LocalVolume and add it to the viewer.

        :param volume: array data to display
        :param layer_name: name for the new layer; defaults to
            ``<layer_type>_<scales>``
        :param clear_layer: when True, remove all existing layers first
        """
        # Create the viewer lazily on first use.
        if self.viewer is None:
            self.viewer = neuroglancer.Viewer()

        name = (layer_name if layer_name is not None
                else f'{self.layer_type}_{self.scales}')

        coord_space = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'], units='nm', scales=self.scales)
        source = neuroglancer.LocalVolume(
            data=volume,
            dimensions=coord_space,
            voxel_offset=self.offset)

        # Segmentations get a SegmentationLayer; everything else an image.
        layer_cls = (neuroglancer.SegmentationLayer
                     if self.layer_type == 'segmentation'
                     else neuroglancer.ImageLayer)
        new_layer = layer_cls(source=source)

        with self.viewer.txn() as state:
            if clear_layer:
                state.layers.clear()
            state.layers[name] = new_layer

        print(f'A new layer named {name} is added to:')
        print(self.viewer)
Esempio n. 3
0
    def __init__(self, filename):
        """Set up the false-merge annotation viewer and its key bindings.

        :param filename: path used by load()/save() to persist annotations
        """
        self.filename = filename
        self.point_annotation_layer_name = 'false-merges'
        self.states = []         # saved viewer states, stepped with pgup/pgdn
        self.state_index = None  # index into self.states, or None
        viewer = self.viewer = neuroglancer.Viewer()
        # Segment ids contributed by states other than the current one.
        self.other_state_segment_ids = dict()

        viewer.actions.add('anno-next-state', lambda s: self.next_state())
        viewer.actions.add('anno-prev-state', lambda s: self.prev_state())
        viewer.actions.add('anno-save', lambda s: self.save())
        viewer.actions.add('anno-show-all',
                           lambda s: self.set_combined_state())
        viewer.actions.add(
            'anno-add-segments-from-state',
            lambda s: self.add_segments_from_state(s.viewer_state))

        with viewer.config_state.txn() as s:
            # 'keys'/'keya' are neuroglancer tokens for the S and A keys.
            s.input_event_bindings.viewer['pageup'] = 'anno-prev-state'
            s.input_event_bindings.viewer['pagedown'] = 'anno-next-state'
            s.input_event_bindings.viewer['control+keys'] = 'anno-save'
            s.input_event_bindings.viewer['control+keya'] = 'anno-show-all'

        viewer.shared_state.add_changed_callback(self.on_state_changed)
        self.cur_message = None
        # Start an empty session if nothing could be loaded from disk.
        if not self.load():
            self.set_state_index(None)
Esempio n. 4
0
    def _neuroglancer_link(self):
        """Build a neuroglancer-demo URL visualizing train/validate datasets.

        Returns a URL carrying the full viewer state in the fragment, laid
        out as two side-by-side layer groups (train | validate).
        """
        options = Options.instance()
        # NOTE(review): store_path is computed but never used below.
        store_path = Path(options.runs_base_dir).expanduser()

        viewer = neuroglancer.Viewer()
        with viewer.txn() as s:

            # Collect layers per dataset, skipping names already added.
            train_layers = {}
            for i, dataset in enumerate(self.train):
                train_layers.update(
                    dataset._neuroglancer_layers(
                        exclude_layers=set(train_layers.keys())))

            validate_layers = {}
            if self.validate is not None:
                for i, dataset in enumerate(self.validate):
                    validate_layers.update(
                        dataset._neuroglancer_layers(
                            exclude_layers=set(validate_layers.keys())))

            # Each value is a (layer, kwargs) pair; the kwargs pass through
            # to s.layers.append.
            for layer_name, (layer, kwargs) in itertools.chain(
                    train_layers.items(), validate_layers.items()):
                s.layers.append(
                    name=layer_name,
                    layer=layer,
                    **kwargs,
                )

            # Train layers on the left, validate layers on the right.
            s.layout = neuroglancer.row_layout([
                neuroglancer.LayerGroupViewer(
                    layers=list(train_layers.keys())),
                neuroglancer.LayerGroupViewer(
                    layers=list(validate_layers.keys())),
            ])
        return f"http://neuroglancer-demo.appspot.com/#!{json.dumps(viewer.state.to_json())}"
Esempio n. 5
0
def visualize(dataset, split, aff, ip, port):
    """
    Opens a tab in your webbrowser showing the chosen dataset

    :param dataset: dataset name, resolved through ``config_dict``
    :param split: which split to show (used as the file-name prefix)
    :param aff: when truthy, compute and show affinities instead of
        raw input + labels
    :param ip: bind address for the neuroglancer server
    :param port: bind port for the neuroglancer server
    """
    import neuroglancer

    config = config_dict(dataset)

    neuroglancer.set_static_content_source(
        url='https://neuroglancer-demo.appspot.com')
    neuroglancer.set_server_bind_address(bind_address=ip, bind_port=port)
    viewer = neuroglancer.Viewer(voxel_size=[6, 6, 30])
    if aff:
        import augmentation
        augmentation.maybe_create_affinities(split)
        add_affinities(config.folder, split + '-affinities', viewer)
    else:
        add_file(config.folder, split + '-input', viewer)
        add_file(config.folder, split + '-labels', viewer)

    # Fix: typo in user-facing message ("brower" -> "browser").
    print('open your browser at:')
    # Replace the second argument with your own server's ip address
    print(str(viewer).replace('172.17.0.2', '54.166.106.209'))
    webbrowser.open(str(viewer))
    print("press any key to exit")
    input()
Esempio n. 6
0
def run_render(args):
    """Render every frame of a keypoint script to the output directory.

    Interpolates between consecutive keypoint states at ``args.fps`` frames
    per second and captures a screenshot for each intermediate state.

    :param args: parsed CLI args carrying script, fps, width/height,
        browser flag and output_directory
    """
    keypoints = load_script(args.script)
    viewer = neuroglancer.Viewer()
    print('Open the specified URL to begin rendering')
    print(viewer)
    if args.browser:
        webbrowser.open_new(viewer.get_viewer_url())
    fps = args.fps
    with viewer.config_state.txn() as s:
        # Hide UI chrome so captured frames contain only the data view.
        s.show_ui_controls = False
        s.show_panel_borders = False
        s.viewer_size = [args.width, args.height]
    saver = neuroglancer.ScreenshotSaver(viewer, args.output_directory)
    # Fix: use the same max(1, int(...)) per-segment frame count as the loop
    # below, so total_frames is an exact integer instead of a possibly
    # fractional over-estimate in the progress display.
    total_frames = sum(
        max(1, int(k['transition_duration'] * fps)) for k in keypoints[:-1])
    for i in range(len(keypoints) - 1):
        a = keypoints[i]['state']
        b = keypoints[i + 1]['state']
        duration = keypoints[i]['transition_duration']
        num_frames = max(1, int(duration * fps))
        for frame_i in range(num_frames):
            t = frame_i / num_frames
            cur_state = neuroglancer.ViewerState.interpolate(a, b, t)
            viewer.set_state(cur_state)
            index, path = saver.capture()
            print('[%07d/%07d] keypoint %.3f/%5d: %s' % (
                index, total_frames, i + t, len(keypoints), path))
    def __init__(self):
        """Set up a flood-fill demo viewer over the FlyEM FIB-25 dataset."""
        viewer = self.viewer = neuroglancer.Viewer()
        # Ground-truth segmentation accessed through cloudvolume.
        self.gt_vol = cloudvolume.CloudVolume(
            'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
            mip=0,
            bounded=True,
            progress=False,
            provenance={})
        viewer.actions.add('start-fill', self._start_fill_action)
        viewer.actions.add('stop-fill', self._stop_fill_action)
        self.dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units='nm',
            scales=[8, 8, 8],
        )
        with viewer.config_state.txn() as s:
            # shift+left-click starts a fill at the clicked point; T stops it.
            s.input_event_bindings.data_view['shift+mousedown0'] = 'start-fill'
            s.input_event_bindings.data_view['keyt'] = 'stop-fill'

        with viewer.txn() as s:
            s.layers['image'] = neuroglancer.ImageLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            # Hidden by default; the fill result is the layer of interest.
            s.layers['ground_truth'].visible = False
            self.flood_fill_event = None  # set while a fill is in progress
Esempio n. 8
0
    def __call__(self, chunks: dict):
        """Display *chunks* in a neuroglancer viewer and block until Enter.

        Parameters:
        chunks: multiple chunks 
        """
        ng.set_static_content_source(
            url='https://neuromancer-seung-import.appspot.com')
        ng.set_server_bind_address(bind_port=self.port)
        viewer = ng.Viewer()

        with viewer.txn() as s:
            for chunk_name, chunk in chunks.items():
                global_offset = chunk.global_offset
                chunk = np.ascontiguousarray(chunk)

                s.layers.append(
                    name=chunk_name,
                    layer=ng.LocalVolume(
                        data=chunk,
                        # Fix: was neuroglancer.CordinateSpace -- the module
                        # is imported as ``ng`` and the class name was
                        # misspelled, a guaranteed NameError at runtime.
                        dimensions=ng.CoordinateSpace(
                            scales=[1, *self.voxel_size[::-1]],
                            units=['', 'nm', 'nm', 'nm'],
                            names=['c^', 'x', 'y', 'z']),
                        # offset is in nm, not voxels
                        offset=list(o * v for o, v in zip(
                            global_offset[::-1][-3:], self.voxel_size[::-1]))))
        print('Open this url in browser: ')
        print(viewer)
        input('Press Enter to exit neuroglancer.')
Esempio n. 9
0
def show_link(root, prefix, local_mode):
    """Print a neuroglancer URL for the precomputed segmentations under *root*.

    :param root: directory scanned for ``precomputed*`` sub-paths
    :param prefix: leading path component to remove from each match before
        joining it with the global ``header`` URL
    :param local_mode: when True, rewrite the link to http://localhost:8080
    """
    viewer = neuroglancer.Viewer()
    # Fix: str.strip(prefix) removes any characters from the *set* in prefix
    # at both ends -- it does not remove the prefix string. Remove the
    # literal leading prefix instead.
    stage_list = [
        path[len(prefix):] if path.startswith(prefix) else path
        for path in glob.glob(os.path.join(root, 'precomputed*'))
    ]
    seg_sources = [header + i for i in stage_list]
    with viewer.txn() as s:
        # Only the first five stages, to keep the serialized state small.
        for i, seg in enumerate(seg_sources[:5]):
            s.layers['seg_%d' % i] = neuroglancer.SegmentationLayer(
                source='precomputed://%s' % seg)

    link = url_state.to_url(viewer.state)
    if local_mode:
        link = re.sub(r'https://neuroglancer-demo.appspot.com',
                      r'http://localhost:8080', link)
    else:
        link = re.sub(r'https://', r'http://', link)
    pprint(link)
Esempio n. 10
0
    def __init__(self, script_path, transition_duration, fullscreen_width,
                 fullscreen_height, frames_per_second):
        """Interactive keypoint-script editor for neuroglancer playback.

        :param script_path: where the keypoint script is loaded from/saved to
        :param transition_duration: default seconds between keypoints
        :param fullscreen_width: viewer width when fullscreen is toggled
        :param fullscreen_height: viewer height when fullscreen is toggled
        :param frames_per_second: playback frame rate
        """
        self.script_path = script_path
        self.viewer = neuroglancer.Viewer()
        self.frames_per_second = frames_per_second
        self.default_transition_duration = transition_duration
        self.fullscreen_width = fullscreen_width
        self.fullscreen_height = fullscreen_height
        self.keypoint_index = 0
        # Resume an existing script if one is on disk; otherwise start empty.
        if os.path.exists(script_path):
            self.keypoints = load_script(script_path,
                                         self.default_transition_duration)
        else:
            self.keypoints = []

        self.transition_duration = transition_duration
        self.viewer.shared_state.add_changed_callback(
            self._viewer_state_changed)
        self.quit_event = threading.Event()
        self.is_dirty = True  # unsaved changes pending
        self.is_fullscreen = False
        # (key token, action name) pairs, bound below in all input contexts.
        keybindings = [
            ('keyk', 'add-keypoint'),
            ('bracketleft', 'prev-keypoint'),
            ('bracketright', 'next-keypoint'),
            ('backspace', 'delete-keypoint'),
            ('shift+bracketleft', 'decrease-duration'),
            ('shift+bracketright', 'increase-duration'),
            ('home', 'first-keypoint'),
            ('end', 'last-keypoint'),
            ('keyq', 'quit'),
            ('enter', 'toggle-play'),
            ('keyf', 'toggle-fullscreen'),
            ('keyj', 'revert-script'),
            ('comma', 'prev-frame'),
            ('period', 'next-frame'),
        ]
        with self.viewer.config_state.txn() as s:
            for k, a in keybindings:
                # Bind in all three views so shortcuts work everywhere.
                s.input_event_bindings.viewer[k] = a
                s.input_event_bindings.slice_view[k] = a
                s.input_event_bindings.perspective_view[k] = a
        self._keybinding_message = ' '.join('%s=%s' % x for x in keybindings)
        self.viewer.actions.add('add-keypoint', self._add_keypoint)
        self.viewer.actions.add('prev-keypoint', self._prev_keypoint)
        self.viewer.actions.add('next-keypoint', self._next_keypoint)
        self.viewer.actions.add('delete-keypoint', self._delete_keypoint)
        self.viewer.actions.add('increase-duration', self._increase_duration)
        self.viewer.actions.add('decrease-duration', self._decrease_duration)
        self.viewer.actions.add('first-keypoint', self._first_keypoint)
        self.viewer.actions.add('last-keypoint', self._last_keypoint)
        self.viewer.actions.add('quit', self._quit)
        self.viewer.actions.add('toggle-play', self._toggle_play)
        self.viewer.actions.add('toggle-fullscreen', self._toggle_fullscreen)
        self.viewer.actions.add('revert-script', self._revert_script)
        self.viewer.actions.add('next-frame', self._next_frame)
        self.viewer.actions.add('prev-frame', self._prev_frame)
        self.playback_manager = None
        # NOTE(review): starts at index 1 even though keypoint_index was
        # initialized to 0 above -- presumably positions just after the first
        # keypoint; confirm against _set_keypoint_index.
        self._set_keypoint_index(1)
 def __init__(self, url, points, voxel_size):
     """Viewer over the volume at *url* plus annotation *points*.

     :param url: source URL of the volume to display
     :param points: points to show as annotations
     :param voxel_size: voxel size of the displayed volume
     """
     self.url = url
     self.points = points
     self.voxel_size = voxel_size
     self.viewer = neuroglancer.Viewer()
     self.brightness = 20.0  # initial display brightness
     self.init_actions()
     self.refresh()
Esempio n. 12
0
    def __init__(self, raw, embedding, mst, classifier):
        """Interactive tool for labeling a segmentation derived from an MST.

        :param raw: raw image volume (provides .shape and .voxel_size)
        :param embedding: embedding volume shown alongside the raw data
        :param mst: iterable of weighted edges (u, v, w) forming the MST
        :param classifier: classifier used when updating the segmentation
        """
        self.raw = raw
        self.embedding = embedding
        self.classifier = classifier
        self.mst = mst

        self.points = []  # user-placed label points

        # Graph view of the MST so it can be cut by edge weight.
        self.mst_graph = nx.Graph()
        self.mst_graph.add_weighted_edges_from(mst)

        self.threshold = 0.5

        # 3-d coordinate space for plain volumes ...
        self.raw_dimensions = neuroglancer.CoordinateSpace(
            names=['z', 'y', 'x'],
            units='nm',
            scales=raw.voxel_size)

        # ... and a 4-d (channel-first) space for the annotation layer.
        self.dimensions = neuroglancer.CoordinateSpace(
            names=['c^', 'z', 'y', 'x'],
            units=[''] + 3*['nm'],
            scales=raw.voxel_size)

        # NOTE(review): a >3-d raw was previously cropped to shape[1:]
        # (commented-out code removed); the full shape is used now.
        volume_shape = raw.shape

        print(f"Creating segmentation layer with shape {volume_shape}")
        # Start from an all-zero (i.e. entirely unlabeled) segmentation.
        self.segmentation = np.zeros(volume_shape, dtype=np.uint32)

        self.segmentation_volume = neuroglancer.LocalVolume(
            data=self.segmentation,
            dimensions=self.raw_dimensions)

        self.viewer = neuroglancer.Viewer()
        self.viewer.actions.add('label_fg', self._label_fg)
        self.viewer.actions.add('label_bg', self._label_bg)
        self.viewer.actions.add('update_seg', self._update_segmentation)

        with self.viewer.config_state.txn() as s:
            # shift+click (buttons 0/1) labels fg/bg; U key recomputes the
            # segmentation.
            s.input_event_bindings.data_view['shift+mousedown0'] = 'label_fg'
            s.input_event_bindings.data_view['shift+mousedown1'] = 'label_bg'
            s.input_event_bindings.data_view['keyu'] = 'update_seg'

        with self.viewer.txn() as s:

            add_layer(s, self.raw, 'raw')
            add_layer(s, self.embedding, 'embedding')
            s.layers['embedding'].visible = False
            s.layers['points'] = neuroglancer.LocalAnnotationLayer(
                self.dimensions)
            s.layers['segmentation'] = neuroglancer.SegmentationLayer(
                source=self.segmentation_volume)
Esempio n. 13
0
    def _visualize_training(self, run):
        """Return a neuroglancer link showing *run*'s snapshots and validations.

        Walks the snapshot and validation zarr containers, opens every array
        found as a layer, and lays them out as two side-by-side layer groups.
        """
        snapshot_container = self.snapshot_container(run.name)
        validation_container = self.validation_container(run.name)
        snapshot_zarr = zarr.open(snapshot_container.container)
        validation_zarr = zarr.open(validation_container.container)

        snapshots = []
        validations = []

        def generate_groups(container):
            # Return a visititems callback recording array names into
            # *container* (a list, despite the parameter name).
            def add_element(name, obj):
                # NOTE(review): tests zarr.hierarchy.Array -- confirm this is
                # the intended class (zarr arrays are commonly zarr.core.Array).
                if isinstance(obj, zarr.hierarchy.Array):
                    container.append(name)

            return add_element

        snapshot_zarr.visititems(
            lambda name, obj: generate_groups(snapshots)(name, obj)
        )
        validation_zarr.visititems(
            lambda name, obj: generate_groups(validations)(name, obj)
        )

        viewer = neuroglancer.Viewer()
        with viewer.txn() as s:

            # Each entry is a (layer, kwargs) pair; kwargs are forwarded to
            # s.layers.append below.
            snapshot_layers = {}
            for snapshot in snapshots:
                snapshot_layers[snapshot] = ZarrArray.open_from_array_identifier(
                    snapshot_container.array_identifier(snapshot), name=snapshot
                )._neuroglancer_layer()

            validation_layers = {}
            for validation in validations:
                validation_layers[validation] = ZarrArray.open_from_array_identifier(
                    validation_container.array_identifier(validation), name=validation
                )._neuroglancer_layer()

            for layer_name, (layer, kwargs) in itertools.chain(
                snapshot_layers.items(), validation_layers.items()
            ):
                s.layers.append(
                    name=layer_name,
                    layer=layer,
                    **kwargs,
                )

            # Snapshots on the left, validations on the right.
            s.layout = neuroglancer.row_layout(
                [
                    neuroglancer.LayerGroupViewer(layers=list(snapshot_layers.keys())),
                    neuroglancer.LayerGroupViewer(
                        layers=list(validation_layers.keys())
                    ),
                ]
            )
        return f"http://neuroglancer-demo.appspot.com/#!{json.dumps(viewer.state.to_json())}"
Esempio n. 14
0
    def __init__(self, synapse_path, top_method='min', num_top_partners=10):
        """Viewer for browsing synaptic partners in the FlyEM FIB-25 data.

        :param synapse_path: JSON file whose 'data' entry lists synapses
        :param top_method: how partner counts are ranked (default 'min')
        :param num_top_partners: number of top partner segments to show
        """
        with open(synapse_path, 'r') as f:
            synapse_data = json.load(f)['data']
        self.synapses_by_id, self.synapse_partner_counts = get_synapses_by_id(
            synapse_data)
        self.top_method = top_method
        self.num_top_partners = num_top_partners

        dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units='nm',
            scales=[8, 8, 8],
        )

        viewer = self.viewer = neuroglancer.Viewer()
        # Double-click routes segment selection through our custom handler.
        viewer.actions.add('select-custom', self._handle_select)
        with viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['dblclick0'] = 'select-custom'
        with viewer.txn() as s:
            s.projection_orientation = [
                0.63240087, 0.01582051, 0.05692779, 0.77238464
            ]
            s.dimensions = dimensions
            s.position = [3000, 3000, 3000]
            s.layers['image'] = neuroglancer.ImageLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            # Partners reuse the ground-truth source; only the set of
            # displayed segments differs.
            s.layers['partners'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['synapses'] = neuroglancer.LocalAnnotationLayer(
                dimensions=dimensions,
                linked_segmentation_layer='ground_truth')
            # One 2-d overview plus two 3-d views (selection vs partners).
            s.layout = neuroglancer.row_layout([
                neuroglancer.LayerGroupViewer(
                    layout='xy',
                    layers=['image', 'ground_truth', 'partners', 'synapses'],
                ),
                neuroglancer.LayerGroupViewer(
                    layout='3d',
                    layers=['ground_truth', 'synapses'],
                ),
                neuroglancer.LayerGroupViewer(
                    layout='3d',
                    layers=['partners', 'synapses'],
                ),
            ])

        self.selected_segments = frozenset()
        # defer_callback coalesces rapid state changes into one callback.
        self.viewer.shared_state.add_changed_callback(
            lambda: self.viewer.defer_callback(self.on_state_changed))
Esempio n. 15
0
    def __init__(self,
                 reference_image,
                 moving_image,
                 segmentation,
                 points_file,
                 reference_voxel_size,
                 moving_voxel_size,
                 min_distance=1.0,
                 # NOTE(review): this default is evaluated once at class
                 # definition time, not per call -- fine for cpu_count, but
                 # worth knowing.
                 n_workers=multiprocessing.cpu_count()):
        """Constructor

        :param reference_image: align to this image
        :param moving_image: align this image
        :param segmentation: the segmentation associated with the reference
        image or None if no segmentation.
        :param points_file: where to load and store points
        :param reference_voxel_size: a 3-tuple giving the X, Y and Z voxel
        size in nanometers.
        :param moving_voxel_size: the voxel size for the moving image
        :param min_distance: the minimum allowed distance between any two points
        :param n_workers: # of workers to use when warping
        """
        self.reference_image = reference_image
        self.moving_image = moving_image
        self.segmentation = segmentation
        self.n_workers = n_workers
        # NOTE(review): moving_images / alignment_raw appear to be class-level
        # registries keyed by instance id -- confirm they exist on the class.
        self.moving_images[id(self)] = moving_image
        # Roughly 5 sample points along the smallest image axis.
        self.decimation = max(1, np.min(reference_image.shape) // 5)
        n_elems = int(np.prod(self.reference_image.shape))
        # Shared float32 buffer sized to the reference image so warp workers
        # can fill it in without copying.
        alignment_raw = multiprocessing.RawArray('f', n_elems)
        self.alignment_raw[id(self)] = alignment_raw
        self.reference_viewer = neuroglancer.Viewer()
        self.moving_viewer = neuroglancer.Viewer()
        self.points_file = points_file
        self.warper = None  # built lazily when a warp is requested
        self.reference_voxel_size = reference_voxel_size
        self.moving_voxel_size = moving_voxel_size
        self.reference_brightness = 1.0
        self.moving_brightness = 1.0
        self.min_distance = min_distance
        self.load_points()
        self.init_state()
        self.refresh_brightness()
Esempio n. 16
0
    def __init__(self, num_to_prefetch=10):
        """Create the review viewer with an empty work queue.

        :param num_to_prefetch: how many upcoming items to prefetch
        """
        self.num_to_prefetch = num_to_prefetch
        self.viewer = neuroglancer.Viewer()

        self.todo = []  # each entry is a list of segment IDs
        self.index = 0
        self.batch = 1
        self.apply_equivs = False

        self.set_init_state()
def run_vol(vol_idx):
    """Show channel 1 of record *vol_idx* from the division-detect HDF5 file.

    :param vol_idx: index into the 'records' dataset
    :return: the neuroglancer viewer displaying the volume
    """
    neuroglancer.set_static_content_source(url='https://neuroglancer-demo.appspot.com')

    with h5py.File(os.path.expanduser('~/data/div_detect/full_records.h5'), 'r') as rec_file:
        # Full 3-d volume for this index, last-axis channel 1 only.
        record = rec_file['records'][vol_idx,:, :, :, 1]

    viewer = neuroglancer.Viewer(voxel_size=[1, 1, 1])
    viewer.add(vol_to_int8(record),
               name='record')
    return viewer
Esempio n. 18
0
def visualize_trees(graphs: Dict[str, nx.DiGraph]):
    """Show every graph in *graphs* as a tree annotation in neuroglancer."""
    viewer = neuroglancer.Viewer()
    with viewer.txn() as state:
        # Dummy 1x1x1 image layer so the viewer has a coordinate frame.
        dummy = np.zeros([1, 1, 1]).transpose([2, 1, 0])
        state.layers["blegh"] = neuroglancer.ImageLayer(
            source=neuroglancer.LocalVolume(data=dummy,
                                            voxel_size=[1, 1, 1]))
        # One shared counter keeps node ids unique across all trees.
        node_id = itertools.count(start=1)
        for name, graph in graphs.items():
            add_trees(state, graph, node_id, name=name, visible=True)
    print(viewer)
    input("Hit ENTER to quit!")
Esempio n. 19
0
def main():
    """Start the Qt application hosting a neuroglancer viewer."""
    app = QtWidgets.QApplication(sys.argv)
    args = parse_args()
    if args.static_content_source is not None:
        neuroglancer.set_static_content_source(
            url=args.static_content_source)
    neuroglancer.set_server_bind_address(args.bind_address,
                                         bind_port=args.port)
    viewer = neuroglancer.Viewer()
    print("Neuroglancer URL: %s" % str(viewer))
    win = ApplicationWindow(viewer)
    win.show()
    sys.exit(app.exec())
Esempio n. 20
0
def run(args):
    """Configure the shared viewer state and capture screenshots over N jobs.

    :param args: parsed CLI arguments; carries a ``state`` plus the layout,
        memory-limit, job-count and webdriver options used below.
    """
    neuroglancer.cli.handle_server_arguments(args)
    state = args.state
    # Hide UI panels so the capture contains only the data view.
    state.selected_layer.visible = False
    state.statistics.visible = False
    if args.layout is not None:
        state.layout = args.layout
    if args.show_axis_lines is not None:
        state.show_axis_lines = args.show_axis_lines
    if args.show_default_annotations is not None:
        state.show_default_annotations = args.show_default_annotations
    if args.projection_scale_multiplier is not None:
        state.projection_scale *= args.projection_scale_multiplier

    state.gpu_memory_limit = args.gpu_memory_limit
    state.system_memory_limit = args.system_memory_limit
    state.concurrent_downloads = args.concurrent_downloads

    if args.no_webdriver:
        viewers = [neuroglancer.Viewer() for _ in range(args.jobs)]
        print('Open the following URLs to begin rendering')
        for viewer in viewers:
            print(viewer)

        def refresh_browser_callback():
            # Without a webdriver we cannot reload programmatically.
            print('Browser unresponsive, consider reloading')

        capture_image([(viewer, refresh_browser_callback) for viewer in viewers], args, state)
    else:
        def _make_webdriver():
            # One headless browser per job, each with its own reload hook.
            webdriver = neuroglancer.webdriver.Webdriver(
                headless=args.headless,
                docker=args.docker_chromedriver,
                debug=args.debug_chromedriver,
            )

            def refresh_browser_callback():
                print('Browser unresponsive, reloading')
                webdriver.reload_browser()

            return webdriver, refresh_browser_callback

        webdrivers = [_make_webdriver() for _ in range(args.jobs)]
        try:
            capture_image([(webdriver.viewer, refresh_browser_callback)
                           for webdriver, refresh_browser_callback in webdrivers], args, state)
        finally:
            for webdriver, _ in webdrivers:
                try:
                    # Fix: __exit__ takes (exc_type, exc_value, traceback);
                    # calling it with no arguments raised TypeError, which the
                    # old bare except silently swallowed -- so cleanup never
                    # actually ran.
                    webdriver.__exit__(None, None, None)
                except Exception:
                    # Best-effort shutdown; never mask the original error.
                    pass
Esempio n. 21
0
    def __init__(self, state_path, bodies, labels, segmentation_url, image_url,
                 num_to_prefetch):
        """Proofreading tool: step through *bodies* assigning one of *labels*.

        :param state_path: file backing the persisted labeling State
        :param bodies: sequence of body objects (each with .num_voxels)
        :param labels: label names; each is bound to a key from
            keyd/keyf/keyg/keyh in order
        :param segmentation_url: source for the segmentation layer
        :param image_url: source for the image layer
        :param num_to_prefetch: how many upcoming bodies to prefetch
        """
        self.state = State(state_path)
        self.num_to_prefetch = num_to_prefetch
        self.viewer = neuroglancer.Viewer()
        self.bodies = bodies
        self.state.load()
        # Voxel totals drive progress reporting over the body list.
        self.total_voxels = sum(x.num_voxels for x in bodies)
        self.cumulative_voxels = np.cumsum([x.num_voxels for x in bodies])

        with self.viewer.txn() as s:
            s.layers['image'] = neuroglancer.ImageLayer(source=image_url)
            s.layers['segmentation'] = neuroglancer.SegmentationLayer(
                source=segmentation_url)
            s.navigation.zoom_factor = 66
            s.perspective_zoom = 1280
            s.show_slices = False
            s.concurrent_downloads = 256
            s.gpu_memory_limit = 2 * 1024 * 1024 * 1024  # 2 GiB
            s.layout = '3d'

        key_bindings = [
            ['bracketleft', 'prev-index'],
            ['bracketright', 'next-index'],
            ['home', 'first-index'],
            ['end', 'last-index'],
            ['control+keys', 'save'],
        ]
        # One labeling action per label, bound to keyd/keyf/keyg/keyh.
        label_keys = ['keyd', 'keyf', 'keyg', 'keyh']
        for label, label_key in zip(labels, label_keys):
            key_bindings.append([label_key, 'label-%s' % label])

            # Default argument pins *label* per iteration (avoids the
            # late-binding closure pitfall).
            def label_func(s, label=label):
                self.set_label(s, label)

            self.viewer.actions.add('label-%s' % label, label_func)
        self.viewer.actions.add('prev-index', self._prev_index)
        self.viewer.actions.add('next-index', self._next_index)
        self.viewer.actions.add('first-index', self._first_index)
        self.viewer.actions.add('last-index', self._last_index)
        self.viewer.actions.add('save', self.save)

        with self.viewer.config_state.txn() as s:
            for key, command in key_bindings:
                s.input_event_bindings.viewer[key] = command
            # Persistent status line listing every binding.
            s.status_messages['help'] = (
                'KEYS: ' + ' | '.join('%s=%s' % (key, command)
                                      for key, command in key_bindings))

        self.index = -1
        # Resume at the first body that has not yet been labeled.
        self.set_index(self._find_one_after_last_labeled_index())
Esempio n. 22
0
    def __init__(self):
        """Load the sample TIFF stack and create a persistent viewer."""
        # Address -- kept for running behind a fixed host/port:
        # neuroglancer.set_server_bind_address('127.0.0.1')
        # neuroglancer.set_static_content_source(url='http://localhost:8080')

        # Data: read the stack, rescale intensities (x10/256 -- presumably a
        # display brightening factor; confirm) into uint8, and swap the first
        # two axes for display.
        img = tifffile.imread('sample.tif')
        img = img * 10 / 256
        img = img.astype('uint8')
        img = np.transpose(img, (1, 0, 2, 3))
        self.img = img

        # Same viewer every function call
        viewer = self.viewer = neuroglancer.Viewer()
Esempio n. 23
0
def visualize_npy(npy_file: Path, voxel_size):
    """Load a .npy volume and display it in neuroglancer until ENTER."""
    voxel_size = daisy.Coordinate(voxel_size)

    viewer = neuroglancer.Viewer()
    with viewer.txn() as state:
        data = np.load(npy_file)
        # Wrap the raw array in a daisy.Array over a unit voxel grid.
        roi = daisy.Roi(daisy.Coordinate([0, 0, 0]),
                        daisy.Coordinate(data.shape))
        array = daisy.Array(data, roi, daisy.Coordinate([1, 1, 1]))
        add_layer(state, array, f"npy array")
    print(viewer)
    input("Hit ENTER to quit!")
Esempio n. 24
0
    def __init__(self, imgs, points, quit_cb, save_cb):
        """Initializer


        :param imgs: a sequence of three-tuples: image, name, shader. The
        possible shaders are nuggt.utils.ngutils.{gray, red, green, blue}_shader

        :param points: an Nx3 array of points where points[:, 0] is the z
        coordinate, points[:, 1] is the y coordinate and points[:, 2] is the
        x coordinate

        :param quit_cb: a function to be called when quitting.
        :param save_cb: a function to be called when saving
        """
        self.idx = 0  # index of the point currently under review
        self.points = points
        self.quit_cb = quit_cb
        self.save_cb = save_cb
        self.viewer = neuroglancer.Viewer()
        # Per-point verdict masks: accepted ("yea") / rejected ("nay").
        self._yea = np.zeros(len(points), bool)
        self._nay = np.zeros(len(points), bool)
        with self.viewer.txn() as txn:
            for img, name, shader in imgs:
                layer(txn, name, img.astype(np.float32), shader, 1.0)
        self.display_points()
        self.go_to()
        self.viewer.actions.add("quit", self.on_quit)
        self.viewer.actions.add("yea", self.on_yea)
        self.viewer.actions.add("nay", self.on_nay)
        self.viewer.actions.add("go-to", self.on_go_to)
        self.viewer.actions.add("next", self.on_next)
        self.viewer.actions.add("next-unmarked", self.on_next_unmarked)
        self.viewer.actions.add("previous", self.on_previous)
        self.viewer.actions.add("previous-unmarked", self.on_previous_unmarked)
        self.viewer.actions.add("center", self.on_center)
        # "save" only exists when a save callback was supplied.
        if save_cb is not None:
            self.viewer.actions.add("save", self.on_save)
        with self.viewer.config_state.txn() as s:
            v = s.input_event_bindings.viewer
            v["control+keyq"] = "quit"
            v["shift+keyy"] = "yea"
            v["shift+keyn"] = "nay"
            v["shift+keyg"] = "go-to"
            v["shift+bracketleft"] = "previous"
            v["control+bracketleft"] = "previous-unmarked"
            v["shift+bracketright"] = "next"
            v["control+bracketright"] = "next-unmarked"
            v["shift+keyc"] = "center"
            if save_cb is not None:
                v["control+keys"] = "save"
Esempio n. 25
0
def create_neuroglancer_viewer(model:Model) -> neuroglancer.Viewer:
    """Create a viewer for a Neuroglancer instance.

    The first call configures the Neuroglancer server globals (static
    content source and bind address) from *model* and records that fact
    on the model; later calls skip the one-time setup and simply return
    a fresh viewer.

    :param model: has the details for the static Neuroglancer elements
    :return: a Neuroglancer viewer that can be used to display volumes
    """
    already_configured = model.neuroglancer_initialized.get()
    if not already_configured:
        content_url = model.static_content_source.get()
        bind_address = model.bind_address.get()
        bind_port = model.port_number.get()
        neuroglancer.set_static_content_source(url=content_url)
        neuroglancer.set_server_bind_address(bind_address, bind_port)
        model.neuroglancer_initialized.set(True)
    return neuroglancer.Viewer()
Esempio n. 26
0
def main():
    """Entry point: start Qt, bring up a Neuroglancer viewer and show the window."""
    app = QtWidgets.QApplication(sys.argv)
    args = parse_args()
    # The legacy --static-content-source flag is still accepted but ignored;
    # warn the user rather than failing.
    if args.static_content_source is not None:
        warning = ("Please do not use --static-content-source."
                   " It's no longer necessary and is disabled.")
        print(warning, file=sys.stderr)
    neuroglancer.set_server_bind_address(args.bind_address,
                                         bind_port=args.port)
    viewer = neuroglancer.Viewer()
    print("Neuroglancer URL: %s" % str(viewer))
    window = ApplicationWindow(viewer)
    window.show()
    # Hand control to the Qt event loop; exit with its return code.
    sys.exit(app.exec())
Esempio n. 27
0
    def __call__(self, datas: dict, selected: str = None):
        """Serve the given data in a Neuroglancer viewer and block until quit.

        Parameters:
        datas: mapping of layer name to data.  Values may be ``None``
            (skipped), a ``Synapses`` object, an Nx3 point array, or a
            volume object exposing ``is_image`` / ``is_segmentation`` /
            ``is_probability_map``.
        selected: comma-separated layer names to display, or ``None``
            to display every entry of ``datas``.
        """
        if selected is None:
            selected = datas.keys()
        elif isinstance(selected, str):
            selected = selected.split(',')

        ng.set_server_bind_address(bind_address='0.0.0.0', bind_port=self.port)
        viewer = ng.Viewer()
        with viewer.txn() as viewer_state:
            for name in selected:
                data = datas[name]
                if data is None:
                    continue
                elif isinstance(data, Synapses):
                    self._append_synapse_annotation_layer(
                        viewer_state, name, data)
                elif isinstance(
                        data,
                        np.ndarray) and 2 == data.ndim and 3 == data.shape[1]:
                    # an Nx3 array is treated as point annotations
                    self._append_point_annotation_layer(
                        viewer_state, name, data)
                elif data.is_image or (data.ndim == 3 and np.issubdtype(
                        data.dtype, np.floating)):
                    self._append_image_layer(viewer_state, name, data)
                elif data.is_segmentation:
                    self._append_segmentation_layer(viewer_state, name, data)
                elif data.is_probability_map:
                    self._append_probability_map_layer(viewer_state, name,
                                                       data)
                else:
                    # fix: removed a leftover breakpoint() debugging call
                    # that would drop production runs into pdb here
                    raise ValueError(f'do not support this type: {type(data)}')

        print('Open this url in browser: ')
        viewer_url = viewer.get_viewer_url()
        print(viewer_url)

        # Keep the process (and the Neuroglancer server) alive until the
        # user explicitly quits.
        key = None
        while key != 'q':
            key = input('Press q and enter/return to quit neuroglancer.')
Esempio n. 28
0
    def __init__(self, ref_img, ref_seg, moving_pts, ref_pts, moving_shape):
        """Show the reference volume and build warps between the two spaces.

        :param ref_img: the reference image volume
        :param ref_seg: the segmentation of the reference volume
        :param moving_pts: correspondence points in the moving space
        :param ref_pts: matching correspondence points in the reference space
        :param moving_shape: the z/y/x shape of the moving volume
        """
        self.viewer = neuroglancer.Viewer()

        # Each warp is approximated on a coarse 10x10x10 grid spanning
        # the corresponding volume.
        def _axis_grid(extent):
            return np.linspace(0, extent, 10)

        self.warp_to_ref = Warper(moving_pts, ref_pts).approximate(
            _axis_grid(moving_shape[0]),
            _axis_grid(moving_shape[1]),
            _axis_grid(moving_shape[2]))
        self.warp_to_moving = Warper(ref_pts, moving_pts).approximate(
            _axis_grid(ref_img.shape[0]),
            _axis_grid(ref_img.shape[1]),
            _axis_grid(ref_img.shape[2]))

        with self.viewer.txn() as txn:
            layer(txn, "image", ref_img, gray_shader, 1.0)
            seglayer(txn, "segmentation", ref_seg)
Esempio n. 29
0
    def __init__(self,
                 ip='localhost',
                 port=9810,
                 res=(6, 6, 30),
                 label_dtype=np.uint16):
        """Start a Neuroglancer server and create a viewer bound to it.

        :param ip: bind address for the Neuroglancer server
        :param port: bind port.  NOTE(review): the previous default of
            98100 exceeds the maximum TCP port number (65535) and would
            fail at bind time; 9810 is assumed to be the intended value
            -- confirm with the original author.
        :param res: voxel resolution (presumably x/y/z in nanometers --
            TODO confirm against callers)
        :param label_dtype: numpy dtype used for label volumes
        """
        super(NeuroG, self).__init__()

        self.port = port
        self.ip = ip
        # fix: the default was a mutable list shared across calls; the
        # default is now an immutable tuple, copied to a fresh list so
        # self.res keeps its original list type without aliasing the
        # caller's argument.
        self.res = list(res)

        neuroglancer.set_server_bind_address(bind_address=self.ip,
                                             bind_port=self.port)
        self.viewer = neuroglancer.Viewer()

        self.label_dtype = label_dtype
Esempio n. 30
0
def main():
    """Display TIFF volumes (and optionally a segmentation) in Neuroglancer.

    Positional arguments come in (filename, layer name, color) triples;
    the process then blocks forever to keep the viewer's server alive.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("files_and_colors",
                        nargs="+",
                        # fix: the implicit string concatenation was
                        # missing a space ("followed by\"red\"")
                        help="File name followed by display name followed by "
                        "\"red\", \"green\", \"blue\" or \"gray\".")
    parser.add_argument("--segmentation",
                        default=None,
                        help="Segmentation volume to display")
    parser.add_argument("--ip-address",
                        default="127.0.0.1",
                        help="IP address of neuroglancer server.")
    parser.add_argument("--port",
                        default=0,
                        type=int,
                        help="Port # of neuroglancer server.")
    parser.add_argument("--static-content-source",
                        default=None,
                        help="The URL of the static content source, e.g. "
                        "http://localhost:8080 if being served via npm.")
    args = parser.parse_args()
    if args.static_content_source is not None:
        neuroglancer.set_static_content_source(url=args.static_content_source)
    neuroglancer.set_server_bind_address(args.ip_address, args.port)
    viewer = neuroglancer.Viewer()
    with viewer.txn() as txn:
        # Map color names to shaders; anything unrecognized is grayscale.
        shaders = {"red": red_shader, "green": green_shader,
                   "blue": blue_shader}
        for filename, name, colorname in zip(args.files_and_colors[::3],
                                             args.files_and_colors[1::3],
                                             args.files_and_colors[2::3]):
            img = tifffile.imread(filename).astype(np.float32)
            shader = shaders.get(colorname.lower(), gray_shader)
            layer(txn, name, img, shader, 1.0)
        # fix: comparison with None should use "is not", not "!="
        if args.segmentation is not None:
            seg = tifffile.imread(args.segmentation).astype(np.uint32)
            seglayer(txn, "segmentation", seg)

    print(viewer.get_viewer_url())
    webbrowser.open(viewer.get_viewer_url())
    # Keep the process alive so the Neuroglancer server keeps serving.
    while True:
        time.sleep(5)