Example #1
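# Class __init__ for an interactive labeling tool (class fragment): stores a
# raw volume, an embedding and an MST, registers foreground/background
# labeling actions, and sets up raw, embedding, point and segmentation layers.
# Assumes: import neuroglancer, networkx as nx, numpy as np, and an add_layer
# helper (e.g. from funlib.show.neuroglancer, as in Example #6).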
    def __init__(self, raw, embedding, mst, classifier):

        self.raw = raw
        self.embedding = embedding
        self.classifier = classifier
        self.mst = mst

        self.points = []

        self.mst_graph = nx.Graph()
        self.mst_graph.add_weighted_edges_from(mst)

        self.threshold = 0.5

        self.raw_dimensions = neuroglancer.CoordinateSpace(
            names=['z', 'y', 'x'],
            units='nm',
            scales=raw.voxel_size)

        self.dimensions = neuroglancer.CoordinateSpace(
            names=['c^', 'z', 'y', 'x'],
            units=[''] + 3*['nm'],
            scales=raw.voxel_size)

        # if len(raw.shape) > 3:
        #     volume_shape = raw.shape[1:]
        # else:
        volume_shape = raw.shape

        print(f"Creating segmentation layer with shape {volume_shape}")
        #self.segmentation = np.arange(np.product(volume_shape),dtype=np.uint32)
        #self.segmentation = self.segmentation.reshape(volume_shape)
        self.segmentation = np.zeros(volume_shape, dtype=np.uint32)
        
        self.segmentation_volume = neuroglancer.LocalVolume(
            data=self.segmentation,
            dimensions=self.raw_dimensions)

        self.viewer = neuroglancer.Viewer()
        self.viewer.actions.add('label_fg', self._label_fg)
        self.viewer.actions.add('label_bg', self._label_bg)
        self.viewer.actions.add('update_seg', self._update_segmentation)

        # bind the registered actions to mouse and keyboard events
        with self.viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['shift+mousedown0'] = 'label_fg'
            s.input_event_bindings.data_view['shift+mousedown1'] = 'label_bg'
            s.input_event_bindings.data_view['keyu'] = 'update_seg'

        with self.viewer.txn() as s:
            
            add_layer(s, self.raw, 'raw')
            add_layer(s, self.embedding, 'embedding')
            s.layers['embedding'].visible = False
            s.layers['points'] = neuroglancer.LocalAnnotationLayer(
                self.dimensions)
            s.layers['segmentation'] = neuroglancer.SegmentationLayer(
                source=self.segmentation_volume)
Example #2
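# Load a .npy file, wrap it in a daisy.Array, and serve it with neuroglancer.
# Assumes: import daisy, neuroglancer, numpy as np, Path from pathlib, and
# add_layer from funlib.show.neuroglancer.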
def visualize_npy(npy_file: Path, voxel_size):
    voxel_size = daisy.Coordinate(voxel_size)

    viewer = neuroglancer.Viewer()
    with viewer.txn() as s:
        v = np.load(npy_file)
        m = daisy.Array(
            v,
            # ROI in world units: voxel size times the array shape
            daisy.Roi((0, 0, 0), voxel_size * daisy.Coordinate(v.shape)),
            voxel_size,
        )
        add_layer(s, m, f"npy array")
    print(viewer)
    input("Hit ENTER to quit!")
Example #3
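# Fragment: collects local maxima as ellipsoid annotations for each
# prediction, then adds the dataset layers plus maxima and, when present,
# edge-connector annotation layers for every view.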
                # x assumed to follow the same (z, y, x) node layout as y and
                # z; its assignment is truncated in the source snippet
                x = nodes[node_id][2]
                y = nodes[node_id][1]
                z = nodes[node_id][0]
                maxima_dict[node_id] = (x, y, z)
                maxima.append(
                    neuroglancer.EllipsoidAnnotation(center=(x, y, z),
                                                     radii=(10, 10, 10),
                                                     id=node_id,
                                                     segments=None))
            data_k["maxima"] = maxima

    prediction_views.append(data_k)

with viewer.txn() as s:
    for k, view in enumerate(prediction_views):
        for dset, dset_data in view["view_dsets"].items():
            add_layer(s, dset_data, str(k) + "_" + dset)

        s.layers['{}_maxima'.format(k)] = neuroglancer.AnnotationLayer(
            voxel_size=(1, 1, 1),
            filter_by_segmentation=False,
            annotation_color='#add8e6',
            annotations=view["maxima"])

        try:
            s.layers['{}_connectors'.format(k)] = neuroglancer.AnnotationLayer(
                voxel_size=(1, 1, 1),
                filter_by_segmentation=False,
                annotation_color='#00ff00',
                annotations=view["edge_connectors"])
        except KeyError:
            print("No edges in prediction")
Example #4
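# Fragment: opens each dataset (as a list of arrays when it is stored as a
# scale pyramid), adds one layer per dataset, and renders optional graphs
# stored as paired '-ids'/'-locations' datasets as ellipsoid annotations.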
            if len(scales) == 0:
                print(f"Couldn't read {ds}, skipping...")
                continue
            print("Found scales %s" % ([os.path.relpath(s, f)
                                        for s in scales], ))
            a = [
                daisy.open_ds(f, os.path.relpath(scale_ds, f))
                for scale_ds in scales
            ]
        arrays.append(a)

    with viewer.txn() as s:
        for array, dataset in zip(arrays, datasets):
            print(array.roi)
            print(array.voxel_size)
            add_layer(s, array, dataset)

if args.graphs:
    for f, graphs in zip(args.file, args.graphs):

        for graph in graphs:

            graph_annotations = []
            ids = daisy.open_ds(f, graph + '-ids')
            loc = daisy.open_ds(f, graph + '-locations')
            for i, l in zip(ids.data, loc.data):
                graph_annotations.append(
                    neuroglancer.EllipsoidAnnotation(center=l[::-1],
                                                     radii=(5, 5, 5),
                                                     id=i))
            # remaining arguments assumed; the source snippet is truncated here
            graph_layer = neuroglancer.AnnotationLayer(
                voxel_size=(1, 1, 1),
                annotation_color='#ff00ff',
                annotations=graph_annotations)
Example #5
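# Visualize the volumes and point sets of an HDF5 prediction snapshot,
# optionally overlaying local maxima and an MST derived from an embedding.
# Assumes: import daisy, h5py, itertools, json, neuroglancer, numpy as np,
# maximum_filter from scipy.ndimage, and the helpers DEFAULT_CONFIG,
# add_layer, add_trees, build_trees, build_trees_from_mst, get_embedding_mst.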
def visualize_hdf5(hdf5_file: Path,
                   voxel_size,
                   mst=False,
                   maxima_for=None,
                   skip=None):
    path_list = str(hdf5_file.absolute()).split("/")
    setups_dir = Path("/", *path_list[:-3])
    setup_config = DEFAULT_CONFIG
    try:
        setup_config.update(
            json.load((setups_dir / path_list[-3] / "config.json").open()))
    except (FileNotFoundError, json.JSONDecodeError):
        # fall back to DEFAULT_CONFIG if no valid config.json is present
        pass
    voxel_size = daisy.Coordinate(setup_config["VOXEL_SIZE"])
    coordinate_scale = (setup_config["COORDINATE_SCALE"] *
                        np.array(voxel_size) / max(voxel_size))
    dataset = h5py.File(hdf5_file, "r")
    volumes = list(dataset.get("volumes", {}).keys())
    points = list(dataset.get("points", {}).keys())

    points = {p.split("-")[0] for p in points}

    node_id = itertools.count(start=1)

    viewer = neuroglancer.Viewer()
    with viewer.txn() as s:
        for volume in volumes:
            if skip == volume:
                continue
            v = daisy.open_ds(str(hdf5_file.absolute()), f"volumes/{volume}")
            if len(v.shape) == 5:
                v.n_channel_dims -= 1
                v.materialize()
                v.data = v.data[0]
            if v.dtype == np.int64:
                v.materialize()
                v.data = v.data.astype(np.uint64)
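            # local maxima: voxels unchanged by a maximum filter and above a
            # small absolute threshold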
            if volume == maxima_for:
                v.materialize()
                max_filtered = maximum_filter(v.data, (3, 10, 10))
                maxima = np.logical_and(max_filtered == v.data, v.data > 0.01)
                m = daisy.Array(maxima, v.roi, v.voxel_size)
                add_layer(s, m, f"{volume}-maxima")
            if volume == "embedding":
                offset = v.roi.get_offset()
                mst = get_embedding_mst(
                    v.data,
                    1,
                    coordinate_scale,
                    offset / voxel_size,
                    daisy.open_ds(str(hdf5_file.absolute()),
                                  f"volumes/fg_maxima").to_ndarray(),
                )
                add_trees(s, mst, node_id, name="MST", visible=True)
                v.materialize()
                v.data = (v.data + 1) / 2
            add_layer(s, v, volume, visible=False)

        for point_set in points:
            node_ids = dataset["points"][f"{point_set}-ids"]
            locations = dataset["points"][f"{point_set}-locations"]
            edges = dataset["points"][f"{point_set}-edges"]
            components = build_trees(node_ids, locations, edges, voxel_size)
            add_trees(s, components, node_id, name=point_set, visible=False)
        if mst and False:  # 'and False' disables this branch
            emst = h5py.File(hdf5_file)["emst"]
            edges_u = h5py.File(hdf5_file)["edges_u"]
            edges_v = h5py.File(hdf5_file)["edges_v"]
            alpha = setup_config["ALPHA"]
            coordinate_scale = setup_config["COORDINATE_SCALE"]
            offset = daisy.open_ds(str(hdf5_file.absolute()),
                                   f"volumes/gt_fg").roi.get_offset()
            mst_trees = build_trees_from_mst(emst, edges_u, edges_v, alpha,
                                             coordinate_scale, offset,
                                             voxel_size)
            add_trees(s, mst_trees, node_id, name="MST", visible=True)
    print(viewer)
    input("Hit ENTER to quit!")
Example #6
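# Standalone script: open the raw, soft_mask and tracing datasets of a
# training snapshot HDF file and display them in neuroglancer.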
import daisy
import neuroglancer
import numpy as np
import sys
import os
from funlib.show.neuroglancer import add_layer, ScalePyramid

datasets = ["raw", "soft_mask", "tracing"]
base_dir = os.path.abspath(sys.argv[1])
experiment = sys.argv[2]

setup_number = int(sys.argv[3])
snapshot = int(sys.argv[4])
snapshot_path = os.path.join(
    base_dir, experiment,
    "01_train/setup_{}/snapshots/batch_{}.hdf".format(setup_number, snapshot))

view_dsets = {}
for dset in datasets:
    view_dsets[dset] = daisy.open_ds(snapshot_path, dset)

viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    for dset, data in view_dsets.items():
        add_layer(s, data, dset)

print(viewer)
Example #7
# Fragment: opens scale pyramids from output_file and displays them together
# with raw and relabelled ground truth (raw, mtlsd_frags, relabelled and
# output_file are defined above the truncated start of this snippet).
mtlsd_segmentation_benjamin = [  # variable name inferred from its use below
    daisy.open_ds(output_file, f'volumes/segmentation_benjamin/s{i}')
    for i in range(9)
]

mtlsd_frags_best_effort = [
    daisy.open_ds(output_file, f'volumes/fragments_gt_best_effort/s{i}')
    for i in range(9)
]
# mtlsd_frags_best_effort = daisy.open_ds(
# output_file, 'volumes/fragments_gt_best_effort')

# mtlsd_seg = [
# daisy.open_ds(f, 'volumes/segmentation_64/s%d' % s)
# for s in range(9)
# ]
# mtlsd_seg = daisy.open_ds(f, 'volumes/segmentation_40')

viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    add_layer(s, raw, 'raw')

    add_layer(s, mtlsd_frags_best_effort, 'mtlsd frags best effort')
    add_layer(s, mtlsd_frags, 'mtlsd frags')
    add_layer(s, mtlsd_segmentation_benjamin, 'mtlsd segmentation benjamin')
    # does not work
    # add_layer(s, gt, 'original gt')
    add_layer(s, relabelled, 'relabelled gt')
    # add_layer(s, mtlsd_affs, 'mtlsd affs', shader='rgb')
    # add_layer(s, mtlsd_seg, 'mtlsd seg')
print(viewer)
Example #8
# Fragment: builds line annotations for graph edges and an ellipsoid
# annotation for the final node (pos_u, pos_v, ngid, nodes, edges, voxel_size,
# n5_path, viewer and minutes are defined above the truncated start of this
# snippet).
    edges.append(
        neuroglancer.LineAnnotation(point_a=pos_u, point_b=pos_v, id=next(ngid))
    )
    if len(nodes) > 10000:
        break
nodes.append(
    neuroglancer.EllipsoidAnnotation(
        center=pos_v, radii=(1, 1, 1) / voxel_size, id=next(ngid)
    )
)


a = daisy.open_ds(str(n5_path.absolute()), "volume")

with viewer.txn() as s:
    add_layer(s, a, "volume", shader="rgb", c=[0, 0, 0])

with viewer.txn() as s:
    s.layers["edges"] = neuroglancer.AnnotationLayer(
        filter_by_segmentation=False, annotation_color="#add8e6", annotations=edges
    )
    s.layers["nodes"] = neuroglancer.AnnotationLayer(
        filter_by_segmentation=False, annotation_color="#ff00ff", annotations=nodes
    )

url = str(viewer)
logging.info(url)

import time

time.sleep(60 * minutes)  # keep the viewer alive for 'minutes' minutes