Example #1
    def __call__(self, chunks: dict):
        """
        Parameters:
        chunks: dict of chunk name to chunk array
        """
        ng.set_static_content_source(
            url='https://neuromancer-seung-import.appspot.com')
        ng.set_server_bind_address(bind_port=self.port)
        viewer = ng.Viewer()

        with viewer.txn() as s:
            for chunk_name, chunk in chunks.items():
                global_offset = chunk.global_offset
                chunk = np.ascontiguousarray(chunk)

                s.layers.append(
                    name=chunk_name,
                    layer=ng.LocalVolume(
                        data=chunk,
                        dimensions=ng.CoordinateSpace(
                            scales=[1, *self.voxel_size[::-1]],
                            units=['', 'nm', 'nm', 'nm'],
                            names=['c^', 'x', 'y', 'z']),
                        # offset is in nm, not voxels
                        offset=list(o * v for o, v in zip(
                            global_offset[::-1][-3:], self.voxel_size[::-1]))))
        print('Open this url in browser: ')
        print(viewer)
        input('Press Enter to exit neuroglancer.')
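For comparison, here is a minimal, self-contained sketch of the same serve-a-local-volume pattern written against the current neuroglancer Python API; the port, voxel scale, and random test volume are illustrative assumptions, not values from the example above.

import numpy as np
import neuroglancer as ng

ng.set_server_bind_address(bind_address='127.0.0.1', bind_port=8080)  # assumed local-only setup
viewer = ng.Viewer()

# Placeholder volume standing in for a real chunk.
chunk = np.random.randint(0, 255, size=(64, 64, 64), dtype=np.uint8)

with viewer.txn() as s:
    s.layers.append(
        name='chunk',
        layer=ng.LocalVolume(
            data=chunk,
            dimensions=ng.CoordinateSpace(
                names=['x', 'y', 'z'],
                units=['nm', 'nm', 'nm'],
                scales=[8, 8, 40])))  # assumed voxel size in nm

print('Open this url in browser: ')
print(viewer)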
Example #2
File: cli.py Project: tartavull/trace
def visualize(dataset, split, aff, ip, port):
    """
    Opens a tab in your web browser showing the chosen dataset
    """
    import neuroglancer
    import webbrowser  # needed below to open the viewer URL

    config = config_dict(dataset)

    neuroglancer.set_static_content_source(
        url='https://neuroglancer-demo.appspot.com')
    neuroglancer.set_server_bind_address(bind_address=ip, bind_port=port)
    viewer = neuroglancer.Viewer(voxel_size=[6, 6, 30])
    if aff:
        import augmentation
        augmentation.maybe_create_affinities(split)
        add_affinities(config.folder, split + '-affinities', viewer)
    else:
        add_file(config.folder, split + '-input', viewer)
        add_file(config.folder, split + '-labels', viewer)

    print('open your browser at:')
    print(str(viewer).replace('172.17.0.2', '54.166.106.209')
          )  # Replace the second argument with your own server's IP address
    webbrowser.open(str(viewer))
    print("press any key to exit")
    input()
Example #3
    def on_launch(self, *args):
        neuroglancer.set_server_bind_address(
            bind_address=self.model.bind_address.get())
        QGuiApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
        try:
            fixed_level =\
                2 ** (self.model.nuggt_fixed_decimation_level.get() - 1)
            fixed_ar = ArrayReader(fixed_neuroglancer_url(self.model),
                                   format="blockfs",
                                   level=fixed_level)
            set_status_bar_message("Loading fixed volume...")
            fixed_volume = fixed_ar[:, :, :]
            moving_level =\
                2 ** (self.model.nuggt_moving_decimation_level.get() - 1)
            moving_ar = ArrayReader(moving_neuroglancer_url(self.model),
                                    format="blockfs",
                                    level=moving_level)
            set_status_bar_message("Loading moving volume...")
            moving_volume = moving_ar[:, :, :]
            set_status_bar_message("Starting Neuroglancer")
            self.viewer_pair = ViewerPair(fixed_volume,
                                          moving_volume,
                                          None,
                                          self.model.nuggt_points_path.get(),
                                          (1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
                                          n_workers=self.model.n_workers.get())
            self.viewer_pair.max_batch_size = 1
            real_save = self.viewer_pair.save_points

            #
            # Do some housekeeping associated with saving files
            # * make the rescaled points file
            # * enable the rough alignment button
            #
            def on_save(*args):
                real_save()
                with open(self.model.nuggt_points_path.get()) as fd:
                    coords = json.load(fd)
                coords["reference"], coords["moving"] = [[
                    [_ * level for _ in __] for __ in coords[k]
                ] for k, level in (("reference", fixed_level), ("moving",
                                                                moving_level))]
                with open(self.model.nuggt_rescaled_points_path.get(),
                          "w") as fd:
                    json.dump(coords, fd)
                self.update_controls()

            self.viewer_pair.save_points = on_save
            reference_url = self.viewer_pair.reference_viewer.get_viewer_url()
            moving_url = self.viewer_pair.moving_viewer.get_viewer_url()
            self.model.nuggt_reference_url.set(reference_url)
            self.model.nuggt_moving_url.set(moving_url)
            self.launched = True
        except Exception:
            why = traceback.format_exc()
            QMessageBox.critical(None, "Error during execution", why)
        finally:
            clear_status_bar_message()
            QGuiApplication.restoreOverrideCursor()
Example #4
def main(args=sys.argv[1:]):
    opts = parse_args(args)
    neuroglancer.set_static_content_source(url=opts.static_content_source)
    neuroglancer.set_server_bind_address(opts.bind_address, opts.port)
    viewer_pair = ViewerPair(opts.fixed_url, opts.moving_url,
                             opts.filtered_pts)
    while True:
        time.sleep(10)
Example #5
def main():
    app = QtWidgets.QApplication(sys.argv)
    args = parse_args()
    if args.static_content_source is not None:
        neuroglancer.set_static_content_source(
            url=args.static_content_source)
    neuroglancer.set_server_bind_address(
        args.bind_address, bind_port=args.port)
    viewer = neuroglancer.Viewer()
    print("Neuroglancer URL: %s" % str(viewer))
    window = ApplicationWindow(viewer)
    window.show()
    sys.exit(app.exec())
Example #6
    def __init__(self):
        # Address
        neuroglancer.set_server_bind_address('127.0.0.1')
        neuroglancer.set_static_content_source(url='http://localhost:8080')

        # Data
        img = tifffile.imread('sample.tif')
        img = img * 10 / 256
        img = img.astype('uint8')
        img = np.transpose(img, (1, 0, 2, 3))
        self.img = img

        # Same viewer every function call
        viewer = self.viewer = neuroglancer.Viewer()
Example #7
def main():
    app = QtWidgets.QApplication(sys.argv)
    args = parse_args()
    if args.static_content_source is not None:
        print(
            "Please do not use --static-content-source."
            " It's no longer necessary and is disabled.",
            file=sys.stderr)
    neuroglancer.set_server_bind_address(args.bind_address,
                                         bind_port=args.port)
    viewer = neuroglancer.Viewer()
    print("Neuroglancer URL: %s" % str(viewer))
    window = ApplicationWindow(viewer)
    window.show()
    sys.exit(app.exec())
Example #8
def create_neuroglancer_viewer(model: Model) -> neuroglancer.Viewer:
    """
    Create a viewer for a Neuroglancer instance

    :param model: has the details for the static Neuroglancer elements
    :return: a Neuroglancer viewer that can be used to display volumes
    """
    if not model.neuroglancer_initialized.get():
        neuroglancer.set_static_content_source(
            url=model.static_content_source.get())
        neuroglancer.set_server_bind_address(
            model.bind_address.get(),
            model.port_number.get())
        model.neuroglancer_initialized.set(True)
    return neuroglancer.Viewer()
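A hypothetical call site for create_neuroglancer_viewer might look like the sketch below; the model object and the layer source are stand-ins for whatever the surrounding application supplies. Because initialization is guarded by the neuroglancer_initialized flag, repeated calls return new viewers without re-binding the server.

viewer = create_neuroglancer_viewer(model)  # model supplied by the application
with viewer.txn() as s:
    s.layers['image'] = neuroglancer.ImageLayer(
        source='precomputed://https://example.com/volume')  # placeholder source
print(viewer.get_viewer_url())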
Example #9
    def __call__(self, datas: dict, selected: str = None):
        """
        Parameters:
        chunks: multiple chunks
        """
        if selected is None:
            selected = datas.keys()
        elif isinstance(selected, str):
            selected = selected.split(',')

        # ng.set_static_content_source(
        #     url='https://neuromancer-seung-import.appspot.com')
        ng.set_server_bind_address(bind_address='0.0.0.0', bind_port=self.port)
        viewer = ng.Viewer()
        with viewer.txn() as viewer_state:
            for name in selected:
                data = datas[name]
                if data is None:
                    continue
                elif isinstance(data, Synapses):
                    # this could be synapses
                    self._append_synapse_annotation_layer(
                        viewer_state, name, data)
                elif isinstance(
                        data,
                        np.ndarray) and 2 == data.ndim and 3 == data.shape[1]:
                    # points
                    self._append_point_annotation_layer(
                        viewer_state, name, data)
                elif data.is_image or (data.ndim == 3 and np.issubdtype(
                        data.dtype, np.floating)):
                    self._append_image_layer(viewer_state, name, data)
                elif data.is_segmentation:
                    self._append_segmentation_layer(viewer_state, name, data)
                elif data.is_probability_map:
                    self._append_probability_map_layer(viewer_state, name,
                                                       data)
                else:
                    raise ValueError(f'do not support this type: {type(data)}')

        print('Open this url in browser: ')
        viewer_url = viewer.get_viewer_url()
        print(viewer_url)

        key = None
        while key != 'q':
            key = input('Press q and enter/return to quit neuroglancer.')
Example #10
def run_interactive(args, graph):
    # Make splitter a global variable so that it is accessible from the
    # interactive `python -i` shell.
    global splitter

    if args.bind_address:
        neuroglancer.set_server_bind_address(args.bind_address)
    if args.static_content_url:
        neuroglancer.set_static_content_source(url=args.static_content_url)

    splitter = InteractiveSplitter(graph,
                                   agglo_id=args.agglo_id,
                                   image_url=args.image_url,
                                   segmentation_url=args.segmentation_url,
                                   state_path=args.state)
    print(splitter.viewer)
Example #11
    def __init__(self,
                 ip='localhost',
                 port=98100,
                 res=[6, 6, 30],
                 label_dtype=np.uint16):
        super(NeuroG, self).__init__()

        self.port = port
        self.ip = ip
        self.res = res

        neuroglancer.set_server_bind_address(bind_address=self.ip,
                                             bind_port=self.port)
        self.viewer = neuroglancer.Viewer()

        self.label_dtype = label_dtype
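Note that the default port=98100 is outside the valid TCP port range (1-65535), so callers presumably override it. A hypothetical instantiation with an in-range port:

ng_viewer = NeuroG(ip='localhost', port=9810, res=[6, 6, 30])  # illustrative port
print(ng_viewer.viewer)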
Example #12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("files_and_colors",
                        nargs="+",
                        help="File name followed by display name followed by"
                        "\"red\", \"green\", \"blue\" or \"gray\".")
    parser.add_argument("--segmentation",
                        default=None,
                        help="Segmentation volume to display")
    parser.add_argument("--ip-address",
                        default="127.0.0.1",
                        help="IP address of neuroglancer server.")
    parser.add_argument("--port",
                        default=0,
                        type=int,
                        help="Port # of neuroglancer server.")
    parser.add_argument("--static-content-source",
                        default=None,
                        help="The URL of the static content source, e.g. "
                        "http://localhost:8080 if being served via npm.")
    args = parser.parse_args()
    if args.static_content_source is not None:
        neuroglancer.set_static_content_source(url=args.static_content_source)
    neuroglancer.set_server_bind_address(args.ip_address, args.port)
    viewer = neuroglancer.Viewer()
    with viewer.txn() as txn:
        for filename, name, colorname in zip(args.files_and_colors[::3],
                                             args.files_and_colors[1::3],
                                             args.files_and_colors[2::3]):
            img = tifffile.imread(filename).astype(np.float32)
            if colorname.lower() == "red":
                shader = red_shader
            elif colorname.lower() == "green":
                shader = green_shader
            elif colorname.lower() == "blue":
                shader = blue_shader
            else:
                shader = gray_shader
            layer(txn, name, img, shader, 1.0)
        if args.segmentation is not None:
            seg = tifffile.imread(args.segmentation).astype(np.uint32)
            seglayer(txn, "segmentation", seg)

    print(viewer.get_viewer_url())
    webbrowser.open(viewer.get_viewer_url())
    while True:
        time.sleep(5)
Example #14
File: align.py Project: richardqiu/nuggt
def main():
    logging.basicConfig(level=logging.INFO)
    args = parse_args()
    if args.ip_address is not None and args.port is not None:
        neuroglancer.set_server_bind_address(bind_address=args.ip_address,
                                             bind_port=args.port)
    elif args.ip_address is not None:
        neuroglancer.set_server_bind_address(bind_address=args.ip_address)
    elif args.port is not None:
        neuroglancer.set_server_bind_address(bind_port=args.port)
    if args.static_content_source is not None:
        neuroglancer.set_static_content_source(url=args.static_content_source)
    reference_voxel_size = \
        [float(_)*1000 for _ in args.reference_voxel_size.split(",")]
    moving_voxel_size = \
        [float(_)*1000 for _ in args.moving_voxel_size.split(",")]
    logging.info("Reading reference image")
    reference_image = tifffile.imread(args.reference_image).astype(np.float32)
    logging.info("Reading moving image")
    moving_image = tifffile.imread(args.moving_image).astype(np.float32)
    if args.segmentation is not None:
        logging.info("Reading segmentation")
        segmentation = tifffile.imread(args.segmentation).astype(np.uint32)
    else:
        segmentation = None
    vp = ViewerPair(reference_image, moving_image, segmentation, args.points,
                    reference_voxel_size, moving_voxel_size)
    if not args.no_launch:
        vp.launch_viewers()
    vp.print_viewers()
    while True:
        time.sleep(10)
Example #15
def main():
    global output_file_name
    global synapses
    global viewer
    args = parse_args()
    neuroglancer.set_static_content_source(url=args.static_content_source)
    neuroglancer.set_server_bind_address(bind_port=int(args.port))
    image_url = args.image_url
    segmentation_url = args.segmentation_url
    output_file_name = args.output

    with open(args.synapses) as fd:
        synapse_dict = json.load(fd)

    for n1, n2, x, y, z in zip(synapse_dict["neuron_1"],
                               synapse_dict["neuron_2"],
                               synapse_dict["synapse_center"]["x"],
                               synapse_dict["synapse_center"]["y"],
                               synapse_dict["synapse_center"]["z"]):
        synapses.append(Synapse(n1, n2, x, y, z))

    viewer = neuroglancer.Viewer()
    with viewer.txn() as s:
        s.layers['image'] = neuroglancer.ImageLayer(source=image_url)
        s.layers['segmentation'] = neuroglancer.SegmentationLayer(
            source=segmentation_url)
    viewer.actions.add("yes", lambda _: yes())
    viewer.actions.add("no", lambda _: no())
    viewer.actions.add("skip", lambda _: skip())
    viewer.actions.add("back", lambda _: back())
    viewer.actions.add("revert", lambda _: set_viewer_state())
    with viewer.config_state.txn() as s:
        s.input_event_bindings.viewer['shift+keyy'] = 'yes'
        s.input_event_bindings.viewer["shift+keyn"] = "no"
        s.input_event_bindings.viewer["shift+keys"] = "skip"
        s.input_event_bindings.viewer["shift+keyr"] = "revert"
        s.input_event_bindings.viewer["shift+keyb"] = "back"
    set_viewer_state()
    webbrowser.open_new(viewer.get_viewer_url())
    while True:
        time.sleep(1)
Example #16
    def __init__(self, stack='MD585'):
        self.stack = stack
        self.local_volume_fp_root = './'

        neuroglancer.set_server_bind_address('0.0.0.0')
        global_server_args['bind_port'] = 8099

        # Create viewer
        self.viewer = neuroglancer.Viewer()

        # Get the IP address
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("8.8.8.8", 80))
        print('s.getsockname', s.getsockname())
        ip_name = s.getsockname()[0]
        s.close()
        #ip_name = '127.0.0.1'

        # Build a shareable URL using the machine's external IP address
        self.url = str(
            'http://' + ip_name + ':' +
            self.viewer.get_viewer_url().split(':')[2])  # Remote URL
Example #17
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('precomputed',
                        default=None,
                        help='relative path in the file server')
    parser.add_argument('--server_port', type=int, default=41000)
    parser.add_argument('--client_port', type=int, default=42000)
    args = parser.parse_args()

    neuroglancer.set_server_bind_address(bind_address='127.0.0.1',
                                         bind_port=args.client_port)
    viewer = neuroglancer.Viewer()

    def dummy():
        print('hello callback')

    # viewer.defer_callback(dummy)
    image_path = os.path.join(args.precomputed, 'image')
    labels_path = os.path.join(args.precomputed, 'labels')

    #subprocess.call('http-server --cors -p '+str(args.server_port), shell=False)
    #os.system('http-server --headless -macro '+self.ijm_file)
    url = glance_precomputed(viewer=viewer,
                             image=image_path,
                             labels=labels_path,
                             port=args.server_port)
    print(url)
    webbrowser.open_new_tab(url)

    def signal_handler(signal, frame):
        print('You pressed Ctrl+C!')
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    print('Press Ctrl+C')
    signal.pause()
Example #18
def main():
    logging.basicConfig(level=logging.INFO)
    args = parse_args()
    if args.ip_address is not None and args.port is not None:
        neuroglancer.set_server_bind_address(bind_address=args.ip_address,
                                             bind_port=args.port)
    elif args.ip_address is not None:
        neuroglancer.set_server_bind_address(bind_address=args.ip_address)
    elif args.port is not None:
        neuroglancer.set_server_bind_address(bind_port=args.port)
    if args.static_content_source is not None:
        logging.warning("--static-content-source no longer has any effect")
        logging.warning("You can omit it if you want.")
    reference_voxel_size = \
        [float(_)*1 for _ in args.reference_voxel_size.split(",")]
    moving_voxel_size = \
        [float(_)*1 for _ in args.moving_voxel_size.split(",")]
    logging.info("Reading reference image")
    reference_image = tifffile.imread(args.reference_image).astype(np.float32)
    logging.info("Reading moving image")
    moving_image = tifffile.imread(args.moving_image).astype(np.float32)
    if args.segmentation is not None:
        logging.info("Reading segmentation")
        segmentation = tifffile.imread(args.segmentation).astype(np.uint32)
    else:
        segmentation = None
    vp = ViewerPair(reference_image,
                    moving_image,
                    segmentation,
                    args.points,
                    reference_voxel_size,
                    moving_voxel_size,
                    n_workers=args.n_workers)
    if not args.no_launch:
        vp.launch_viewers()
    vp.print_viewers()
    print("Hit ctrl+D to exit")
    while len(sys.stdin.read(1)) > 0:
        time.sleep(.1)
Example #19
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("files_and_colors", nargs="+",
                        help="File name followed by display name followed by"
                        "\"red\", \"green\", \"blue\", \"gray\", "
                        "\"jet\" or \"cubehelix\".")
    parser.add_argument("--segmentation",
                        default=None,
                        help="Segmentation volume to display")
    parser.add_argument("--ip-address",
                        default="127.0.0.1",
                        help="IP address of neuroglancer server.")
    parser.add_argument("--port",
                        default=0,
                        type=int,
                        help="Port # of neuroglancer server.")
    parser.add_argument("--static-content-source",
                        default=None,
                        help="The URL of the static content source, e.g. "
                        "http://localhost:8080 if being served via npm.")
    parser.add_argument("--points",
                        help="A points file in X, Y, Z order to display")
    parser.add_argument("--show-n",
                        type=int,
                        help="Show only a certain number of randomly selected "
                        "points.")
    args = parser.parse_args()
    if args.static_content_source is not None:
        neuroglancer.set_static_content_source(url=args.static_content_source)
    neuroglancer.set_server_bind_address(args.ip_address, args.port)
    viewer = neuroglancer.Viewer()
    with viewer.txn() as txn:
        for filename, name, colorname in zip(args.files_and_colors[::3],
                                             args.files_and_colors[1::3],
                                             args.files_and_colors[2::3]):
            if colorname.lower() == "red":
                shader = red_shader
            elif colorname.lower() == "green":
                shader = green_shader
            elif colorname.lower() == "blue":
                shader = blue_shader
            elif colorname.lower() in ("gray", "grey"):
                shader = gray_shader
            elif colorname.lower() == "jet":
                shader = jet_shader
            else:
                shader = cubehelix_shader
            if filename.startswith("precomputed://"):
                txn.layers[name] = neuroglancer.ImageLayer(
                    source = filename,
                    shader = shader % 1.0
                )
                continue
            paths = sorted(glob.glob(filename))
            if len(paths) == 0:
                sys.stderr.write("Could not find any files named %s" % filename)
                exit(1)
            elif len(paths) == 1:
                img = tifffile.imread(paths[0]).astype(np.float32)
            else:
                img = np.array([tifffile.imread(_) for _ in paths], np.float32)
            layer(txn, name, img, shader, 1.0)
        if args.segmentation is not None:
            seg = tifffile.imread(args.segmentation).astype(np.uint32)
            seglayer(txn, "segmentation", seg)
        if args.points is not None:
            with open(args.points) as fd:
                points = np.array(json.load(fd))
                if args.show_n is not None:
                    points = points[np.random.choice(len(points), args.show_n)]
                pointlayer(txn, "points",
                           points[:, 0], points[:, 1], points[:, 2], "red")

    print(viewer.get_viewer_url())
    webbrowser.open(viewer.get_viewer_url())
    while True:
        time.sleep(5)
Example #20
def main():
    app = QtWidgets.QApplication(sys.argv)
    args = parse_args()
    patches_xy, patches_xz, patches_yz = [[] for _ in range(3)]
    for i, patch_file in enumerate(args.patch_file):
        with h5py.File(patch_file, "r") as fd:
            if len(patches_xy) > 0:
                if len(fd["x"]) != len(x) or \
                   not np.all(fd["x"][:] == x) or\
                   not np.all(fd["y"][:] == y) or\
                   not np.all(fd["z"][:] == z):
                    raise ValueError("The patch files need to be constructed "
                                     "using the same set of blob coordinates")
                if fd["patches_xy"].shape != patches_xy[0].shape:
                    raise ValueError("Patch sizes need to be the same")
            else:
                x = fd["x"][:]
                y = fd["y"][:]
                z = fd["z"][:]
            patches_xy.append(fd["patches_xy"][:])
            patches_xz.append(fd["patches_xz"][:])
            patches_yz.append(fd["patches_yz"][:])
    if args.neuroglancer is None or len(args.neuroglancer) == 0:
        viewer = None
        image_names = None
        multiplier = None
        colors = []
    else:
        import neuroglancer
        import webbrowser
        if args.static_content_source is not None:
            print("--static-content-source is no longer used", file=sys.stderr)
        neuroglancer.set_server_bind_address(args.bind_address,
                                             bind_port=args.port)
        viewer = neuroglancer.Viewer()
        print("Neuroglancer URL: %s" % str(viewer))
        image_names = args.image_name or []
        while len(image_names) < len(args.neuroglancer):
            image_names.append("image_%d" % (len(image_names) + 1))
        colors = args.color or []
        ckeys = list(COLORS.keys())
        while len(colors) < len(args.neuroglancer):
            for color in ckeys:
                if color not in colors:
                    colors.append(color)
                    break
            else:
                colors.append(ckeys[len(colors) % len(COLORS)])
        multiplier = args.multiplier or []
        while len(multiplier) < len(args.neuroglancer):
            multiplier.append(1.0)

        with viewer.txn() as txn:
            for i in range(len(args.neuroglancer)):
                layer(txn,
                      image_names[i],
                      args.neuroglancer[i],
                      shader=COLORS[colors[i]],
                      multiplier=multiplier[i])

        webbrowser.open_new(viewer.get_viewer_url())
    window = ApplicationWindow(patches_xy, patches_xz, patches_yz, x, y, z,
                               args.n_components, args.use_position,
                               args.whiten, args.max_samples, args.n_jobs,
                               args.output, viewer, args.neuroglancer,
                               image_names, multiplier,
                               [COLORS[_] for _ in colors])
    window.setWindowTitle("Train")
    window.show()
    sys.exit(app.exec())
Example #21
import argparse
import numpy as np

import neuroglancer

ap = argparse.ArgumentParser()
ap.add_argument(
    '-a',
    '--bind-address',
    help='Bind address for Python web server.  Use 127.0.0.1 (the default) to restrict access '
    'to browsers running on the local machine; use 0.0.0.0 to permit access from remote browsers.')
ap.add_argument(
    '--static-content-url', help='Obtain the Neuroglancer client code from the specified URL.')
args = ap.parse_args()
if args.bind_address:
    neuroglancer.set_server_bind_address(args.bind_address)
if args.static_content_url:
    neuroglancer.set_static_content_source(url=args.static_content_url)

a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]], indexing='ij')
a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255

b = np.floor(np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10).astype(np.uint32)
b = np.pad(b, 1, 'constant')

viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    s.voxel_size = [10, 10, 10]
Example #22
#%%
import sys
sys.path.append('/home/zhw272/programming/pipeline_utility')
import neuroglancer
import imageio
from notebooks.Will.toolbox.IOs.get_path import get_subpath_to_tif_files
ip = 'localhost'  # or the machine's public IP for a sharable display
port = 98092  # change to an unused port number
neuroglancer.set_server_bind_address(bind_address=ip, bind_port=port)
viewer = neuroglancer.Viewer()
import matplotlib.pyplot as plt
# %%
# SNEMI
import numpy as np
from imageio import imread
import glob
import os

def folder2Vol(D0, dt=np.uint16, max_number_of_file_to_read=-1, down_sample_ratio=[1, 1, 1], file_list=None):
    if file_list is None:
        file_list = sorted(glob.glob(D0+'/*.tif'))
    number_of_files = len(file_list)
    if max_number_of_file_to_read > 0:
        number_of_files = min(number_of_files, max_number_of_file_to_read)
    number_of_files = number_of_files // down_sample_ratio[0]
    image_resolution = np.array(imread(file_list[0]).shape)[:2] // down_sample_ratio[1:]

    image_stack = np.zeros((number_of_files, image_resolution[0], image_resolution[1]), dtype=dt)
    section = 0
    for filei in range(number_of_files):
        print(f'loading section {section}')
Example #23
"""
structure = '3N_R'
str_contour, first_sec, last_sec = image_contour_generator(stack, detector_id, structure, use_local_alignment=True,
                                                           image_prep=2, threshold=0.2)
print(str_contour, first_sec, last_sec)


ng_structure_volume_normal = add_structure_to_neuroglancer(viewer, str_contour, structure, stack, first_sec, last_sec, \
                                                           color_radius=5, xy_ng_resolution_um=10, threshold=0.2,
                                                           color=5, \
                                                           solid_volume=False, no_offset_big_volume=False,
                                                           save_results=False, \
                                                           return_with_offsets=False, add_to_ng=True,
                                                           human_annotation=False)
"""
neuroglancer.set_server_bind_address(bind_port=33645)
viewer = neuroglancer.Viewer()

# Sets 'Image' layer to be prep2 images from S3 of <stack>
with viewer.txn() as s:
    s.layers['image'] = neuroglancer.ImageLayer(
        source=
        'precomputed://https://mousebrainatlas-datajoint-jp2k.s3.amazonaws.com/precomputed/'
        + stack + '_fullres')
    s.layout = 'xy'  # '3d'/'4panel'/'xy'
print(viewer)

# CREATE ENTIRE BRAIN VOLUME
xy_ng_resolution_um = 5

structure_filepath = os.path.join(DIR, 'neuroglancer/contours/json_cache',
Example #24
def main():
    global viewer

    parser = argparse.ArgumentParser(description="NeUroGlancer Ground Truth")
    parser.add_argument("--port",
                        type=int,
                        help="HTTP port for server",
                        default=0)
    parser.add_argument("--image", help="Path to image file", required=True)
    parser.add_argument("--alt-image", help="Path to a second image channel")
    parser.add_argument("--output",
                        help="Path to list of point annotations",
                        required=True)
    parser.add_argument("--detected",
                        help="Path to list of detected point annotations",
                        required=False)
    parser.add_argument("--min-distance",
                        help="Minimum distance between two annotations",
                        type=int,
                        default=10)
    parser.add_argument("--segmentation",
                        help="Path to existing segmentation file (optional)")
    parser.add_argument("--coordinates",
                        help="Coordinates of the image volume to edit"
                        "(x0,x1,y0,y1,z0,z1). "
                        "Example: \"1000,2000,4000,5000,300,500\" to edit"
                        "x=1000 to 2000, y=4000 to 5000, z=300 to 500.")
    parser.add_argument("--box-coordinates",
                        help="Coordinates of the bounding box to display. "
                        "Example: \"1000,2000,4000,5000,300,500\" to edit "
                        "x=1000 to 2000, y=4000 to 5000, z=300 to 500",
                        default=None)
    parser.add_argument("--bind-address",
                        help="The IP address to bind to as a webserver. "
                        "The default is 127.0.0.1 which is constrained to "
                        "the local machine.",
                        default="127.0.0.1")
    parser.add_argument("--static-content-source",
                        default=None,
                        help="The URL of the static content source, e.g. "
                        "http://localhost:8080 if being served via npm.")
    parser.add_argument("--reference-image",
                        help="An image of a reference volume to be used "
                        "for navigation.")
    parser.add_argument("--reference-segmentation",
                        help="A segmentation image of the reference volume "
                        "to be used for navigation.")
    parser.add_argument("--reference-points",
                        help="The point annotations, transformed into the "
                        "reference space.")
    parser.add_argument("--point-correspondence-file",
                        help="A .json file containing arrays of moving "
                        "and reference points to be used to warp the reference"
                        " frame into the moving frame.")
    parser.add_argument("--repositioning-log-file",
                        default=None,
                        help="This file saves a record of the coordinates "
                        "of each repositioning in order to keep track of "
                        "which areas have been visited.")
    parser.add_argument("--multiplier",
                        default=1.0,
                        type=float,
                        help="Multiplier for the image. Higher = brighter.")
    parser.add_argument("--alt-multiplier",
                        default=1.0,
                        type=float,
                        help="Multiplier for the alternate image. "
                        "Higher=brighter")
    args = parser.parse_args()
    neuroglancer.set_server_bind_address(args.bind_address,
                                         bind_port=args.port)
    if args.box_coordinates is None:
        box_coordinates = None
    else:
        box_coordinates = list(map(int, args.box_coordinates.split(",")))
        box_coordinates = [box_coordinates[::2], box_coordinates[1::2]]
    if args.static_content_source is not None:
        neuroglancer.set_static_content_source(url=args.static_content_source)

    if args.coordinates:
        x0, x1, y0, y1, z0, z1 = list(map(int, args.coordinates.split(",")))
        viewer = NuggtViewer(args.image,
                             args.alt_image,
                             args.segmentation,
                             args.output,
                             args.detected,
                             x0,
                             x1,
                             y0,
                             y1,
                             z0,
                             z1,
                             min_distance=args.min_distance,
                             multiplier=args.multiplier,
                             alt_multiplier=args.alt_multiplier,
                             box_coords=box_coordinates)
    else:
        viewer = NuggtViewer(args.image,
                             args.alt_image,
                             args.segmentation,
                             args.output,
                             args.detected,
                             min_distance=args.min_distance,
                             multiplier=args.multiplier,
                             alt_multiplier=args.alt_multiplier,
                             box_coords=box_coordinates)

    print("Editing viewer: %s" % viewer.viewer.get_viewer_url())
    webbrowser.open_new(viewer.viewer.get_viewer_url())
    if args.reference_image is not None:
        ref_img = tifffile.imread(args.reference_image).astype(np.float32)
        ref_seg = tifffile.imread(args.reference_segmentation).astype(
            np.uint16)
        with open(args.point_correspondence_file) as fd:
            d = json.load(fd)
        moving = np.array(d["moving"])
        reference = np.array(d["reference"])
        nav_viewer = NavViewer(ref_img, ref_seg, moving, reference,
                               viewer.shape)
        nav_viewer.bind()
        if args.repositioning_log_file is not None:
            nav_viewer.repositioning_log_file = args.repositioning_log_file
        sample = np.random.permutation(len(viewer.points))[:10000]
        if args.reference_points is not None:
            with open(args.reference_points) as fd:
                rp = np.array(json.load(fd))
                pts = rp[sample, ::-1]
        else:
            pts = viewer.points[sample, ::-1]
        nav_viewer.add_points(pts)
        print("Navigating viewer: %s" % nav_viewer.viewer.get_viewer_url())
    print("Hit control-c to exit")
    while True:
        time.sleep(10)
Example #25
def vis_points_with_array(raw: np.ndarray, points: nx.DiGraph,
                          voxel_size: np.ndarray):
    ngid = itertools.count(start=1)

    neuroglancer.set_server_bind_address("0.0.0.0")
    viewer = neuroglancer.Viewer()

    nodes = []
    edges = []

    for node_a, node_b in points.edges:
        a = points.nodes[node_a]["location"][::-1]
        b = points.nodes[node_b]["location"][::-1]

        pos_u = a
        pos_v = b

        nodes.append(
            neuroglancer.EllipsoidAnnotation(center=pos_u,
                                             radii=(3, 3, 3) / voxel_size,
                                             id=next(ngid)))
        edges.append(
            neuroglancer.LineAnnotation(point_a=pos_u,
                                        point_b=pos_v,
                                        id=next(ngid)))
    nodes.append(
        neuroglancer.EllipsoidAnnotation(center=pos_v,
                                         radii=(1, 1, 1) / voxel_size,
                                         id=next(ngid)))

    print(raw.shape)

    max_raw = np.max(raw)
    min_raw = np.min(raw)
    diff_raw = max_raw - min_raw

    try:
        raw = ((raw - min_raw) / float(diff_raw) * 255).astype("uint8")
    except Exception as e:
        print(min_raw, max_raw)
        raise e

    with viewer.txn() as s:
        s.layers["raw"] = neuroglancer.ImageLayer(
            source=neuroglancer.LocalVolume(data=raw.transpose([2, 1, 0]),
                                            voxel_size=voxel_size))
        s.layers["edges"] = neuroglancer.AnnotationLayer(
            voxel_size=voxel_size,
            filter_by_segmentation=False,
            annotation_color="#add8e6",
            annotations=edges,
        )
        s.layers["nodes"] = neuroglancer.AnnotationLayer(
            voxel_size=voxel_size,
            filter_by_segmentation=False,
            annotation_color="#ff00ff",
            annotations=nodes,
        )
        position = np.array(raw.shape) // 2
        s.navigation.position.voxelCoordinates = tuple(position)
    print(viewer)

    input("done?")
Example #26
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--bind-address",
                        help="The IP address to bind to as a webserver. "
                        "The default is 127.0.0.1 which is constrained to "
                        "the local machine.",
                        default="127.0.0.1")
    parser.add_argument("--port",
                        type=int,
                        help="HTTP port for server",
                        default=0)
    parser.add_argument("--gray-image",
                        help="Path to an image file to be displayed in gray.")
    parser.add_argument("--gray-image-name",
                        help="The name of the gray image",
                        default="image")
    parser.add_argument("--red-image",
                        help="Path to an image file to be displayed in red.")
    parser.add_argument("--red-image-name",
                        help="The name of the red image",
                        default="red")
    parser.add_argument("--green-image",
                        help="Path to an image file to be displayed in green.")
    parser.add_argument("--green-image-name",
                        help="The name of the green image",
                        default="green")
    parser.add_argument("--blue-image",
                        help="Path to an image file to be displayed in blue.")
    parser.add_argument("--blue-image-name",
                        help="The name of the blue image",
                        default="blue")
    parser.add_argument("--input-coordinates",
                        help="A JSON file of input coordinates as a list of "
                        "three-tuples in X, Y, Z order",
                        required=True)
    parser.add_argument("--yea-coordinates",
                        help="The name of a JSON file to be written with "
                        "the coordinates of \"yea\" points.")
    parser.add_argument("--nay-coordinates",
                        help="The name of a JSON file to be written with the "
                        "coordinates of \"nay\" points.")
    parser.add_argument("--no-browser",
                        help="Do not launch the browser",
                        action="store_true")
    parser.add_argument("--new-browser-window",
                        help="Open Neuroglancer in a new browser window.",
                        action="store_true")
    parser.add_argument("--static-content-source",
                        default=None,
                        help="The URL of the static content source, e.g. "
                        "http://localhost:8080 if being served via npm.")

    args = parser.parse_args()
    if args.static_content_source is not None:
        neuroglancer.set_static_content_source(url=args.static_content_source)
    neuroglancer.set_server_bind_address(args.bind_address,
                                         bind_port=args.port)

    with open(args.input_coordinates) as fd:
        points = np.array(json.load(fd))

    imgs = []
    for path, name, shader in ((args.gray_image, args.gray_image_name,
                                gray_shader),
                               (args.red_image, args.red_image_name,
                                red_shader),
                               (args.green_image, args.green_image_name,
                                green_shader), (args.blue_image,
                                                args.blue_image_name,
                                                blue_shader)):
        if path is not None:
            img = tifffile.imread(path)
            imgs.append((img, name, shader))

    def save_cb(yea, nay):
        if args.yea_coordinates is not None:
            yea = yea.astype(points.dtype)
            with open(args.yea_coordinates, "w") as fd:
                json.dump(yea.tolist(), fd)
        if args.nay_coordinates is not None:
            nay = nay.astype(points.dtype)
            with open(args.nay_coordinates, "w") as fd:
                json.dump(nay.tolist(), fd)

    if args.no_browser:
        yea, nay = sort_points(imgs, points, save_cb=save_cb)
    elif args.new_browser_window:
        yea, nay = sort_points(imgs, points, launch_ui="new", save_cb=save_cb)
    else:
        yea, nay = sort_points(imgs, points, launch_ui=True, save_cb=save_cb)
    save_cb(yea, nay)
Example #27
    .apply(lambda x: x.replace(',,', ',')).apply(lambda x: x.replace(',,', ','))
hand_annotations['vertices'] = hand_annotations['vertices'].apply(lambda x: ast.literal_eval(x))

hand_annotations.to_csv('hand_annotations.csv')
"""
csvfile = os.path.join(DIR, 'contours', 'hand_annotations.csv')
hand_annotations = pd.read_csv(csvfile)
hand_annotations['vertices'] = hand_annotations['vertices'].apply(
    lambda x: ast.literal_eval(x))

#annotation = np.load(filepath, allow_pickle = True, encoding='latin1')
#contours = pd.DataFrame(annotation)
#hand_annotations = contours.rename(columns={0:"name", 1:"section", 2:"vertices"})
#unique_values = hand_annotations.groupby(['name']).size()
print(hand_annotations.head())
#sys.exit()
str_contours_annotation, first_sec, last_sec = get_contours_from_annotations(
    animal, target_structure, hand_annotations, densify=0)
color = 2
neuroglancer.set_server_bind_address('0.0.0.0')
viewer = neuroglancer.Viewer()
print(viewer)

ng_structure_volume = add_structure_to_neuroglancer(
    viewer, str_contours_annotation, target_structure, animal, first_sec, last_sec,
    color_radius=2, xy_ng_resolution_um=5, threshold=1, color=color,
    solid_volume=False, no_offset_big_volume=True, save_results=False,
    return_with_offsets=False, add_to_ng=True, human_annotation=True)

print('ng_structure_volume shape', ng_structure_volume.shape)
print('ng_structure_volume max', np.amax(ng_structure_volume))
Example #28
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("files_and_colors",
                        nargs="+",
                        help="File name followed by display name followed by"
                        "\"red\", \"green\", \"blue\", \"gray\", "
                        "\"jet\" or \"cubehelix\".")
    parser.add_argument("--segmentation",
                        default=None,
                        help="Segmentation volume to display")
    parser.add_argument("--ip-address",
                        default="127.0.0.1",
                        help="IP address of neuroglancer server.")
    parser.add_argument("--port",
                        default=0,
                        type=int,
                        help="Port # of neuroglancer server.")
    parser.add_argument("--static-content-source",
                        default=None,
                        help="Obsolete - no longer has any effect")
    parser.add_argument("--points",
                        help="A points file in Z, Y, X order to display")
    parser.add_argument("--show-n",
                        type=int,
                        help="Show only a certain number of randomly selected "
                        "points.")
    args = parser.parse_args()
    if args.static_content_source is not None:
        print("Warning - --static-content-source no longer has any effect",
              file=sys.stderr)
        print("          You can safely omit this from your command line",
              file=sys.stderr)
    neuroglancer.set_server_bind_address(args.ip_address, args.port)

    # Define default dimensions for the viewer: x, y, z in micrometers
    dim_names = ["x", "y", "z"]
    dim_units = ["µm"] * 3
    dim_scales = [1.0] * 3

    default_dimensions = neuroglancer.CoordinateSpace(names=dim_names,
                                                      units=dim_units,
                                                      scales=dim_scales)

    viewer = neuroglancer.Viewer()
    with viewer.txn() as txn:
        txn.dimensions = default_dimensions
        for filename, name, colorname in zip(args.files_and_colors[::3],
                                             args.files_and_colors[1::3],
                                             args.files_and_colors[2::3]):
            if colorname.lower() == "red":
                shader = red_shader
            elif colorname.lower() == "green":
                shader = green_shader
            elif colorname.lower() == "blue":
                shader = blue_shader
            elif colorname.lower() in ("gray", "grey"):
                shader = gray_shader
            elif colorname.lower() == "jet":
                shader = jet_shader
            else:
                shader = cubehelix_shader
            if filename.startswith("precomputed://"):
                txn.layers[name] = neuroglancer.ImageLayer(source=filename,
                                                           shader=shader % 1.0)
                continue
            paths = sorted(glob.glob(filename))
            if len(paths) == 0:
                sys.stderr.write("Could not find any files named %s" %
                                 filename)
                exit(1)
            elif len(paths) == 1:
                img = tifffile.imread(paths[0])
            else:
                img = np.array([tifffile.imread(_) for _ in paths])
            layer(txn, name, img, shader, 1.0, dimensions=default_dimensions)
        if args.segmentation is not None:
            seg = tifffile.imread(args.segmentation).astype(np.uint32)
            seglayer(txn, "segmentation", seg)
        if args.points is not None:
            with open(args.points) as fd:
                points = np.array(json.load(fd))
                if args.show_n is not None:
                    points = points[np.random.choice(len(points), args.show_n)]
                pointlayer(txn, "points", points[:, 0], points[:, 1],
                           points[:, 2], "red")

    print(viewer.get_viewer_url())
    #webbrowser.open(viewer.get_viewer_url())
    while True:
        time.sleep(5)
Example #29
## basic shim to load up neuroglancer in a browser:
import neuroglancer
import logging
from time import sleep
import progproxy as pp
import redis

logging.basicConfig(level=logging.DEBUG)
# we are currently using the seunglab hosted neuroglancer static resources
# ideally this would be self hosted for local development against nglancer
logging.info("configuring neuroglancer defaults")
neuroglancer.set_static_content_source(
    url="https://neuromancer-seung-import.appspot.com")
## neuroglancer setup segment:
## set the tornado server that is launched to talk on all ips and at port 8080
neuroglancer.set_server_bind_address("0.0.0.0", "8080")

neuroglancer.debug = True
neuroglancer.server.debug = True

logging.info("starting viewer subprocess")
# setup a viewer with pre-configured defaults and launch.
viewer = neuroglancer.Viewer()

logging.info("viewer token: {}".format(viewer.token))

logging.info("setting viewers default volume")
# load data from cloudvolume container:
with viewer.txn() as s:
    s.layers["segmentation"] = neuroglancer.SegmentationLayer(
        source="precomputed://http://localhost/testcv/")
Example #30
def init_ngserver(bind='0.0.0.0', port=8080):
    # Use the bind argument rather than a hard-coded address
    neuroglancer.set_server_bind_address(bind, port)
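Usage is then a single call; the address and port below are illustrative. set_server_bind_address configures the shared global server, so viewers created afterwards are served from the bound address.

init_ngserver(bind='127.0.0.1', port=9000)  # illustrative values
viewer = neuroglancer.Viewer()
print(viewer)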
Example #31
dimensions = neuroglancer.CoordinateSpace(
    names=['x', 'y', 'z'],
    units='nm',
    scales=[10, 10, 10],
)
with viewer.txn() as s:
    s.layers.append(
        name='a',
        layer=neuroglancer.SegmentationLayer(
            source=[
                neuroglancer.LocalVolume(
                    data=segmentation,
                    dimensions=dimensions,
                ),
                SkeletonSource(dimensions),
            ],
            skeleton_shader='void main() { emitRGB(colormapJet(affinity)); }',
            selected_alpha=0,
            not_selected_alpha=0,
        ))

if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument('--static-content-url')
    ap.add_argument('-a', '--bind-address')
    args = ap.parse_args()
    neuroglancer.server.debug = True
    if args.bind_address:
        neuroglancer.set_server_bind_address(args.bind_address)
    if args.static_content_url:
        neuroglancer.set_static_content_source(url=args.static_content_url)
    print(viewer)
Example #32
def set_server_bind_adress(neuroglancer, address="localhost", port=9000):
    neuroglancer.set_server_bind_address(address, port)
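The wrapper just forwards to the library (its name keeps the source's spelling, "adress"). An illustrative call with placeholder values:

import neuroglancer

set_server_bind_adress(neuroglancer, address="0.0.0.0", port=9000)  # placeholder values
viewer = neuroglancer.Viewer()
print(viewer)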