Example #1
File: cli.py Project: tartavull/trace
def visualize(dataset, split, aff, ip, port):
    """
    Opens a tab in your web browser showing the chosen dataset
    """
    import neuroglancer
    import webbrowser

    config = config_dict(dataset)

    neuroglancer.set_static_content_source(
        url='https://neuroglancer-demo.appspot.com')
    neuroglancer.set_server_bind_address(bind_address=ip, bind_port=port)
    viewer = neuroglancer.Viewer(voxel_size=[6, 6, 30])
    if aff:
        import augmentation
        augmentation.maybe_create_affinities(split)
        add_affinities(config.folder, split + '-affinities', viewer)
    else:
        add_file(config.folder, split + '-input', viewer)
        add_file(config.folder, split + '-labels', viewer)

    print('open your browser at:')
    # Replace the second address with your own server's public IP address
    print(str(viewer).replace('172.17.0.2', '54.166.106.209'))
    webbrowser.open(str(viewer))
    print("press enter to exit")
    input()
Example #2
    def __call__(self, chunks: dict):
        """
        Parameters:
        chunks: a dict mapping layer names to chunks
        """
        ng.set_static_content_source(
            url='https://neuromancer-seung-import.appspot.com')
        ng.set_server_bind_address(bind_port=self.port)
        viewer = ng.Viewer()

        with viewer.txn() as s:
            for chunk_name, chunk in chunks.items():
                global_offset = chunk.global_offset
                chunk = np.ascontiguousarray(chunk)

                s.layers.append(
                    name=chunk_name,
                    layer=ng.LocalVolume(
                        data=chunk,
                        dimensions=ng.CoordinateSpace(
                            # channel dimension first, then x/y/z;
                            # voxel_size is stored z/y/x, hence the reversal
                            scales=[1, *self.voxel_size[::-1]],
                            units=['', 'nm', 'nm', 'nm'],
                            names=['c^', 'x', 'y', 'z']),
                        # offset is in nm, not voxels
                        offset=list(o * v for o, v in zip(
                            global_offset[::-1][-3:], self.voxel_size[::-1]))))
        print('Open this url in browser: ')
        print(viewer)
        input('Press Enter to exit neuroglancer.')
Example #3
File: align.py Project: richardqiu/nuggt
def main():
    logging.basicConfig(level=logging.INFO)
    args = parse_args()
    if args.ip_address is not None and args.port is not None:
        neuroglancer.set_server_bind_address(bind_address=args.ip_address,
                                             bind_port=args.port)
    elif args.ip_address is not None:
        neuroglancer.set_server_bind_address(bind_address=args.ip_address)
    elif args.port is not None:
        neuroglancer.set_server_bind_address(bind_port=args.port)
    if args.static_content_source is not None:
        neuroglancer.set_static_content_source(url=args.static_content_source)
    # voxel sizes arrive in microns on the command line; convert to nm
    reference_voxel_size = \
        [float(_) * 1000 for _ in args.reference_voxel_size.split(",")]
    moving_voxel_size = \
        [float(_) * 1000 for _ in args.moving_voxel_size.split(",")]
    logging.info("Reading reference image")
    reference_image = tifffile.imread(args.reference_image).astype(np.float32)
    logging.info("Reading moving image")
    moving_image = tifffile.imread(args.moving_image).astype(np.float32)
    if args.segmentation is not None:
        logging.info("Reading segmentation")
        segmentation = tifffile.imread(args.segmentation).astype(np.uint32)
    else:
        segmentation = None
    vp = ViewerPair(reference_image, moving_image, segmentation, args.points,
                    reference_voxel_size, moving_voxel_size)
    if not args.no_launch:
        vp.launch_viewers()
    vp.print_viewers()
    while True:
        time.sleep(10)
Example #4
def main(args=sys.argv[1:]):
    opts = parse_args(args)
    neuroglancer.set_static_content_source(url=opts.static_content_source)
    neuroglancer.set_server_bind_address(opts.bind_address, opts.port)
    viewer_pair = ViewerPair(opts.fixed_url, opts.moving_url,
                             opts.filtered_pts)
    while True:
        time.sleep(10)
Example #5
def run_vol(vol_idx):
    neuroglancer.set_static_content_source(url='https://neuroglancer-demo.appspot.com')

    with h5py.File(os.path.expanduser('~/data/div_detect/full_records.h5'), 'r') as rec_file:
        record = rec_file['records'][vol_idx, :, :, :, 1]

    viewer = neuroglancer.Viewer(voxel_size=[1, 1, 1])
    viewer.add(vol_to_int8(record),
               name='record')
    #viewer.add(b, name='b')
    return viewer
Example #6
def main():
    app = QtWidgets.QApplication(sys.argv)
    args = parse_args()
    if args.static_content_source is not None:
        neuroglancer.set_static_content_source(
            url=args.static_content_source)
    neuroglancer.set_server_bind_address(
        args.bind_address, bind_port=args.port)
    viewer = neuroglancer.Viewer()
    print("Neuroglancer URL: %s" % str(viewer))
    window = ApplicationWindow(viewer)
    window.show()
    sys.exit(app.exec())
Example #7
    def __init__(self):
        # Address
        neuroglancer.set_server_bind_address('127.0.0.1')
        neuroglancer.set_static_content_source(url='http://localhost:8080')

        # Data
        img = tifffile.imread('sample.tif')
        img = img * 10 / 256
        img = img.astype('uint8')
        img = np.transpose(img, (1, 0, 2, 3))
        self.img = img

        # Same viewer every function call
        viewer = self.viewer = neuroglancer.Viewer()
Example #8
def create_neuroglancer_viewer(model: Model) -> neuroglancer.Viewer:
    """
    Create a viewer for a Neuroglancer instance

    :param model: has the details for the static Neuroglancer elements
    :return: a Neuroglancer viewer that can be used to display volumes
    """
    if not model.neuroglancer_initialized.get():
        neuroglancer.set_static_content_source(
            url=model.static_content_source.get())
        neuroglancer.set_server_bind_address(
            model.bind_address.get(),
            model.port_number.get())
        model.neuroglancer_initialized.set(True)
    return neuroglancer.Viewer()
Example #9
def run_interactive(args, graph):
    # Make splitter a global variable so that it is accessible from the
    # interactive `python -i` shell.
    global splitter

    if args.bind_address:
        neuroglancer.set_server_bind_address(args.bind_address)
    if args.static_content_url:
        neuroglancer.set_static_content_source(url=args.static_content_url)

    splitter = InteractiveSplitter(graph,
                                   agglo_id=args.agglo_id,
                                   image_url=args.image_url,
                                   segmentation_url=args.segmentation_url,
                                   state_path=args.state)
    print(splitter.viewer)
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("files_and_colors",
                        nargs="+",
                        help="File name followed by display name followed by"
                        "\"red\", \"green\", \"blue\" or \"gray\".")
    parser.add_argument("--segmentation",
                        default=None,
                        help="Segmentation volume to display")
    parser.add_argument("--ip-address",
                        default="127.0.0.1",
                        help="IP address of neuroglancer server.")
    parser.add_argument("--port",
                        default=0,
                        type=int,
                        help="Port # of neuroglancer server.")
    parser.add_argument("--static-content-source",
                        default=None,
                        help="The URL of the static content source, e.g. "
                        "http://localhost:8080 if being served via npm.")
    args = parser.parse_args()
    if args.static_content_source is not None:
        neuroglancer.set_static_content_source(url=args.static_content_source)
    neuroglancer.set_server_bind_address(args.ip_address, args.port)
    viewer = neuroglancer.Viewer()
    with viewer.txn() as txn:
        for filename, name, colorname in zip(args.files_and_colors[::3],
                                             args.files_and_colors[1::3],
                                             args.files_and_colors[2::3]):
            img = tifffile.imread(filename).astype(np.float32)
            if colorname.lower() == "red":
                shader = red_shader
            elif colorname.lower() == "green":
                shader = green_shader
            elif colorname.lower() == "blue":
                shader = blue_shader
            else:
                shader = gray_shader
            layer(txn, name, img, shader, 1.0)
        if args.segmentation is not None:
            seg = tifffile.imread(args.segmentation).astype(np.uint32)
            seglayer(txn, "segmentation", seg)

    print(viewer.get_viewer_url())
    webbrowser.open(viewer.get_viewer_url())
    while True:
        time.sleep(5)
Example #11
def run_interactive(args, graph):
    # Make splitter a global variable so that it is accessible from the
    # interactive `python -i` shell.
    global splitter

    if args.bind_address:
        neuroglancer.set_server_bind_address(args.bind_address)
    if args.static_content_url:
        neuroglancer.set_static_content_source(url=args.static_content_url)

    splitter = InteractiveSplitter(
        graph,
        agglo_id=args.agglo_id,
        image_url=args.image_url,
        segmentation_url=args.segmentation_url,
        state_path=args.state)
    print(splitter.viewer)
Example #12
def main():
    global output_file_name
    global synapses
    global viewer
    args = parse_args()
    neuroglancer.set_static_content_source(url=args.static_content_source)
    neuroglancer.set_server_bind_address(bind_port=int(args.port))
    image_url = args.image_url
    segmentation_url = args.segmentation_url
    output_file_name = args.output

    with open(args.synapses) as fd:
        synapse_dict = json.load(fd)

    for n1, n2, x, y, z in zip(synapse_dict["neuron_1"],
                               synapse_dict["neuron_2"],
                               synapse_dict["synapse_center"]["x"],
                               synapse_dict["synapse_center"]["y"],
                               synapse_dict["synapse_center"]["z"]):
        synapses.append(Synapse(n1, n2, x, y, z))

    viewer = neuroglancer.Viewer()
    with viewer.txn() as s:
        s.layers['image'] = neuroglancer.ImageLayer(source=image_url)
        s.layers['segmentation'] = neuroglancer.SegmentationLayer(
            source=segmentation_url)
    viewer.actions.add("yes", lambda _: yes())
    viewer.actions.add("no", lambda _: no())
    viewer.actions.add("skip", lambda _: skip())
    viewer.actions.add("back", lambda _: back())
    viewer.actions.add("revert", lambda _: set_viewer_state())
    with viewer.config_state.txn() as s:
        s.input_event_bindings.viewer['shift+keyy'] = 'yes'
        s.input_event_bindings.viewer["shift+keyn"] = "no"
        s.input_event_bindings.viewer["shift+keys"] = "skip"
        s.input_event_bindings.viewer["shift+keyr"] = "revert"
        s.input_event_bindings.viewer["shift+keyb"] = "back"
    set_viewer_state()
    webbrowser.open_new(viewer.get_viewer_url())
    while True:
        time.sleep(1)
Example #13
import argparse

import numpy as np

import neuroglancer

ap = argparse.ArgumentParser()
ap.add_argument(
    '-a',
    '--bind-address',
    help='Bind address for Python web server.  Use 127.0.0.1 (the default) to restrict access '
    'to browsers running on the local machine, use 0.0.0.0 to permit access from remote browsers.')
ap.add_argument(
    '--static-content-url', help='Obtain the Neuroglancer client code from the specified URL.')
args = ap.parse_args()
if args.bind_address:
    neuroglancer.set_server_bind_address(args.bind_address)
if args.static_content_url:
    neuroglancer.set_static_content_source(url=args.static_content_url)

a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]], indexing='ij')
a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255

b = np.floor(np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10).astype(np.uint32)
b = np.pad(b, 1, 'constant')

viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    s.voxel_size = [10, 10, 10]
    s.layers.append(
        name='a',
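        # (the snippet is cut off here in the source; based on the matching
        # code in Example #14 below, the likely continuation is:)
        layer=neuroglancer.LocalVolume(data=a, voxel_size=s.voxel_size),
        shader="""
void main() {
  emitRGB(vec3(toNormalized(getDataValue(0)),
               toNormalized(getDataValue(1)),
               toNormalized(getDataValue(2))));
}
""")
    s.layers.append(
        name='b',
        layer=neuroglancer.LocalVolume(data=b, voxel_size=s.voxel_size))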
Example #14
import numpy as np

import neuroglancer

# (the snippet starts mid-file; 'a' is defined as in Examples #13 and #19)
a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]],
                         indexing='ij')
a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255

b = np.floor(
    np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10).astype(np.uint32)

# Obtain the bundled Neuroglancer client code (HTML, CSS, and JavaScript) from
# the demo server, so that this example works even if
#
#   python setup.py bundle_client
#
# has not been run.
neuroglancer.set_static_content_source(
    url='https://neuroglancer-demo.appspot.com')

viewer = neuroglancer.Viewer()
viewer.add(a,
           name='a',
           offset=(20, 30, 50),
           shader="""
void main() {
  emitRGB(vec3(toNormalized(getDataValue(0)),
               toNormalized(getDataValue(1)),
               toNormalized(getDataValue(2))));
}
""")
viewer.add(b, name='b')
print(viewer)
Example #15
#!/usr/bin/env python2
"""Tool for extending via equivalences a set of segments."""

from __future__ import absolute_import, print_function

import argparse
import copy
import os
import webbrowser

import neuroglancer
from neuroglancer.json_utils import decode_json, encode_json

neuroglancer.set_static_content_source(url='http://localhost:8080')


def get_segmentation_layer(layers):
    for layer in layers:
        if isinstance(layer.user_layer, neuroglancer.SegmentationLayer):
            return layer


class Annotator(object):
    def __init__(self, filename):
        self.filename = filename
        self.point_annotation_layer_name = 'false-merges'
        self.states = []
        self.state_index = None
        viewer = self.viewer = neuroglancer.managed_viewer.ManagedViewer()
        self.other_state_segment_ids = dict()
Example #16
def main():
    app = QtWidgets.QApplication(sys.argv)
    args = parse_args()
    patches_xy, patches_xz, patches_yz = [[] for _ in range(3)]
    for i, patch_file in enumerate(args.patch_file):
        with h5py.File(patch_file, "r") as fd:
            if len(patches_xy) > 0:
                if len(fd["x"]) != len(x) or \
                   not np.all(fd["x"][:] == x) or\
                   not np.all(fd["y"][:] == y) or\
                   not np.all(fd["z"][:] == z):
                    raise ValueError("The patch files need to be constructed "
                                     "using the same set of blob coordinates")
                if fd["patches_xy"].shape != patches_xy[0].shape:
                    raise ValueError("Patch sizes need to be the same")
            else:
                x = fd["x"][:]
                y = fd["y"][:]
                z = fd["z"][:]
            patches_xy.append(fd["patches_xy"][:])
            patches_xz.append(fd["patches_xz"][:])
            patches_yz.append(fd["patches_yz"][:])
    if args.neuroglancer is None or len(args.neuroglancer) == 0:
        viewer = None
        image_names = None
        multiplier = None
        colors = []
    else:
        import neuroglancer
        import webbrowser
        if args.static_content_source is not None:
            neuroglancer.set_static_content_source(
                url=args.static_content_source)
        neuroglancer.set_server_bind_address(args.bind_address,
                                             bind_port=args.port)
        viewer = neuroglancer.Viewer()
        print("Neuroglancer URL: %s" % str(viewer))
        image_names = args.image_name or []
        while len(image_names) < len(args.neuroglancer):
            image_names.append("image_%d" % (len(image_names) + 1))
        colors = args.color or []
        ckeys = list(COLORS.keys())
        while len(colors) < len(args.neuroglancer):
            for color in ckeys:
                if color not in colors:
                    colors.append(color)
                    break
            else:
                colors.append(ckeys[len(colors) % len(COLORS)])
        multiplier = args.multiplier or []
        while len(multiplier) < len(args.neuroglancer):
            multiplier.append(1.0)

        with viewer.txn() as txn:
            for i in range(len(args.neuroglancer)):
                txn.layers[image_names[i]] = neuroglancer.ImageLayer(
                    source=args.neuroglancer[i],
                    shader=COLORS[colors[i]] % multiplier[i])
        webbrowser.open_new(viewer.get_viewer_url())
    window = ApplicationWindow(patches_xy, patches_xz, patches_yz, x, y, z,
                               args.n_components, args.use_position,
                               args.whiten, args.max_samples, args.n_jobs,
                               args.output, viewer, image_names, multiplier,
                               [COLORS[_] for _ in colors])
    window.setWindowTitle("Train")
    window.show()
    sys.exit(app.exec())
Example #17
def main():
    global viewer

    parser = argparse.ArgumentParser(description="NeUroGlancer Ground Truth")
    parser.add_argument("--port",
                        type=int,
                        help="HTTP port for server",
                        default=0)
    parser.add_argument("--image", help="Path to image file", required=True)
    parser.add_argument("--alt-image", help="Path to a second image channel")
    parser.add_argument("--output",
                        help="Path to list of point annotations",
                        required=True)
    parser.add_argument("--detected",
                        help="Path to list of detected point annotations",
                        required=False)
    parser.add_argument("--min-distance",
                        help="Minimum distance between two annotations",
                        type=int,
                        default=10)
    parser.add_argument("--segmentation",
                        help="Path to existing segmentation file (optional)")
    parser.add_argument("--coordinates",
                        help="Coordinates of the image volume to edit"
                        "(x0,x1,y0,y1,z0,z1). "
                        "Example: \"1000,2000,4000,5000,300,500\" to edit"
                        "x=1000 to 2000, y=4000 to 5000, z=300 to 500.")
    parser.add_argument("--box-coordinates",
                        help="Coordinates of the bounding box to display. "
                        "Example: \"1000,2000,4000,5000,300,500\" to edit "
                        "x=1000 to 2000, y=4000 to 5000, z=300 to 500",
                        default=None)
    parser.add_argument("--bind-address",
                        help="The IP address to bind to as a webserver. "
                        "The default is 127.0.0.1 which is constrained to "
                        "the local machine.",
                        default="127.0.0.1")
    parser.add_argument("--static-content-source",
                        default=None,
                        help="The URL of the static content source, e.g. "
                        "http://localhost:8080 if being served via npm.")
    parser.add_argument("--reference-image",
                        help="An image of a reference volume to be used "
                        "for navigation.")
    parser.add_argument("--reference-segmentation",
                        help="A segmentation image of the reference volume "
                        "to be used for navigation.")
    parser.add_argument("--reference-points",
                        help="The point annotations, transformed into the "
                        "reference space.")
    parser.add_argument("--point-correspondence-file",
                        help="A .json file containing arrays of moving "
                        "and reference points to be used to warp the reference"
                        " frame into the moving frame.")
    parser.add_argument("--repositioning-log-file",
                        default=None,
                        help="This file saves a record of the coordinates "
                        "of each repositioning in order to keep track of "
                        "which areas have been visited.")
    parser.add_argument("--multiplier",
                        default=1.0,
                        type=float,
                        help="Multiplier for the image. Higher = brighter.")
    parser.add_argument("--alt-multiplier",
                        default=1.0,
                        type=float,
                        help="Multiplier for the alternate image. "
                        "Higher=brighter")
    args = parser.parse_args()
    neuroglancer.set_server_bind_address(args.bind_address,
                                         bind_port=args.port)
    if args.box_coordinates is None:
        box_coordinates = None
    else:
        box_coordinates = list(map(int, args.box_coordinates.split(",")))
        box_coordinates = [box_coordinates[::2], box_coordinates[1::2]]
    if args.static_content_source is not None:
        neuroglancer.set_static_content_source(url=args.static_content_source)

    if args.coordinates:
        x0, x1, y0, y1, z0, z1 = list(map(int, args.coordinates.split(",")))
        viewer = NuggtViewer(args.image,
                             args.alt_image,
                             args.segmentation,
                             args.output,
                             args.detected,
                             x0,
                             x1,
                             y0,
                             y1,
                             z0,
                             z1,
                             min_distance=args.min_distance,
                             multiplier=args.multiplier,
                             alt_multiplier=args.alt_multiplier,
                             box_coords=box_coordinates)
    else:
        viewer = NuggtViewer(args.image,
                             args.alt_image,
                             args.segmentation,
                             args.output,
                             args.detected,
                             min_distance=args.min_distance,
                             multiplier=args.multiplier,
                             alt_multiplier=args.alt_multiplier,
                             box_coords=box_coordinates)

    print("Editing viewer: %s" % viewer.viewer.get_viewer_url())
    webbrowser.open_new(viewer.viewer.get_viewer_url())
    if args.reference_image is not None:
        ref_img = tifffile.imread(args.reference_image).astype(np.float32)
        ref_seg = tifffile.imread(args.reference_segmentation).astype(
            np.uint16)
        with open(args.point_correspondence_file) as fd:
            d = json.load(fd)
        moving = np.array(d["moving"])
        reference = np.array(d["reference"])
        nav_viewer = NavViewer(ref_img, ref_seg, moving, reference,
                               viewer.shape)
        nav_viewer.bind()
        if args.repositioning_log_file is not None:
            nav_viewer.repositioning_log_file = args.repositioning_log_file
        sample = np.random.permutation(len(viewer.points))[:10000]
        if args.reference_points is not None:
            with open(args.reference_points) as fd:
                rp = np.array(json.load(fd))
                pts = rp[sample, ::-1]
        else:
            pts = viewer.points[sample, ::-1]
        nav_viewer.add_points(pts)
        print("Navigating viewer: %s" % nav_viewer.viewer.get_viewer_url())
    print("Hit control-c to exit")
    while True:
        time.sleep(10)
Example #18
#!/usr/bin/env python2
"""Tool for extending via equivalences a set of segments."""

from __future__ import absolute_import, print_function

import argparse
import copy
import os
import webbrowser

import neuroglancer
from neuroglancer.json_utils import decode_json, encode_json

neuroglancer.set_static_content_source(url='http://localhost:8080')


def get_segmentation_layer(layers):
    for layer in layers:
        if isinstance(layer.layer, neuroglancer.SegmentationLayer):
            return layer


class Annotator(object):
    def __init__(self, filename):
        self.filename = filename
        self.point_annotation_layer_name = 'false-merges'
        self.states = []
        self.state_index = None
        viewer = self.viewer = neuroglancer.Viewer()
        self.other_state_segment_ids = dict()
Example #19
import numpy as np

import neuroglancer

a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]], indexing='ij')
a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255

b = np.floor(np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10).astype(np.uint32)
b = np.pad(b, 1, 'constant')

# Obtain the bundled Neuroglancer client code (HTML, CSS, and JavaScript) from
# the demo server, so that this example works even if
#
#   python setup.py bundle_client
#
# has not been run.
neuroglancer.set_static_content_source(url='https://neuroglancer-demo.appspot.com')

viewer = neuroglancer.Viewer(voxel_size=[10, 10, 10])
viewer.add(a,
           name='a',
           # offset is in nm, not voxels
           offset=(200, 300, 150),
           shader="""
void main() {
  emitRGB(vec3(toNormalized(getDataValue(0)),
               toNormalized(getDataValue(1)),
               toNormalized(getDataValue(2))));
}
""")
viewer.add(b, name='b')
print(viewer)
Example #20
File: glance.py Project: seung-lab/yacn
    raw_input("press enter to continue")
    commit(cutout)
    raw_input("press enter to continue")
    load(cutout.pos)


current_index = 0


def next_index(jump=1):
    global current_index
    current_index = current_index + jump
    return current_index


neuroglancer.set_static_content_source(
    url='http://seung-titan02.pni.princeton.edu:8080')

#basename = sys.argv[1]
basename = os.path.expanduser("~/mydatasets/3_3_1/")
print("loading files...")
vertices = h5read(os.path.join(basename, "vertices.h5"), force=True)
edges = h5read(os.path.join(basename, "epoch1_edges.h5"), force=True)

V = Volume(
    basename,
    {
        "image": "image.h5",
        "errors": "epoch1_errors.h5",
        "raw_labels": "raw.h5",
        "affinities": "aff.h5",
        "valid_list": "valid.h5",
Example #21
#! /bin/env python

## basic shim to load up neuroglancer in a browser:
import neuroglancer
import logging
from time import sleep
import progproxy as pp
import redis

logging.basicConfig(level=logging.DEBUG)
# we are currently using the seunglab hosted neuroglancer static resources
# ideally this would be self hosted for local development against nglancer
logging.info("configuring neuroglancer defaults")
neuroglancer.set_static_content_source(
    url="https://neuromancer-seung-import.appspot.com")
## neuroglancer setup segment:
## set the tornado server that is launched to talk on all ips and at port 8080
neuroglancer.set_server_bind_address("0.0.0.0", "8080")

neuroglancer.debug = True
neuroglancer.server.debug = True

logging.info("starting viewer subprocess")
# setup a viewer with pre-configured defaults and launch.
viewer = neuroglancer.Viewer()

logging.info("viewer token: {}".format(viewer.token))

logging.info("setting viewers default volume")
# load data from cloudvolume container:
with viewer.txn() as s:
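    # (the transaction body is cut off in the source; WINDOW 3 below suggests
    # it adds the cloudvolume-served image layer, along the lines of:
    # s.layers[brainname] = neuroglancer.ImageLayer(
    #     source="precomputed://http://localhost:%s" % port))
    pass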
###WINDOW 2###
#to add another layer (aka the atlas), in a new ipython window:
from cloudvolume import CloudVolume

brainname = "20200701_12_55_28_20170207_db_bl6_crii_rpv_01"
port = 1350
layer_dir = "/jukebox/scratch/zmd/save/contra_ipsi_projection_studies_20191125/%s/atlas" % brainname
vol = CloudVolume(f"file://{layer_dir}")
vol.viewer(port=port + 1)  #make sure this port is different from the first

###WINDOW 3###
#in another ipython window run:
import neuroglancer

neuroglancer.set_static_content_source(
    url="https://nglancer.pni.princeton.edu")
brainname = "20200701_12_55_28_20170207_db_bl6_crii_rpv_01"
port = 1350
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    s.layers["%s" % brainname] = neuroglancer.ImageLayer(
        source="precomputed://http://localhost:%s" % port)
print(viewer)
#this should add the above volume to the neuroglancer window

with viewer.txn() as s:
    s.layers["%s_atlas" % brainname] = neuroglancer.SegmentationLayer(
        source="precomputed://http://localhost:%s" % int(port + 1))
print(viewer)
#this should add the atlas volume to the neuroglancer window
Example #23
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--bind-address",
                        help="The IP address to bind to as a webserver. "
                        "The default is 127.0.0.1 which is constrained to "
                        "the local machine.",
                        default="127.0.0.1")
    parser.add_argument("--port",
                        type=int,
                        help="HTTP port for server",
                        default=0)
    parser.add_argument("--gray-image",
                        help="Path to an image file to be displayed in gray.")
    parser.add_argument("--gray-image-name",
                        help="The name of the gray image",
                        default="image")
    parser.add_argument("--red-image",
                        help="Path to an image file to be displayed in red.")
    parser.add_argument("--red-image-name",
                        help="The name of the red image",
                        default="red")
    parser.add_argument("--green-image",
                        help="Path to an image file to be displayed in green.")
    parser.add_argument("--green-image-name",
                        help="The name of the green image",
                        default="green")
    parser.add_argument("--blue-image",
                        help="Path to an image file to be displayed in blue.")
    parser.add_argument("--blue-image-name",
                        help="The name of the blue image",
                        default="blue")
    parser.add_argument("--input-coordinates",
                        help="A JSON file of input coordinates as a list of "
                        "three-tuples in X, Y, Z order",
                        required=True)
    parser.add_argument("--yea-coordinates",
                        help="The name of a JSON file to be written with "
                        "the coordinates of \"yea\" points.")
    parser.add_argument("--nay-coordinates",
                        help="The name of a JSON file to be written with the "
                        "coordinates of \"nay\" points.")
    parser.add_argument("--no-browser",
                        help="Do not launch the browser",
                        action="store_true")
    parser.add_argument("--new-browser-window",
                        help="Open Neuroglancer in a new browser window.",
                        action="store_true")
    parser.add_argument("--static-content-source",
                        default=None,
                        help="The URL of the static content source, e.g. "
                        "http://localhost:8080 if being served via npm.")

    args = parser.parse_args()
    if args.static_content_source is not None:
        neuroglancer.set_static_content_source(url=args.static_content_source)
    neuroglancer.set_server_bind_address(args.bind_address,
                                         bind_port=args.port)

    with open(args.input_coordinates) as fd:
        points = np.array(json.load(fd))

    imgs = []
    for path, name, shader in (
            (args.gray_image, args.gray_image_name, gray_shader),
            (args.red_image, args.red_image_name, red_shader),
            (args.green_image, args.green_image_name, green_shader),
            (args.blue_image, args.blue_image_name, blue_shader)):
        if path is not None:
            img = tifffile.imread(path)
            imgs.append((img, name, shader))

    def save_cb(yea, nay):
        if args.yea_coordinates is not None:
            yea = yea.astype(points.dtype)
            with open(args.yea_coordinates, "w") as fd:
                json.dump(yea.tolist(), fd)
        if args.nay_coordinates is not None:
            nay = nay.astype(points.dtype)
            with open(args.nay_coordinates, "w") as fd:
                json.dump(nay.tolist(), fd)

    if args.no_browser:
        yea, nay = sort_points(imgs, points, save_cb=save_cb)
    elif args.new_browser_window:
        yea, nay = sort_points(imgs, points, launch_ui="new", save_cb=save_cb)
    else:
        yea, nay = sort_points(imgs, points, launch_ui=True, save_cb=save_cb)
    save_cb(yea, nay)
Example #24
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("files_and_colors", nargs="+",
                        help="File name followed by display name followed by"
                        "\"red\", \"green\", \"blue\", \"gray\", "
                        "\"jet\" or \"cubehelix\".")
    parser.add_argument("--segmentation",
                        default=None,
                        help="Segmentation volume to display")
    parser.add_argument("--ip-address",
                        default="127.0.0.1",
                        help="IP address of neuroglancer server.")
    parser.add_argument("--port",
                        default=0,
                        type=int,
                        help="Port # of neuroglancer server.")
    parser.add_argument("--static-content-source",
                        default=None,
                        help="The URL of the static content source, e.g. "
                        "http://localhost:8080 if being served via npm.")
    parser.add_argument("--points",
                        help="A points file in X, Y, Z order to display")
    parser.add_argument("--show-n",
                        type=int,
                        help="Show only a certain number of randomly selected "
                        "points.")
    args = parser.parse_args()
    if args.static_content_source is not None:
        neuroglancer.set_static_content_source(url=args.static_content_source)
    neuroglancer.set_server_bind_address(args.ip_address, args.port)
    viewer = neuroglancer.Viewer()
    with viewer.txn() as txn:
        for filename, name, colorname in zip(args.files_and_colors[::3],
                                             args.files_and_colors[1::3],
                                             args.files_and_colors[2::3]):
            if colorname.lower() == "red":
                shader = red_shader
            elif colorname.lower() == "green":
                shader = green_shader
            elif colorname.lower() == "blue":
                shader = blue_shader
            elif colorname.lower() in ("gray", "grey"):
                shader = gray_shader
            elif colorname.lower() == "jet":
                shader = jet_shader
            else:
                shader = cubehelix_shader
            if filename.startswith("precomputed://"):
                txn.layers[name] = neuroglancer.ImageLayer(
                    source=filename,
                    shader=shader % 1.0
                )
                continue
            paths = sorted(glob.glob(filename))
            if len(paths) == 0:
                sys.stderr.write("Could not find any files named %s\n" % filename)
                exit(1)
            elif len(paths) == 1:
                img = tifffile.imread(paths[0]).astype(np.float32)
            else:
                img = np.array([tifffile.imread(_) for _ in paths], np.float32)
            layer(txn, name, img, shader, 1.0)
        if args.segmentation is not None:
            seg = tifffile.imread(args.segmentation).astype(np.uint32)
            seglayer(txn, "segmentation", seg)
        if args.points is not None:
            with open(args.points) as fd:
                points = np.array(json.load(fd))
                if args.show_n is not None:
                    points = points[np.random.choice(len(points), args.show_n)]
                pointlayer(txn, "points",
                           points[:, 0], points[:, 1], points[:, 2], "red")

    print(viewer.get_viewer_url())
    webbrowser.open(viewer.get_viewer_url())
    while True:
        time.sleep(5)
Example #25
def set_neuroglancer_static_source(url):
    neuroglancer.set_static_content_source(url=url)
Example #26
# (the snippet starts mid-call; the CoordinateSpace head and the viewer,
# assumed here, follow the pattern of the other examples)
dimensions = neuroglancer.CoordinateSpace(
    names=['x', 'y', 'z'],
    units='nm',
    scales=[10, 10, 10],
)
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    s.layers.append(
        name='a',
        layer=neuroglancer.SegmentationLayer(
            source=[
                neuroglancer.LocalVolume(
                    data=segmentation,
                    dimensions=dimensions,
                ),
                SkeletonSource(dimensions),
            ],
            skeleton_shader='void main() { emitRGB(colormapJet(affinity)); }',
            selected_alpha=0,
            not_selected_alpha=0,
        ))

if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument('--static-content-url')
    ap.add_argument('-a', '--bind-address')
    args = ap.parse_args()
    neuroglancer.server.debug = True
    if args.bind_address:
        neuroglancer.set_server_bind_address(args.bind_address)
    if args.static_content_url:
        neuroglancer.set_static_content_source(url=args.static_content_url)
    print(viewer)
Example #27
logging.info("configuring neuroglancer defaults")
task = subprocess.Popen("ip route | awk 'NR==1 {print $3}'",
                        shell=True, stdout=subprocess.PIPE)
data = task.stdout.read()
localhost_addr = data.decode('utf-8').strip('\n')
logging.debug("Got localhost address inside container:")
logging.debug(localhost_addr)
# response = requests.get(f'http://nglancerstatic:8080')
response = requests.get(f'http://{localhost_addr}:8080')
logging.debug("Response from neuroglancer container is:")
logging.debug(response)
logging.debug(response.text)
# neuroglancer.set_static_content_source(
#     url="https://neuromancer-seung-import.appspot.com"
# )
neuroglancer.set_static_content_source(
    url=f"http://{localhost_addr}:8080"
)
## neuroglancer setup segment:
## set the tornado server that is launched to talk on all ips and at port 8081
neuroglancer.set_server_bind_address("0.0.0.0", 8081)

neuroglancer.debug = True
neuroglancer.server.debug = True

logging.info("starting viewer subprocess")
# setup a viewer with pre-configured defaults and launch.
viewer = neuroglancer.Viewer()
# sleep(0.5)
logging.info("viewer token: {}".format(viewer.token))

logging.info("setting viewers default volume")