def convert_chunks(source_url, dest_url, copy_info=False,
                   options={}):
    """Convert precomputed chunks between different encodings"""
    source_accessor = neuroglancer_scripts.accessor.get_accessor_for_url(
        source_url
    )
    chunk_reader = precomputed_io.get_IO_for_existing_dataset(source_accessor)
    source_info = chunk_reader.info
    dest_accessor = neuroglancer_scripts.accessor.get_accessor_for_url(
        dest_url, options
    )
    if copy_info:
        chunk_writer = precomputed_io.get_IO_for_new_dataset(
            source_info, dest_accessor, encoder_options=options
        )
    else:
        chunk_writer = precomputed_io.get_IO_for_existing_dataset(
            dest_accessor, encoder_options=options
        )
    dest_info = chunk_writer.info

    chunk_transformer = data_types.get_chunk_dtype_transformer(
        source_info["data_type"], dest_info["data_type"]
    )
    for scale_index in reversed(range(len(dest_info["scales"]))):
        convert_chunks_for_scale(chunk_reader,
                                 dest_info, chunk_writer, scale_index,
                                 chunk_transformer)
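
A minimal usage sketch of convert_chunks (the paths are hypothetical, and the
"gzip" accessor option is an assumption about what the file accessor accepts):

# Re-encode an existing local dataset into a new directory, copying its
# 'info' file so the destination keeps the same layout and data type.
convert_chunks(
    "/data/precomputed/source",
    "/data/precomputed/converted",
    copy_info=True,
    options={"gzip": True},  # assumed accessor option
)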
Example #2
def volume_file_to_precomputed(volume_filename,
                               dest_url,
                               dset_name=None,
                               ignore_scaling=False,
                               input_min=None,
                               input_max=None,
                               load_full_volume=True,
                               options={}):
    if dset_name is None:
        img = nibabel.load(volume_filename)
        accessor = neuroglancer_scripts.accessor.get_accessor_for_url(
            dest_url, options
        )
        print(img)
        try:
            precomputed_writer = precomputed_io.get_IO_for_existing_dataset(
                accessor
            )
        except neuroglancer_scripts.accessor.DataAccessError as exc:
            logger.error("No 'info' file was found (%s). You can generate one by "
                         "running this program with the --generate-info option, "
                         "then using generate_scales_info.py on the result",
                         exc)
            return 1
        except ValueError as exc:  # TODO use specific exception for invalid JSON
            logger.error("Invalid 'info' file: %s", exc)
            return 1
        return nibabel_image_to_precomputed(img, precomputed_writer,
                                            ignore_scaling, input_min, input_max,
                                            load_full_volume, options)
    else:
        if os.path.splitext(volume_filename)[-1] == ".zarr":
            img = zarr.open_group(volume_filename, "r")
        else:
            img = h5py.File(volume_filename, "r")
        accessor = neuroglancer_scripts.accessor.get_accessor_for_url(
            dest_url, options
        )
        try:
            precomputed_writer = precomputed_io.get_IO_for_existing_dataset(
                accessor
            )
        except neuroglancer_scripts.accessor.DataAccessError as exc:
            logger.error("No 'info' file was found (%s). You can generate one by "
                         "running this program with the --generate-info option, "
                         "then using generate_scales_info.py on the result",
                         exc)
            return 1
        except ValueError as exc:  # TODO use specific exception for invalid JSON
            logger.error("Invalid 'info' file: %s", exc)
            return 1
        return h5py_file_to_precomputed(img, dset_name, precomputed_writer,
                                        load_full_volume)
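
Hypothetical usage sketches for both branches (file names and the dataset name
are illustrative): passing dset_name selects the HDF5/Zarr code path, while
the default path reads the volume with nibabel.

# NIfTI input, read through nibabel
volume_file_to_precomputed("volume.nii.gz", "/data/precomputed/volume")
# HDF5 input, read through h5py; "raw" is an assumed dataset name
volume_file_to_precomputed("volume.h5", "/data/precomputed/volume",
                           dset_name="raw")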
Example #3
def make_mesh_fragment_links(input_csv,
                             dest_url,
                             no_colon_suffix=False,
                             options={}):
    if no_colon_suffix:
        filename_format = "{0}/{1}"
    else:
        filename_format = "{0}/{1}:0"
    accessor = neuroglancer_scripts.accessor.get_accessor_for_url(
        dest_url, options)
    info = precomputed_io.get_IO_for_existing_dataset(accessor).info
    if "mesh" not in info:
        logger.critical('The info file is missing the "mesh" key, please '
                        'use mesh-to-precomputed first.')
        return 1
    mesh_dir = info["mesh"]

    with open(input_csv, newline="") as csv_file:
        for line in csv.reader(csv_file):
            numeric_label = int(line[0])
            fragment_list = line[1:]
            # Output a warning for missing fragments
            for fragment_name in fragment_list:
                if not accessor.file_exists(mesh_dir + "/" + fragment_name):
                    logger.warning("missing fragment %s", fragment_name)
            relative_filename = filename_format.format(mesh_dir, numeric_label)
            json_str = json.dumps({"fragments": fragment_list},
                                  separators=(",", ":"))
            accessor.store_file(relative_filename,
                                json_str.encode("utf-8"),
                                mime_type="application/json")
Example #4
def mesh_file_to_precomputed(input_path,
                             dest_url,
                             mesh_name=None,
                             mesh_dir=None,
                             coord_transform=None,
                             options={}):
    """Convert a mesh read by nibabel to Neuroglancer precomputed format"""
    input_path = pathlib.Path(input_path)
    accessor = neuroglancer_scripts.accessor.get_accessor_for_url(
        dest_url, options)
    info = precomputed_io.get_IO_for_existing_dataset(accessor).info
    if mesh_dir is None:
        mesh_dir = "mesh"  # default value
    if "mesh" not in info:
        info["mesh"] = mesh_dir
        # Write the updated info file
        precomputed_io.get_IO_for_new_dataset(info,
                                              accessor,
                                              overwrite_info=True)
    if mesh_dir != info["mesh"]:
        logger.critical("The provided --mesh-dir value does not match the "
                        "value stored in the info file")
        return 1
    if info["type"] != "segmentation":
        logger.warning('The dataset has type "image" instead of '
                       '"segmentation"; Neuroglancer will not use the meshes.')

    if mesh_name is None:
        mesh_name = input_path.stem

    mesh = nibabel.load(str(input_path))

    points_list = mesh.get_arrays_from_intent("NIFTI_INTENT_POINTSET")
    assert len(points_list) == 1
    points = points_list[0].data

    triangles_list = mesh.get_arrays_from_intent("NIFTI_INTENT_TRIANGLE")
    assert len(triangles_list) == 1
    triangles = triangles_list[0].data

    if coord_transform is not None:
        points_dtype = points.dtype
        points, triangles = neuroglancer_scripts.mesh.affine_transform_mesh(
            points, triangles, coord_transform)
        # Convert vertices back to their original type to avoid the warning
        # that save_mesh_as_precomputed prints when downcasting to float32.
        points = points.astype(np.promote_types(points_dtype, np.float32),
                               casting="same_kind")

    # Gifti uses millimetres, Neuroglancer expects nanometres.
    # points can be a read-only array, so we cannot use the *= operator.
    points = 1e6 * points

    io_buf = io.BytesIO()
    neuroglancer_scripts.mesh.save_mesh_as_precomputed(
        io_buf, points, triangles.astype("uint32"))
    accessor.store_file(mesh_dir + "/" + mesh_name,
                        io_buf.getvalue(),
                        mime_type="application/octet-stream")
def compute_scales(work_dir=".", downscaling_method="average", options={}):
    """Generate lower scales following an input info file"""
    accessor = neuroglancer_scripts.accessor.get_accessor_for_url(
        work_dir, options)
    pyramid_io = precomputed_io.get_IO_for_existing_dataset(
        accessor, encoder_options=options)
    downscaler = neuroglancer_scripts.downscaling.get_downscaler(
        downscaling_method, options)
    neuroglancer_scripts.dyadic_pyramid.compute_dyadic_scales(
        pyramid_io, downscaler)
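
A minimal usage sketch (the path is hypothetical); "average" is the default
downscaling method named in the signature above.

compute_scales(work_dir="/data/precomputed/volume",
               downscaling_method="average")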
Example #6
def test_precomputed_IO_chunk_roundtrip(tmpdir):
    accessor = get_accessor_for_url(str(tmpdir))
    # Minimal info file
    io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
    dummy_chunk = np.arange(8 * 3 * 7, dtype="uint16").reshape(1, 7, 3, 8)
    chunk_coords = (0, 8, 0, 3, 8, 15)
    io.write_chunk(dummy_chunk, "key", chunk_coords)
    assert np.array_equal(io.read_chunk("key", chunk_coords), dummy_chunk)

    io2 = get_IO_for_existing_dataset(accessor)
    assert io2.info == DUMMY_INFO
    assert np.array_equal(io2.read_chunk("key", chunk_coords), dummy_chunk)
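
DUMMY_INFO is defined elsewhere in the test module; a minimal info dictionary
consistent with the chunk written above (uint16, single channel, an 8x3x8
chunk grid over an 8x3x15 volume) might look like the following sketch, which
is an illustrative assumption rather than the actual constant:

# DUMMY_INFO = {
#     "type": "image",
#     "data_type": "uint16",
#     "num_channels": 1,
#     "scales": [{
#         "key": "key",
#         "size": [8, 3, 15],
#         "resolution": [1000000, 1000000, 1000000],
#         "voxel_offset": [0, 0, 0],
#         "chunk_sizes": [[8, 3, 8]],
#         "encoding": "raw",
#     }],
# }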
Example #7
def test_precomputed_IO_info_error(tmpdir):
    with (tmpdir / "info").open("w") as f:
        f.write("invalid JSON")
    accessor = get_accessor_for_url(str(tmpdir))
    with pytest.raises(InvalidInfoError):
        get_IO_for_existing_dataset(accessor)
def slices_to_raw_chunks(slice_filename_lists,
                         dest_url,
                         input_orientation,
                         options={}):
    """Convert a list of 2D slices to Neuroglancer pre-computed chunks.

    :param dict info: the JSON dictionary that describes the dataset for
        Neuroglancer. Only the information from the first scale is used.
    :param list slice_filename_lists: a list of lists of filenames. Files from
        each inner list are read as 2D images and concatenated along the third
        axis. Blocks from the outer list are concatenated along a fourth axis,
        representing the image channels.
    :param tuple input_axis_inversions: a 3-tuple in (column, row, slice)
        order. Each value must be 1 (preserve orientation along the axis) or -1
        (invert orientation along the axis).
    :param tuple input_axis_permutation: a 3-tuple in (column, row, slice)
      order. Each value is 0 for X (L-R axis), 1 for Y (A-P axis), 2 for Z (I-S
      axis).
    """
    accessor = neuroglancer_scripts.accessor.get_accessor_for_url(
        dest_url, options)
    pyramid_writer = precomputed_io.get_IO_for_existing_dataset(
        accessor, encoder_options=options)
    info = pyramid_writer.info

    assert len(info["scales"][0]["chunk_sizes"]) == 1  # more not implemented
    chunk_size = info["scales"][0]["chunk_sizes"][0]  # in RAS order (X, Y, Z)
    size = info["scales"][0]["size"]  # in RAS order (X, Y, Z)
    key = info["scales"][0]["key"]
    dtype = np.dtype(info["data_type"]).newbyteorder("<")
    num_channels = info["num_channels"]

    input_axis_permutation = tuple(AXIS_PERMUTATION_FOR_RAS[a]
                                   for a in input_orientation)
    input_axis_inversions = tuple(AXIS_INVERSION_FOR_RAS[a]
                                  for a in input_orientation)

    # Here x, y, and z refer to the data orientation in output chunks (which
    # should correspond to RAS+ anatomical axes). For the data orientation in
    # input slices the terms (column (index along image width), row (index
    # along image height), slice) are used.

    # permutation_to_input is a 3-tuple in RAS (X, Y, Z) order.
    # Each value is 0 column, 1 for row, 2 for slice.
    permutation_to_input = invert_permutation(input_axis_permutation)

    # input_size and input_chunk_size are in (column, row, slice) order.
    input_size = permute(size, input_axis_permutation)
    input_chunk_size = permute(chunk_size, input_axis_permutation)

    for filename_list in slice_filename_lists:
        if len(filename_list) != input_size[2]:
            raise ValueError("{} slices found where {} were expected".format(
                len(filename_list), input_size[2]))

    for slice_chunk_idx in trange(
            (input_size[2] - 1) // input_chunk_size[2] + 1,
            desc="converting slice groups",
            leave=True,
            unit="slice groups"):
        first_slice_in_order = input_chunk_size[2] * slice_chunk_idx
        last_slice_in_order = min(input_chunk_size[2] * (slice_chunk_idx + 1),
                                  input_size[2])

        if input_axis_inversions[2] == -1:
            first_slice = input_size[2] - first_slice_in_order - 1
            last_slice = input_size[2] - last_slice_in_order - 1
        else:
            first_slice = first_slice_in_order
            last_slice = last_slice_in_order
        slice_slicing = np.s_[first_slice:last_slice:input_axis_inversions[2]]
        tqdm.write("Reading slices {0} to {1} ({2}B memory needed)... ".format(
            first_slice, last_slice - input_axis_inversions[2],
            readable_count(input_size[0] * input_size[1] *
                           (last_slice_in_order - first_slice_in_order + 1) *
                           num_channels * dtype.itemsize)))

        def load_z_stack(slice_filenames):
            # Loads the data in [slice, row, column] C-contiguous order
            block = skimage.io.concatenate_images(
                skimage.io.imread(str(filename))
                for filename in slice_filenames[slice_slicing])
            assert block.shape[2] == input_size[0]  # check slice width
            assert block.shape[1] == input_size[1]  # check slice height
            if block.ndim == 4:
                # Scikit-image loads multi-channel (e.g. RGB) images in [slice,
                # row, column, channel] order, while Neuroglancer expects
                # channel to come first (in C-contiguous indexing).
                block = np.moveaxis(block, (3, 0, 1, 2), (0, 1, 2, 3))
            elif block.ndim == 3:
                block = block[np.newaxis, :, :, :]
            else:
                raise ValueError(
                    "block has unexpected dimensionality (ndim={})".format(
                        block.ndim))
            return block

        # Concatenate all channels from different directories
        block = np.concatenate(
            [load_z_stack(filename_list)
             for filename_list in slice_filename_lists],
            axis=0)
        assert block.shape[0] == num_channels

        # Flip and permute axes to go from input (channel, slice, row, column)
        # to Neuroglancer (channel, Z, Y, X)
        block = block[:, :,
                      ::input_axis_inversions[1],
                      ::input_axis_inversions[0]]
        block = np.moveaxis(block, (3, 2, 1),
                            (3 - a for a in input_axis_permutation))
        # equivalent: np.transpose(block, axes=([0] + [3 - a for a in
        # reversed(invert_permutation(input_axis_permutation))]))

        chunk_dtype_transformer = get_chunk_dtype_transformer(
            block.dtype, dtype)

        progress_bar = tqdm(
            total=(((input_size[1] - 1) // input_chunk_size[1] + 1) *
                   ((input_size[0] - 1) // input_chunk_size[0] + 1)),
            desc="writing chunks",
            unit="chunks",
            leave=False)

        for row_chunk_idx in range(
                (input_size[1] - 1) // input_chunk_size[1] + 1):
            row_slicing = np.s_[
                input_chunk_size[1] * row_chunk_idx
                :min(input_chunk_size[1] * (row_chunk_idx + 1), input_size[1])]
            for column_chunk_idx in range(
                    (input_size[0] - 1) // input_chunk_size[0] + 1):
                column_slicing = np.s_[
                    input_chunk_size[0] * column_chunk_idx
                    :min(input_chunk_size[0] * (column_chunk_idx + 1),
                         input_size[0])]

                input_slicing = (column_slicing, row_slicing, np.s_[:])
                x_slicing, y_slicing, z_slicing = permute(
                    input_slicing, permutation_to_input)
                chunk = block[:, z_slicing, y_slicing, x_slicing]

                # This variable represents the coordinates with real slice
                # numbers, instead of within-block slice numbers.
                input_coords = ((column_slicing.start, column_slicing.stop),
                                (row_slicing.start, row_slicing.stop),
                                (first_slice_in_order, last_slice_in_order))
                x_coords, y_coords, z_coords = permute(input_coords,
                                                       permutation_to_input)
                assert chunk.size == ((x_coords[1] - x_coords[0]) *
                                      (y_coords[1] - y_coords[0]) *
                                      (z_coords[1] - z_coords[0]) *
                                      num_channels)
                chunk_coords = (x_coords[0], x_coords[1], y_coords[0],
                                y_coords[1], z_coords[0], z_coords[1])
                pyramid_writer.write_chunk(
                    chunk_dtype_transformer(chunk, preserve_input=False), key,
                    chunk_coords)
                progress_bar.update()
        # free up memory before reading next block (prevent doubled memory
        # usage)
        del block
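
A hypothetical usage sketch: two directories of PNG slices become the two
channels of the output volume. The glob patterns, the destination path, and
the "RAS" orientation code are illustrative assumptions.

import glob
red_slices = sorted(glob.glob("red/*.png"))
green_slices = sorted(glob.glob("green/*.png"))
slices_to_raw_chunks([red_slices, green_slices],
                     "/data/precomputed/stack",
                     "RAS")
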
def show_scale_file_info(url, options={}):
    """Show information about a list of scales."""
    accessor = neuroglancer_scripts.accessor.get_accessor_for_url(url, options)
    io = precomputed_io.get_IO_for_existing_dataset(accessor)
    info = io.info
    show_scales_info(info)
Example #10
    def _bootstrap(self):
        accessor = get_accessor_for_url(self.url)
        self._io = get_IO_for_existing_dataset(accessor)
        self._scales_cached = sorted(
            [NeuroglancerScale(self, i) for i in self._io.info["scales"]])