Example #1
def create_downsample_scales(layer_path,
                             mip,
                             ds_shape,
                             axis='z',
                             preserve_chunk_size=False,
                             chunk_size=None,
                             encoding=None,
                             factor=None):
    vol = CloudVolume(layer_path, mip)

    scales = compute_scales(vol, mip, ds_shape, axis, factor, chunk_size)

    if len(scales) == 0:
        print("WARNING: No scales generated.")

    for scale in scales:
        scale = scale // vol.meta.resolution(0)
        vol.add_scale(scale, encoding=encoding, chunk_size=chunk_size)

    if chunk_size is None:
        if preserve_chunk_size or len(scales) == 0:
            chunk_size = vol.scales[mip]['chunk_sizes']
        else:
            chunk_size = vol.scales[mip + 1]['chunk_sizes']
    else:
        chunk_size = [chunk_size]

    if encoding is None:
        encoding = vol.scales[mip]['encoding']

    for i in range(mip + 1, mip + len(scales) + 1):
        vol.scales[i]['chunk_sizes'] = chunk_size

    vol.commit_info()
    return vol
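A minimal usage sketch for the function above. The layer path, mip, and downsample shape are placeholders, and cloudvolume's Vec is assumed to be importable; treat this as illustrative, not canonical.

from cloudvolume import Vec

# Hypothetical call: register downsample scales for a 512x512x64 task shape
# starting from mip 0 of an existing layer (the path is a placeholder).
vol = create_downsample_scales(
    "gs://example-bucket/example-dataset/image",  # placeholder path
    mip=0,
    ds_shape=Vec(512, 512, 64),
    preserve_chunk_size=True,
)
print(vol.available_mips)  # the mips now registered in the info file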
Example #2
def create_downsample_scales(layer_path,
                             mip,
                             ds_shape,
                             axis='z',
                             preserve_chunk_size=False,
                             chunk_size=None,
                             encoding=None):
    vol = CloudVolume(layer_path, mip)
    shape = min2(vol.volume_size, ds_shape)

    # sometimes we downsample a base layer of 512x512
    # into underlying chunks of 64x64 which permits more scales
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    underlying_shape = vol.mip_underlying(underlying_mip).astype(np.float32)

    if chunk_size:
        underlying_shape = Vec(*chunk_size).astype(np.float32)

    toidx = {'x': 0, 'y': 1, 'z': 2}
    preserved_idx = toidx[axis]
    underlying_shape[preserved_idx] = float('inf')

    scales = downsample_scales.compute_plane_downsampling_scales(
        size=shape,
        preserve_axis=axis,
        max_downsampled_size=int(min(*underlying_shape)),
    )
    scales = scales[1:]  # omit (1,1,1)
    scales = [
        list(map(int, vol.downsample_ratio * Vec(*factor3)))
        for factor3 in scales
    ]

    if len(scales) == 0:
        print("WARNING: No scales generated.")

    for scale in scales:
        vol.add_scale(scale, encoding=encoding, chunk_size=chunk_size)

    if chunk_size is None:
        if preserve_chunk_size or len(scales) == 0:
            chunk_size = vol.scales[mip]['chunk_sizes']
        else:
            chunk_size = vol.scales[mip + 1]['chunk_sizes']
    else:
        chunk_size = [chunk_size]

    if encoding is None:
        encoding = vol.scales[mip]['encoding']

    for i in range(mip + 1, mip + len(scales) + 1):
        vol.scales[i]['chunk_sizes'] = chunk_size

    return vol.commit_info()
Example #3
def create_cloud_volume(precomputed_path,
                        img_size,
                        voxel_size,
                        num_hierarchy_levels=5,
                        parallel=True):
    global args
    if args.dtype == 'uint64':
        layer_type = 'segmentation'
    else:
        layer_type = 'image'
    info = CloudVolume.create_new_info(
        num_channels=1,
        layer_type=layer_type,
        data_type=args.dtype,  # Channel images might be 'uint8'
        encoding='raw',  # raw, jpeg, compressed_segmentation, fpzip, kempressed
        resolution=voxel_size,  # Voxel scaling, units are in nanometers
        voxel_offset=[0, 0, 0],  # x,y,z offset in voxels from the origin
        # Pick a convenient size for your underlying chunk representation
        # Powers of two are recommended, doesn't need to cover image exactly
        chunk_size=[512, 512, 1],  # units are voxels
        volume_size=img_size,  # e.g. a cubic millimeter dataset
    )
    vol = CloudVolume(precomputed_path, info=info, parallel=parallel)
    # add downsampled mips
    for i in range(num_hierarchy_levels):
        vol.add_scale((2**i, 2**i, 1), chunk_size=[512, 512, 1])
    vol.commit_info()
    return vol
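A hedged usage sketch for the variant above: it reads the module-level args for its dtype, so the snippet sets one up first. The S3 path, image size, and voxel size are placeholder values.

import argparse
import numpy as np

# create_cloud_volume reads the global `args` for its dtype, so one must exist
# in its module before the call (argparse.Namespace here is purely illustrative).
args = argparse.Namespace(dtype="uint16")

vol = create_cloud_volume(
    "s3://example-bucket/example-layer",  # placeholder path
    img_size=[2048, 2048, 16],
    voxel_size=[500, 500, 1000],          # nm
    num_hierarchy_levels=3,
)
# Write a single chunk-aligned 512x512x1 block of random data to mip 0.
chunk = np.random.randint(0, 2**16, size=(512, 512, 1), dtype="uint16")
vol[0:512, 0:512, 0:1] = chunk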
Example #4
def create_binarized_vol(vol_path_bin,
                         vol_path_old,
                         ltype="image",
                         dtype="float32",
                         res=0,
                         parallel=False):
    vol = CloudVolume(vol_path_old)
    info = CloudVolume.create_new_info(
        num_channels=1,
        layer_type=ltype,
        data_type=dtype,  # Channel images might be 'uint8'
        encoding="raw",  # raw, jpeg, compressed_segmentation, fpzip, kempressed
        resolution=vol.scales[res]
        ["resolution"],  # Voxel scaling, units are in nanometers
        voxel_offset=[0, 0, 0],  # x,y,z offset in voxels from the origin
        # Pick a convenient size for your underlying chunk representation
        # Powers of two are recommended, doesn't need to cover image exactly
        chunk_size=[1024, 1024, 1],  # units are voxels
        volume_size=vol.scales[res]["size"],
    )
    bin_vol = CloudVolume(vol_path_bin, info=info, parallel=parallel)
    for i in range(6):
        bin_vol.add_scale((2**i, 2**i, 1), chunk_size=[1024, 1024, 1])
    bin_vol.commit_info()
    return bin_vol
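A usage sketch under assumed paths: derive a "binarized" layer from an existing precomputed volume and write one thresholded, chunk-aligned cutout into it. Both cloudpaths, the 0.5 threshold, and the assumption that the source is at least 1024x1024 in x/y are illustrative.

import numpy as np
from cloudvolume import CloudVolume

src_path = "s3://example-bucket/raw-image"      # placeholder source layer
bin_vol = create_binarized_vol(
    "s3://example-bucket/raw-image-binarized",  # placeholder destination
    src_path,
    dtype="float32",
    res=0,
)
# Threshold one 1024x1024x1 cutout (matching the chunk size) and write it back.
src = CloudVolume(src_path, mip=0)
cutout = np.asarray(src[0:1024, 0:1024, 0:1], dtype="float32")
bin_vol[0:1024, 0:1024, 0:1] = (cutout > 0.5).astype("float32")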
Example #5
def create_cloud_volume(
    precomputed_path,
    img_size,
    voxel_size,
    num_downsampling_levels=6,
    chunk_size=[1024, 1024, 1],
    parallel=False,
    layer_type="image",
    dtype="uint16",
):
    info = CloudVolume.create_new_info(
        num_channels=1,
        layer_type=layer_type,
        data_type=dtype,  # Channel images might be 'uint8'
        encoding="raw",  # raw, jpeg, compressed_segmentation, fpzip, kempressed
        resolution=voxel_size,  # Voxel scaling, units are in nanometers
        voxel_offset=[0, 0, 0],  # x,y,z offset in voxels from the origin
        # Pick a convenient size for your underlying chunk representation
        # Powers of two are recommended, doesn't need to cover image exactly
        chunk_size=chunk_size,  # units are voxels
        volume_size=img_size,  # e.g. a cubic millimeter dataset
    )
    vol = CloudVolume(precomputed_path, info=info, parallel=parallel)
    for i in range(num_downsampling_levels):
        vol.add_scale((2**i, 2**i, 1), chunk_size=chunk_size)

    vol.commit_info()
    return vol
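A quick, hypothetical check of what the function above sets up: each added scale halves the x/y extent while z stays fixed. The path and sizes are placeholders; the scales list comes straight from the committed info.

vol = create_cloud_volume(
    "gs://example-bucket/another-layer",  # placeholder path
    img_size=[4096, 4096, 32],
    voxel_size=[500, 500, 1000],          # nm
    num_downsampling_levels=3,
    chunk_size=[512, 512, 1],
)
for scale in vol.scales:
    # resolution doubles in x/y per level; size halves in x/y, z unchanged
    print(scale["resolution"], scale["size"])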
Example #6
def get_dst_cloudvolume(spec, dst_path, res, parallel=1, data_type="float32"):
    """Create CloudVolume where result will be written

    Args:
        spec (dict)
        dst_path (str)
        res (Vec): MIP0
        parallel (int)
        data_type (str)

    Returns:
        CloudVolume
    """
    mip = spec["dst_mip"]
    info = CloudVolume.create_new_info(
        num_channels=1,
        layer_type="image",
        data_type=data_type,
        encoding="raw",
        resolution=res,
        voxel_offset=[0, 0, 0],
        chunk_size=[spec["x_size"], spec["y_size"], 1],
        volume_size=[
            spec["x_size"] * 2**mip,
            spec["y_size"] * 2**mip,
            2 * len(spec["pairs"]),
        ],
    )

    dst = CloudVolume(dst_path,
                      mip=0,
                      info=info,
                      cdn_cache=False,
                      parallel=parallel)
    for m in range(1, mip + 1):
        factor = Vec(2**m, 2**m, 1)
        dst.add_scale(factor=factor,
                      chunk_size=[spec["x_size"], spec["y_size"], 1])

    dst.commit_info()
    dst = CloudVolume(dst_path, mip=mip, cdn_cache=False, parallel=parallel)
    return dst
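An illustrative call with a made-up spec: the keys shown ("dst_mip", "x_size", "y_size", "pairs") are the ones the function reads, and the destination path, resolution, and pair list are placeholders.

from cloudvolume import Vec

spec = {
    "dst_mip": 2,
    "x_size": 1024,
    "y_size": 1024,
    "pairs": [(0, 1), (2, 3)],  # placeholder section pairs
}
dst = get_dst_cloudvolume(
    spec,
    dst_path="gs://example-bucket/alignment/result",  # placeholder path
    res=Vec(4, 4, 40),                                # MIP0 resolution in nm
    parallel=1,
)
print(dst.mip, dst.chunk_size)  # opened at dst_mip, one chunk per section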
Example #7
def create_skeleton_layer(s3_bucket, skel_res, img_dims, num_res=7):
    """Creates segmentation layer for skeletons

    Arguments:
        s3_bucket {str} -- path to precomputed skeleton destination
        skel_res {list} -- x,y,z dimensions of highest res voxel size (nm)
        img_dims {list} -- x,y,z voxel dimensions of tiff images

    Keyword Arguments:
        num_res {int} -- number of image resolutions to be downsampled

    Returns:
        vol {cloudvolume.CloudVolume} -- CloudVolume to upload skeletons to
    """
    # create cloudvolume info
    info = CloudVolume.create_new_info(
        num_channels=1,
        layer_type="segmentation",
        data_type="uint64",  # Channel images might be 'uint8'
        encoding="raw",  # raw, jpeg, compressed_segmentation, fpzip, kempressed
        # Voxel scaling, units are in nanometers
        resolution=skel_res,
        voxel_offset=[0, 0, 0],  # x,y,z offset in voxels from the origin
        # Pick a convenient size for your underlying chunk representation
        # Powers of two are recommended, doesn't need to cover image exactly
        chunk_size=[int(i / 4) for i in img_dims],
        # chunk_size=[128, 128, 64],  # units are voxels
        volume_size=[i * 2 ** (num_res - 1) for i in img_dims],  # units are voxels
        skeletons="skeletons",
    )
    skel_info = {
        "@type": "neuroglancer_skeletons",
        "transform": [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
        "vertex_attributes": [
            {"id": "radius", "data_type": "float32", "num_components": 1},
            {"id": "vertex_types", "data_type": "float32", "num_components": 1},
            {"id": "vertex_color", "data_type": "float32", "num_components": 4},
        ],
    }
    # get cloudvolume info
    vol = CloudVolume(s3_bucket, info=info, parallel=True)
    for i in range(num_res):  # num_res - 1
        vol.add_scale((2 ** i, 2 ** i, 2 ** i))
    vol.commit_info()

    # upload skeleton info to /skeletons/ dir
    with storage.SimpleStorage(vol.cloudpath) as stor:
        stor.put_json(str(Path("skeletons") / "info"), skel_info)

    return vol
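A sketch of creating the skeleton layer for a hypothetical 1024x1024x512 image stack; the bucket path and voxel size are placeholders, and the returned volume is the one skeletons would later be uploaded to.

vol = create_skeleton_layer(
    "s3://example-bucket/skeleton-layer",  # placeholder destination
    skel_res=[300, 300, 1000],             # nm per voxel at highest resolution
    img_dims=[1024, 1024, 512],
    num_res=5,
)
print(vol.volume_size)  # x/y/z extent 16384, 16384, 8192 for these hypothetical inputs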
Example #8
def create_downsample_scales(layer_path,
                             mip,
                             ds_shape,
                             axis='z',
                             preserve_chunk_size=False):
    vol = CloudVolume(layer_path, mip)
    shape = min2(vol.volume_size, ds_shape)

    # sometimes we downsample a base layer of 512x512
    # into underlying chunks of 64x64 which permits more scales
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    underlying_shape = vol.mip_underlying(underlying_mip).astype(np.float32)

    toidx = {'x': 0, 'y': 1, 'z': 2}
    preserved_idx = toidx[axis]
    underlying_shape[preserved_idx] = float('inf')

    scales = downsample_scales.compute_plane_downsampling_scales(
        size=shape,
        preserve_axis=axis,
        max_downsampled_size=int(min(*underlying_shape)),
    )
    scales = scales[1:]  # omit (1,1,1)
    scales = [
        list(map(int, vol.downsample_ratio * Vec(*factor3)))
        for factor3 in scales
    ]

    for scale in scales:
        vol.add_scale(scale)

    if preserve_chunk_size:
        for i in range(1, len(vol.scales)):
            vol.scales[i]['chunk_sizes'] = vol.scales[0]['chunk_sizes']

    return vol.commit_info()
Example #9
def create_cloud_volume(
    precomputed_path,
    img_size,
    voxel_size,
    num_mips,
    chunk_size,
    parallel=False,
    layer_type="image",
    dtype="uint16",
):
    """Create Neuroglancer precomputed volume S3

    Args:
        precomputed_path (str): S3 Path to location where precomputed layer will be stored
        img_size (list of int): Size of the image (in 3D) to be uploaded
        voxel_size (list of float): Voxel size in nanometers
        num_mips (int): Number of downsampling levels in X and Y
        chunk_size (list of int): Size of each chunk stored on S3, e.g. [1024, 1024, 1]
        parallel (bool, optional): Whether or not the returned CloudVolume object will use parallel threads. Defaults to False.
        layer_type (str, optional): Neuroglancer type of layer. Can be image or segmentation. Defaults to "image".
        dtype (str, optional): Datatype of precomputed volume. Defaults to "uint16".

    Returns:
        cloudvolume.CloudVolume: CloudVolume object associated with this precomputed volume
    """
    info = CloudVolume.create_new_info(
        num_channels=1,
        layer_type=layer_type,
        data_type=dtype,  # Channel images might be 'uint8'
        encoding="raw",  # raw, jpeg, compressed_segmentation, fpzip, kempressed
        resolution=voxel_size,  # Voxel scaling, units are in nanometers
        voxel_offset=[0, 0, 0],  # x,y,z offset in voxels from the origin
        # Pick a convenient size for your underlying chunk representation
        # Powers of two are recommended, doesn't need to cover image exactly
        chunk_size=chunk_size,  # units are voxels
        volume_size=img_size,  # e.g. a cubic millimeter dataset
    )
    vol = CloudVolume(precomputed_path, info=info, parallel=parallel)
    for i in range(num_mips):
        vol.add_scale((2 ** i, 2 ** i, 1), chunk_size=chunk_size)

    vol.commit_info()
    return vol
Example #10
def create_image_layer(s3_bucket, tif_dimensions, voxel_size, num_resolutions):
    """Creates segmentation layer for skeletons

    Arguments:
        s3_bucket {str} -- path to SWC file
        voxel_size {list} -- 3 floats for voxel size in nm
        num_resolutions {int} -- number of resolutions for the image
    Returns:
        vols {list} -- List of num_resolutions CloudVolume objects, starting from lowest resolution
    """
    # create cloudvolume info
    info = CloudVolume.create_new_info(
        num_channels=1,
        layer_type="image",
        data_type="uint16",  # Channel images might be 'uint8'
        encoding="raw",  # raw, jpeg, compressed_segmentation, fpzip, kempressed
        resolution=voxel_size,  # Voxel scaling, units are in nanometers
        voxel_offset=[0, 0, 0],  # x,y,z offset in voxels from the origin
        # Pick a convenient size for your underlying chunk representation
        # Powers of two are recommended, doesn't need to cover image exactly
        chunk_size=[int(d / 4) for d in tif_dimensions],  # units are voxels
        # USING MAXIMUM VOLUME size
        volume_size=[i * 2**(num_resolutions - 1) for i in tif_dimensions],
    )
    # get cloudvolume info
    vol = CloudVolume(s3_bucket, info=info, parallel=False)  # compress = False
    # scales resolution up, volume size down
    for i in range(num_resolutions):  # ignore chunk size
        vol.add_scale((2**i, 2**i, 2**i))
    vol.commit_info()
    vols = [
        CloudVolume(s3_bucket, mip=i,
                    parallel=False)  # parallel False, compress
        for i in range(num_resolutions - 1, -1, -1)
    ]
    return vols
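A usage sketch under assumed values. The returned list is ordered from lowest to highest resolution, so vols[-1] is the mip-0 volume that full-resolution tiles would be written to; the path and sizes are placeholders.

vols = create_image_layer(
    "s3://example-bucket/image-layer",  # placeholder destination
    tif_dimensions=[1024, 1024, 512],
    voxel_size=[300, 300, 1000],        # nm
    num_resolutions=4,
)
print(len(vols))     # 4 volumes, lowest resolution first
print(vols[-1].mip)  # 0, the highest-resolution volume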
Example #11
def create_cloud_volume(
    precomputed_path: str,
    img_size: Sequence[int],
    voxel_size: Sequence[Union[int, float]],
    num_resolutions: int,
    chunk_size: Optional[Sequence[int]] = None,
    parallel: Optional[bool] = False,
    layer_type: Optional[str] = "image",
    dtype: Optional[str] = None,
    commit_info: Optional[bool] = True,
) -> CloudVolumePrecomputed:
    """Create CloudVolume object and info file.

    Handles both image volumes and segmentation volumes from octree structure.

    Arguments:
        precomputed_path: cloudvolume path
        img_size: x, y, z voxel dimensions of tiff images.
        voxel_size: x, y, z dimensions of highest res voxel size (nm).
        num_resolutions: The number of resolutions to upload.
        chunk_size: The size of chunks to use for upload. If None, uses img_size/4.
        parallel: Whether to upload chunks in parallel.
        layer_type: The type of cloudvolume object to create.
        dtype: The data type of the volume. If None, uses default for layer type.
        commit_info: Whether to create an info file at the path, defaults to True.
    Returns:
        vols: List of volumes designated for upload, ordered from lowest to highest resolution.
    """
    # defaults
    if chunk_size is None:
        chunk_size = [int(i / 4) for i in img_size]  # /2 took 42 hrs
    if dtype is None:
        if layer_type == "image":
            dtype = "uint16"
        elif layer_type == "segmentation" or layer_type == "annotation":
            dtype = "uint64"
        else:
            raise ValueError(
                f"layer type is {layer_type}, when it should be 'image', 'segmentation', or 'annotation'")

    # check inputs
    check_precomputed(precomputed_path)
    check_size(img_size, allow_float=False)
    check_size(voxel_size)
    check_type(num_resolutions, (int, np.integer))
    if num_resolutions < 1:
        raise ValueError(
            f"Number of resolutions should be > 0, not {num_resolutions}")
    check_size(chunk_size)
    check_type(parallel, bool)
    check_type(layer_type, str)
    if layer_type not in ["image", "segmentation", "annotation"]:
        raise ValueError(
            f"{layer_type} should be 'image', 'segmentation', or 'annotation'")
    check_type(dtype, str)
    if dtype not in ["uint16", "uint64"]:
        raise ValueError(f"{dtype} should be 'uint16' or 'uint64'")
    check_type(commit_info, bool)

    info = CloudVolume.create_new_info(
        num_channels=1,
        layer_type=layer_type,
        data_type=dtype,  # Channel images might be 'uint8'
        encoding="raw",  # raw, jpeg, compressed_segmentation, fpzip, kempressed
        resolution=voxel_size,  # Voxel scaling, units are in nanometers
        voxel_offset=[0, 0, 0],  # x,y,z offset in voxels from the origin
        chunk_size=chunk_size,  # units are voxels
        volume_size=[i * 2**(num_resolutions - 1) for i in img_size],
    )
    vol = CloudVolume(precomputed_path, info=info, parallel=parallel)
    for i in range(num_resolutions):
        vol.add_scale((2**i, 2**i, 2**i), chunk_size=chunk_size)
    if commit_info:
        vol.commit_info()
    if layer_type == "image" or layer_type == "annotation":
        vols = [
            CloudVolume(precomputed_path, mip=i, parallel=parallel)
            for i in range(num_resolutions - 1, -1, -1)
        ]
    elif layer_type == "segmentation":
        info.update(skeletons="skeletons")

        skel_info = {
            "@type":
            "neuroglancer_skeletons",
            "transform": [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
            "vertex_attributes": [
                {
                    "id": "radius",
                    "data_type": "float32",
                    "num_components": 1
                },
                {
                    "id": "vertex_types",
                    "data_type": "float32",
                    "num_components": 1
                },
                {
                    "id": "vertex_color",
                    "data_type": "float32",
                    "num_components": 4
                },
            ],
        }
        with storage.SimpleStorage(vol.cloudpath) as stor:
            stor.put_json(str(Path("skeletons") / "info"), skel_info)
        vols = [vol]
    return vols
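A hedged example of the segmentation path through the function above: with layer_type="segmentation" it also writes a skeleton info file under skeletons/ and returns a single-element list. The destination path and sizes are placeholders, and dtype is left to default to "uint64".

vols = create_cloud_volume(
    "s3://example-bucket/segmentation-layer",  # placeholder destination
    img_size=[512, 512, 256],
    voxel_size=[300, 300, 1000],               # nm
    num_resolutions=3,
    layer_type="segmentation",                 # dtype defaults to "uint64"
)
seg_vol = vols[0]
print(seg_vol.dtype)  # uint64
print(len(vols))      # 1: segmentation layers return only the mip-0 volume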
Example #12
def create_cloud_volume(
    precomputed_path,
    img_size,
    voxel_size,
    num_resolutions=2,
    chunk_size=None,
    parallel=False,
    layer_type="image",
    dtype=None,
):
    """Create CloudVolume volume object and info file.

    Arguments:
        precomputed_path {str} -- cloudvolume path
        img_size {list} -- x,y,z voxel dimensions of tiff images
        voxel_size {list} -- x,y,z dimensions of highest res voxel size (nm)
        
    Keyword Arguments:
        num_resolutions {int} -- the number of resolutions to upload
        chunk_size {list} -- size of chunks to upload. If None, uses img_size/2.
        parallel {bool} -- whether to upload chunks in parallel
        layer_type {str} -- one of "image" or "segmentation"
        dtype {str} -- one of "uint16" or "uint64". If None, uses default for layer type.
    Returns:
        vols {list} -- list of CloudVolume objects to upload to
    """
    if chunk_size is None:
        chunk_size = [int(i / 2) for i in img_size]
    if dtype is None:
        if layer_type == "image":
            dtype = "uint16"
        elif layer_type == "segmentation":
            dtype = "uint64"
        else:
            raise ValueError(
                f"layer type is {layer_type}, when it should be 'image' or 'segmentation'")

    info = CloudVolume.create_new_info(
        num_channels=1,
        layer_type=layer_type,
        data_type=dtype,  # Channel images might be 'uint8'
        encoding="raw",  # raw, jpeg, compressed_segmentation, fpzip, kempressed
        resolution=voxel_size,  # Voxel scaling, units are in nanometers
        voxel_offset=[0, 0, 0],  # x,y,z offset in voxels from the origin
        chunk_size=chunk_size,  # units are voxels
        volume_size=[i * 2**(num_resolutions - 1) for i in img_size],
        # volume_size=img_size,  # e.g. a cubic millimeter dataset
        skeletons="skeletons",
    )
    vol = CloudVolume(precomputed_path, info=info, parallel=parallel)
    for i in range(num_resolutions):
        vol.add_scale((2**i, 2**i, 2**i), chunk_size=chunk_size)
    vol.commit_info()
    if layer_type == "image":
        vols = [
            CloudVolume(precomputed_path, mip=i, parallel=parallel)
            for i in range(num_resolutions - 1, -1, -1)
        ]
    elif layer_type == "segmentation":
        skel_info = {
            "@type":
            "neuroglancer_skeletons",
            "transform": [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
            "vertex_attributes": [
                {
                    "id": "radius",
                    "data_type": "float32",
                    "num_components": 1
                },
                {
                    "id": "vertex_types",
                    "data_type": "float32",
                    "num_components": 1
                },
                {
                    "id": "vertex_color",
                    "data_type": "float32",
                    "num_components": 4
                },
            ],
        }
        with storage.SimpleStorage(vol.cloudpath) as stor:
            stor.put_json(str(Path("skeletons") / "info"), skel_info)
        vols = [vol]
    return vols