Example #1
def main(h5_path: str, n5_path: str, skip_confirmation: bool = False):
    crop_name = Path(split_by_suffix(h5_path, ('.h5', ))[0]).parent.name
    num_n5_suffixes = len(
        tuple(filter(lambda v: v.endswith('.n5'),
                     Path(n5_path).parts)))
    if num_n5_suffixes != 1:
        raise ValueError(
            f'The n5 path must have one and only one element ending with ".n5". The path given has {num_n5_suffixes}.'
        )

    dataset_name = Path(split_by_suffix(n5_path, ('.n5', ))[0]).name
    # credfile, sheetname, sheetpage, classNameDict, and log are assumed to be
    # defined at module scope (e.g. CLI options or constants) in the full source.
    sheet_df = get_sheet_df(credfile, sheetname, sheetpage)
    crop_attrs = generate_crop_attrs(dataset_name, crop_name, sheet_df,
                                     classNameDict)
    gt_data = read(h5_path)
    if not skip_confirmation:
        print(
            f'The data created at {n5_path} will have the following attributes: '
        )
        print(crop_attrs)
        # abort=True makes click raise Abort (and exit) if the user declines.
        click.confirm('Do you wish to continue?', default=True, abort=True)

    output_array = access(n5_path,
                          shape=gt_data.shape,
                          dtype=gt_data.dtype,
                          mode='a')
    output_array[:] = gt_data
    output_array.attrs.update(**crop_attrs)
    log.info(f'Saved data and attributes to {n5_path}')
    return 0
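
The write path above comes down to: read the ground-truth crop, create or open the destination array, copy the data, and attach the crop attributes. A stand-alone sketch of that pattern using zarr directly (the array, attribute values, and names below are illustrative, not taken from the source):

import numpy as np
import zarr

gt = np.random.randint(0, 5, size=(8, 8), dtype='uint8')  # stand-in for read(h5_path)
out = zarr.zeros(shape=gt.shape, dtype=gt.dtype)  # stand-in for access(n5_path, ...)
out[:] = gt
out.attrs.update({'crop': 'crop_example', 'source': 'example.h5'})  # illustrative attributes
print(dict(out.attrs))
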
def test_path_splitting():
    path = 's3://0/1/2.n5/3/4'
    split = split_by_suffix(path, ('.n5', ))
    assert (split == ('s3://0/1/2.n5', '3/4', '.n5'))

    path = os.path.join('0', '1', '2.n5', '3', '4')
    split = split_by_suffix(path, ('.n5', ))
    assert (split == (os.path.join('0', '1',
                                   '2.n5'), os.path.join('3', '4'), '.n5'))

    path = os.path.join('0', '1', '2.n5')
    split = split_by_suffix(path, ('.n5', ))
    assert (split == (os.path.join('0', '1', '2.n5'), '', '.n5'))
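
The test above pins down the contract of split_by_suffix: a path is cut at the first component whose name ends with one of the given suffixes, yielding the store path (up to and including that component), the remaining key, and the matched suffix. A minimal sketch of that behaviour (split_by_suffix_sketch is a hypothetical name for illustration, not the library implementation):

import os
from typing import Sequence, Tuple

def split_by_suffix_sketch(path: str, suffixes: Sequence[str]) -> Tuple[str, str, str]:
    # Use "/" for URL-style paths, the platform separator otherwise.
    sep = '/' if '://' in path else os.sep
    parts = path.split(sep)
    for idx, part in enumerate(parts):
        for suffix in suffixes:
            if part.endswith(suffix):
                store = sep.join(parts[:idx + 1])
                key = sep.join(parts[idx + 1:])
                return store, key, suffix
    raise ValueError(f'No component of {path} ends with any of {suffixes}')

assert split_by_suffix_sketch('s3://0/1/2.n5/3/4', ('.n5', )) == ('s3://0/1/2.n5', '3/4', '.n5')
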
Example #3
# Assumed module-level imports for this excerpt: `ts` is tensorstore
# (import tensorstore as ts) and map_blocks comes from dask.array.
def precomputed_to_dask(urlpath: str,
                        chunks: Union[str, Sequence[int]],
                        channel: int = 0):
    store_path, key, _ = split_by_suffix(urlpath, (".precomputed", ))
    tsa = access_precomputed(store_path, key,
                             mode="r")[ts.d["channel"][channel]]
    shape = tuple(tsa.shape)
    dtype = tsa.dtype.numpy_dtype
    if chunks == "original":
        chunks = tsa.spec().to_json()["scale_metadata"]["chunk_size"]
    _chunks = normalize_chunks(chunks, shape, dtype=dtype)

    def chunk_loader(store_path, key, block_info=None):
        idx = tuple(
            slice(*idcs) for idcs in block_info[None]["array-location"])
        tsa = access_precomputed(store_path, key,
                                 mode="r")[ts.d["channel"][channel]]
        result = tsa[idx].read().result()
        return result

    arr = map_blocks(chunk_loader,
                     store_path,
                     key,
                     chunks=_chunks,
                     dtype=dtype)
    return arr
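
chunk_loader above relies on the block_info mapping that dask passes to map_blocks functions: block_info[None]["array-location"] holds the (start, stop) index range that the output block covers along each axis, which is turned into a slice for the tensorstore read. A self-contained toy sketch of that mechanism (illustrative names only, no precomputed data involved):

import numpy as np
import dask.array as da

def offset_blocks(block_info=None):
    # (start, stop) range of this output block along axis 0 of the full array.
    start, stop = block_info[None]["array-location"][0]
    return np.arange(start, stop, dtype="int64")

toy = da.map_blocks(offset_blocks, chunks=((4, 4), ), dtype="int64")
print(toy.compute())  # [0 1 2 3 4 5 6 7]
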
Example #4
# Assumed module-level import for this excerpt: `da` is dask.array
# (import dask.array as da).
def n5_to_dask(urlpath: str, chunks: Union[str, Sequence[int]], **kwargs):
    store_path, key, _ = split_by_suffix(urlpath, (".n5", ))
    arr = access_n5(store_path, key, mode="r", **kwargs)
    if not hasattr(arr, "shape"):
        raise ValueError(f"{store_path}/{key} is not an n5 array")
    if chunks == "original":
        _chunks = arr.chunks
    else:
        _chunks = chunks
    darr = da.from_array(arr, chunks=_chunks, inline_array=True)
    return darr
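
A self-contained sketch of the dask wrapping used here, with an in-memory zarr array standing in for the N5 dataset (shapes and names are illustrative):

import dask.array as da
import zarr

z = zarr.ones((64, 64), chunks=(16, 16), dtype="uint8")  # stand-in for access_n5(...)
# chunks="original" above corresponds to reusing the stored chunk grid;
# inline_array=True embeds the cheap-to-serialize, store-backed array directly
# in the task graph rather than referencing it through a single shared key.
darr = da.from_array(z, chunks=z.chunks, inline_array=True)
print(darr.chunks)  # ((16, 16, 16, 16), (16, 16, 16, 16))
print(darr.sum().compute())  # 4096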