# Imports assumed by these snippets. `downscale`, `add_max_id`, `relabel`,
# `compute_node_labels` and the other lowercase helpers (load_seg, load_raw,
# make_dataset_folders, make_grid_dataset, ...) as well as the upper-case
# constants (RESOLUTION, SCALE_FACTORS, N_THREADS) are project-local utilities
# assumed to be defined or imported elsewhere in this repository.
import os
from glob import glob

import numpy as np
import z5py
from elf.io import open_file


def make_seg(dataset, split, out_path):
    seg = load_seg(dataset, split)
    key = 'setup0/timepoint0/s0'
    chunks = (32, 256, 256)
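    # write the full-resolution segmentation as the bdv.n5 scale-0 dataset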
    with z5py.File(out_path, 'a') as f:
        f.create_dataset(key,
                         data=seg,
                         compression='gzip',
                         chunks=chunks,
                         n_threads=16)

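    # compute the scale pyramid; vigra with order=0 (nearest neighbor)
    # keeps the label values intact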
    tmp_folder = './tmp_ds_seg'
    downscale(out_path,
              key,
              out_path,
              RESOLUTION,
              SCALE_FACTORS,
              chunks,
              tmp_folder,
              target='local',
              max_jobs=16,
              block_shape=chunks,
              library='vigra',
              library_kwargs={'order': 0})
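    # write the maxId attribute expected of segmentation datasets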
    add_max_id(out_path,
               key,
               out_path,
               key,
               tmp_folder,
               target='local',
               max_jobs=16)
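

# Build a MoBIE dataset from a folder of tomogram h5 files: stitch the
# individual tomograms onto a 2d grid, downscale the stitched volume and
# register it in the dataset's image dict and bookmarks.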
def create_mobie_dataset(dataset_name,
                         root_in,
                         is_default,
                         volumes_per_row=10,
                         int_sort=False):

    root_out = './data'
    dataset_folder = make_dataset_folders(root_out, dataset_name)

    raw_name = 'em-tomogram'
    data_path = os.path.join(dataset_folder, 'images', 'local',
                             f'{raw_name}.n5')
    xml_path = os.path.join(dataset_folder, 'images', 'local',
                            f'{raw_name}.xml')

    chunks = (32, 128, 128)
    resolution = get_resolution(dataset_name)
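    # downsample in-plane first (anisotropic data), then isotropically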
    scale_factors = [[1, 2, 2], [1, 2, 2], [1, 2, 2], [2, 2, 2]]

    pattern = os.path.join(root_in, '*.h5')
    files = glob(pattern)
    files = sort_files(files, int_sort)

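    # stitch the tomograms onto a single 2d grid volume; returns the center
    # position of each tomogram on the grid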
    out_key = 'setup0/timepoint0/s0'
    grid_center_positions = make_grid_dataset(files,
                                              chunks,
                                              data_path,
                                              out_key,
                                              volumes_per_row=volumes_per_row,
                                              dry_run=False)

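    # compute the scale pyramid for the stitched grid volume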
    tmp_folder = f'tmp_{dataset_name}'
    downscale(data_path,
              out_key,
              data_path,
              resolution,
              scale_factors,
              chunks,
              tmp_folder=tmp_folder,
              target='local',
              max_jobs=N_THREADS,
              block_shape=chunks,
              library='skimage')

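    # register the image source and add bookmarks centered on each tomogram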
    add_to_image_dict(dataset_folder, 'image', xml_path, add_remote=True)

    make_bookmarks(dataset_folder, grid_center_positions, files, raw_name,
                   resolution)
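

# Same as make_seg, but for raw data: interpolating (skimage) downscaler
# and no maxId attribute.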
def make_raw(dataset, split, out_path):
    raw = load_raw(dataset, split)
    key = 'setup0/timepoint0/s0'
    chunks = (32, 256, 256)
    with z5py.File(out_path, 'a') as f:
        f.create_dataset(key,
                         data=raw,
                         compression='gzip',
                         chunks=chunks,
                         n_threads=16)

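    # interpolation is fine for raw intensities, hence the skimage downscaler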
    tmp_folder = './tmp_ds_raw'
    downscale(out_path,
              key,
              out_path,
              RESOLUTION,
              SCALE_FACTORS,
              chunks,
              tmp_folder,
              target='local',
              max_jobs=16,
              block_shape=chunks,
              library='skimage')
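

# Create example data in paintera format: downscaled raw, relabeled
# watersheds and segmentation, plus the initial fragment-to-segment
# assignment table.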
def make_example_data():
    target = 'local'
    max_jobs = 4

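    # in_path / out_path and raw_key / ws_key / seg_key are assumed to be
    # module-level constants defined elsewhere in this script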
    resolution = [0.04, 0.004, 0.004]
    scale_factors = [[1, 2, 2], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2],
                     [2, 2, 2]]
    chunks = [1, 256, 256]

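    # raw: written with paintera metadata layout under the 'raw' key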
    tmp_folder = './tmp_example'
    downscale(in_path,
              raw_key,
              out_path,
              resolution,
              scale_factors,
              chunks,
              tmp_folder,
              target,
              max_jobs,
              block_shape=None,
              library='skimage',
              metadata_format='paintera',
              out_key='raw')

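    # watersheds: relabel first (presumably to consecutive ids), then
    # downscale with nearest-neighbor (order=0) interpolation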
    tmp_folder = './tmp_example_ws'
    relabel(tmp_folder, target, max_jobs)
    downscale(in_path,
              ws_key,
              out_path,
              resolution,
              scale_factors,
              chunks,
              tmp_folder,
              target,
              max_jobs,
              block_shape=None,
              library='vigra',
              library_kwargs={'order': 0},
              metadata_format='paintera',
              out_key='watersheds')

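    # segmentation: same relabeling and nearest-neighbor downscaling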
    tmp_folder = './tmp_example_seg'
    relabel(tmp_folder, target, max_jobs)
    downscale(in_path,
              seg_key,
              out_path,
              resolution,
              scale_factors,
              chunks,
              tmp_folder,
              target,
              max_jobs,
              block_shape=None,
              library='vigra',
              library_kwargs={'order': 0},
              metadata_format='paintera',
              out_key='segmentation')

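    # assign each watershed fragment to the segment containing it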
    node_labels = compute_node_labels(out_path, 'watersheds/s0', out_path,
                                      'segmentation/s0', tmp_folder, target,
                                      max_jobs)
    node_labels = np.concatenate(
        [np.arange(len(node_labels))[:, None], node_labels[:, None]], axis=1)

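    # store the assignments as an (n, 2) table: fragment id, segment id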
    with open_file(out_path, 'a') as f:
        f.create_dataset('node_labels/initial',
                         data=node_labels,
                         compression='gzip',
                         chunks=(len(node_labels), 1))
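

# Write a mock segmentation for an existing tomogram grid dataset: each
# tomogram's grid footprint is painted with its own label id.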
def mock_segmentation(ds_name, seg_name, scale_factor, resolution, tmp_folder):

    chunks = (32, 128, 128)

    root_in = f'/g/emcf/common/5792_Sars-Cov-2/Exp_300420/TEM/Tomography/raw_data/{ds_name}/bdv/tomos'
    pattern = os.path.join(root_in, '*.h5')
    files = glob(pattern)
    files.sort()

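    # path of the raw grid dataset written by create_mobie_dataset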
    raw_name = 'em-tomogram'
    dataset_folder = os.path.join('data', ds_name)
    data_path = os.path.join(dataset_folder, 'images', 'local',
                             f'{raw_name}.n5')
    out_key = 'setup0/timepoint0/s0'

    volumes_per_row = min(10, len(files))
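    # dry run: only recompute the grid center positions, no data is written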
    grid_center_positions = make_grid_dataset(files,
                                              chunks,
                                              data_path,
                                              out_key,
                                              volumes_per_row=volumes_per_row,
                                              dry_run=True)

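    # derive the mock segmentation's shape by downscaling the raw grid shape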
    with open_file(data_path, 'r') as f:
        ds = f[out_key]
        shape = ds.shape
    shape = tuple(sh // sf for sh, sf in zip(shape, scale_factor))

    seg_path = os.path.join(dataset_folder, 'images', 'local',
                            f'{seg_name}.n5')
    # paint each tomogram's grid footprint with its own label id
    h5_key = 't00000/s00/0/cells'
    with open_file(seg_path, 'a') as f:
        ds = f.require_dataset(out_key,
                               shape=shape,
                               compression='gzip',
                               chunks=chunks,
                               dtype='uint16')

        for label_id, (in_file, grid_center) in enumerate(
                zip(files, grid_center_positions.values()), 1):

            grid_center = [gc // sf for gc, sf in zip(grid_center, scale_factor)]

            # bounding box of this tomogram, centered on its grid position
            with open_file(in_file, 'r') as f_in:
                vol_shape = f_in[h5_key].shape
            vol_shape = [vs // sf for vs, sf in zip(vol_shape, scale_factor)]
            bb = tuple(
                slice(gc - vs // 2, gc + vs // 2)
                for gc, vs in zip(grid_center, vol_shape))

            ds[bb] = label_id

        # segmentations need the maxId attribute
        ds.attrs['maxId'] = label_id

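    # add a single downscaled level to give the segmentation a scale pyramid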
    ds_factors = [[2, 2, 2]]
    downscale(seg_path,
              out_key,
              seg_path,
              resolution,
              ds_factors,
              chunks,
              tmp_folder=tmp_folder,
              target='local',
              max_jobs=4,
              block_shape=chunks,
              library='vigra',
              library_kwargs={'order': 0})