Example #1
def blocks_from_mask(job_id, config_path):

    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    with open(config_path, 'r') as f:
        config = json.load(f)

    mask_path = config['mask_path']
    mask_key = config['mask_key']
    output_path = config['output_path']
    shape = config['shape']
    block_shape = config['block_shape']
    n_threads = config.get('threads_per_job', 1)

    # NOTE we assume that the mask is small and will fit into memory
    with vu.file_reader(mask_path, 'r') as f:
        ds = f[mask_key]
        ds.n_threads = n_threads
        mask_data = ds[:]
    mask = ResizedVolume(mask_data, tuple(shape))

    blocking = nt.blocking([0, 0, 0], shape, list(block_shape))
    blocks_in_mask = _get_blocks_in_mask(mask, blocking, n_threads)

    with open(output_path, 'w') as f:
        json.dump(blocks_in_mask, f)

    fu.log_job_success(job_id)
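
The helper _get_blocks_in_mask used above is not part of this excerpt. A minimal sketch of what it could look like, assuming a nifty blocking and a mask volume that supports slicing (it mirrors the check_block pattern from Example #3 below); the real helper may differ:

def _get_blocks_in_mask(mask, blocking, n_threads):
    # hypothetical reconstruction: keep every block whose bounding box
    # contains at least one foreground mask voxel
    from concurrent import futures

    def check_block(block_id):
        block = blocking.getBlock(block_id)
        bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
        return block_id if mask[bb].any() else None

    with futures.ThreadPoolExecutor(n_threads) as tp:
        blocks = list(tp.map(check_block, range(blocking.numberOfBlocks)))
    return [block_id for block_id in blocks if block_id is not None]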
Example #2
    def _test_toy(self):
        from elf.wrapper.resized_volume import ResizedVolume
        from skimage.transform import resize
        from skimage.data import astronaut
        x = astronaut()[..., 0].astype('float32')
        x = vigra.sampling.resize(x, (256, 256))
        x = np.concatenate(256 * [x[None]], axis=0)

        out_shape = 3 * (128,)
        order = 0
        out1 = vigra.sampling.resize(x, shape=out_shape, order=order)
        out2 = ResizedVolume(x, shape=out_shape, order=order)
        out3 = resize(x, out_shape, order=0, preserve_range=True, anti_aliasing=False)
        assert out1.shape == out2.shape == out_shape
        # bb = np.s_[:64, :, 64:]
        bb = np.s_[:]
        o1 = out1[bb]
        o2 = out2[bb]
        o3 = out3[bb]
        import napari
        with napari.gui_qt():
            viewer = napari.Viewer()
            # note the layer names: o1 comes from vigra, o2 from elf's ResizedVolume
            viewer.add_image(o1, name='vigra')
            viewer.add_image(o2, name='elf')
            viewer.add_image(o3, name='skimage')
Example #3
def prefilter_blocks(mask_path, mask_key,
                     shape, block_shape,
                     save_file, n_threads=48):
    if os.path.exists(save_file):
        print("Loading block list from file")
        with open(save_file) as f:
            return json.load(f)

    with open_file(mask_path, 'r') as f:
        ds = f[mask_key]
        mask = ResizedVolume(ds, shape=shape, order=0)

        blocking = nt.blocking([0, 0, 0], shape, block_shape)
        n_blocks = blocking.numberOfBlocks

        def check_block(block_id):
            block = blocking.getBlock(block_id)
            bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
            d = mask[bb]
            if d.sum() > 0:
                return block_id
            else:
                return None

        print("Computing block list ...")
        with futures.ThreadPoolExecutor(n_threads) as tp:
            blocks = list(tqdm(tp.map(check_block, range(n_blocks)), total=n_blocks))
        blocks = [bid for bid in blocks if bid is not None]

    with open(save_file, 'w') as f:
        json.dump(blocks, f)
    return blocks
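
A hypothetical invocation with placeholder paths and shapes (shape is the target full-resolution shape that the mask gets resized to before the block check):

blocks = prefilter_blocks('mask.n5', 'setup0/timepoint0/s0',
                          shape=(1024, 2048, 2048), block_shape=[64, 512, 512],
                          save_file='blocks.json')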
Example #4
def block_node_labels(job_id, config_path):

    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    # get the config
    with open(config_path) as f:
        config = json.load(f)

    ws_path = config['ws_path']
    ws_key = config['ws_key']
    input_path = config['input_path']
    input_key = config['input_key']
    output_path = config['output_path']
    output_key = config['output_key']

    block_shape = config['block_shape']
    block_list = config['block_list']
    ignore_label = config['ignore_label']

    with vu.file_reader(ws_path, 'r') as f:
        shape = f[ws_key].shape

    blocking = nt.blocking([0, 0, 0],
                           list(shape),
                           list(block_shape))

    # labels can either be interpolated or full volume
    f_lab = vu.file_reader(input_path, 'r')
    ds_labels = f_lab[input_key]
    lab_shape = ds_labels.shape
    # label shape is smaller than ws shape
    # -> interpolated
    if any(lsh < sh for lsh, sh in zip(lab_shape, shape)):
        assert not any(lsh > sh for lsh, sh in zip(lab_shape, shape)),\
            "Can't have label shape bigger then volshape"
        labels = ResizedVolume(ds_labels, shape, order=0)
    else:
        assert lab_shape == shape, "%s, %s" % (str(lab_shape), shape)
        labels = ds_labels

    if ignore_label is None:
        fu.log("accumulating labels without ignore label")
    else:
        fu.log("accumulating labels with ignore label %i" % ignore_label)

    with vu.file_reader(ws_path, 'r') as f_in:
        ds_ws = f_in[ws_key]
        if ds_ws.attrs.get('isLabelMultiset', False):
            ds_ws = LabelMultisetWrapper(ds_ws)
        for block_id in block_list:
            _labels_for_block(block_id, blocking,
                              ds_ws, output_path, output_key,
                              labels, ignore_label)

    f_lab.close()
    fu.log_job_success(job_id)
Example #5
def load_mask(mask_path, mask_key, shape):
    with file_reader(mask_path, 'r') as f_mask:
        mshape = f_mask[mask_key].shape
    # check if the mask is at full shape, otherwise interpolate
    if tuple(mshape) == tuple(shape):
        mask = file_reader(mask_path, 'r')[mask_key]
    else:
        with file_reader(mask_path, 'r') as f_mask:
            mask = f_mask[mask_key][:].astype('bool')
        mask = ResizedVolume(mask, shape=shape, order=0)
    return mask
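
A hypothetical call with placeholder paths; if the dataset at mask_key is smaller than shape, it is loaded into memory and wrapped for nearest-neighbor upsampling:

mask = load_mask('mask.n5', 'setup0/timepoint0/s0', shape=(512, 2048, 2048))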
Example #6
    def test_upscale_full_volume(self):
        from elf.wrapper.resized_volume import ResizedVolume
        shape = 3 * (128, )
        x = np.random.rand(*shape).astype('float32')

        out_shape = 3 * (256, )
        orders = (0, 3)
        for order in orders:
            out1 = vigra.sampling.resize(x, shape=out_shape, order=order)
            out2 = ResizedVolume(x, shape=out_shape, order=order)
            self.assertEqual(out1.shape, out2.shape)
            self._check_index(out1, out2, np.s_[:])
Example #7
    def _test_resize(self, shape, out_shape, indices):
        from elf.wrapper.resized_volume import ResizedVolume
        x = np.random.rand(*shape).astype('float32')

        halo = 8
        orders = [0, 3]
        for order in orders:
            out1 = vigra.sampling.resize(x, shape=out_shape, order=order)
            out2 = ResizedVolume(x, shape=out_shape, order=order)
            self.assertEqual(out1.shape, out2.shape)
            self.assertEqual(out1.shape, out_shape)
            for index in indices:
                self._check_index(out1, out2, index, halo=halo)
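
The _check_index helper these tests rely on is not shown. A plausible sketch, assuming numpy is imported as np at module level and that the helper loads the same index from both volumes and optionally crops a halo before comparing, matching the check_close and halo arguments used in Examples #13 and #14:

    def _check_index(self, out1, out2, index, check_close=True, halo=None):
        # hypothetical reconstruction: compare the reference (vigra) result
        # with the lazily resized volume for the given index
        o1, o2 = out1[index], out2[index]
        self.assertEqual(o1.shape, o2.shape)
        if check_close and halo is not None:
            # crop a halo to tolerate boundary effects of block-wise interpolation
            crop = tuple(slice(halo, sh - halo) for sh in o1.shape)
            o1, o2 = o1[crop], o2[crop]
        if check_close:
            self.assertTrue(np.allclose(o1, o2))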
Example #8
def compute_mean_and_std():
    key = 'setup0/timepoint0/s1'
    f = open_file(RAW_PATH, 'r')
    ds = f[key]

    mask_key = 'setup0/timepoint0/s0'
    mask = open_file(MASK_PATH)[mask_key][:].astype('bool')
    mask = ResizedVolume(mask, ds.shape)

    m, s = mean_and_std(ds, mask=mask, n_threads=16, verbose=True)
    print("Computed mean and standard deviation:")
    print("Mean:", m)
    print("Standard deviation:", s)
Example #9
def insert_affinities(job_id, config_path):

    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    with open(config_path, 'r') as f:
        config = json.load(f)
    input_path = config['input_path']
    input_key = config['input_key']
    output_path = config['output_path']
    output_key = config['output_key']
    objects_path = config['objects_path']
    objects_key = config['objects_key']

    erode_by = config['erode_by']
    erode_3d = config.get('erode_3d', True)
    zero_objects_list = config['zero_objects_list']
    dilate_by = config.get('dilate_by', 2)

    fu.log("Fitting objects to affinities with erosion strenght %i and erosion in 3d: %s" % (erode_by, str(erode_3d)))
    if zero_objects_list is not None:
        fu.log("Zeroing affinities for the objects %s" % str(zero_objects_list))

    block_list = config['block_list']
    block_shape = config['block_shape']
    offsets = config['offsets']

    with vu.file_reader(input_path) as f_in, vu.file_reader(output_path) as f_out,\
            vu.file_reader(objects_path) as f_obj:
        ds_in = f_in[input_key]
        ds_out = f_out[output_key]
        shape = ds_in.shape[1:]

        # TODO actually check that objects are on a lower scale
        ds_objs = f_obj[objects_key]
        objects = ResizedVolume(ds_objs, shape)

        blocking = nt.blocking([0, 0, 0], list(shape), block_shape)
        for block_id in block_list:
            _insert_affinities_block(block_id, blocking, ds_in, ds_out, objects, offsets,
                                     erode_by, erode_3d, zero_objects_list, dilate_by)

    fu.log_job_success(job_id)
Example #10
def scale_to_boundaries(job_id, config_path):
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)
    with open(config_path, 'r') as f:
        config = json.load(f)

    # read paths from the config
    input_path = config['input_path']
    input_key = config['input_key']
    output_path = config['output_path']
    output_key = config['output_key']
    boundaries_path = config['boundaries_path']
    boundaries_key = config['boundaries_key']
    offset = config['offset']

    # additional config
    erode_by = config['erode_by']
    erode_3d = config.get('erode_3d', True)
    channel = config['channel']

    block_shape = list(config['block_shape'])
    block_list = config['block_list']

    with vu.file_reader(input_path, 'r') as fin,\
            vu.file_reader(boundaries_path, 'r') as fb,\
            vu.file_reader(output_path) as fout:

        ds_bd = fb[boundaries_key]
        ds_out = fout[output_key]

        shape = ds_out.shape
        blocking = nt.blocking([0, 0, 0], list(shape), block_shape)

        ds_in = ResizedVolume(fin[input_key], shape)

        for block_id in block_list:
            _scale_block(block_id, blocking,
                         ds_in, ds_bd, ds_out,
                         offset, erode_by, erode_3d, channel)

    # log success
    fu.log_job_success(job_id)
Example #11
def make_foreground_mask(path, input_key, output_key, mask_path, mask_key,
                         threshold, chunks, n_threads):

    with open_file(path, 'a') as f, open_file(mask_path, 'r') as f_mask:
        ds = NormalizeWrapper(f[input_key])
        ds_out = f.require_dataset(output_key,
                                   shape=ds.shape,
                                   compression='gzip',
                                   dtype='uint8',
                                   chunks=chunks)

        ds_mask = f_mask[mask_key]
        ds_mask = ResizedVolume(ds_mask, shape=ds.shape, order=0)

        greater_equal(ds,
                      threshold,
                      out=ds_out,
                      verbose=True,
                      n_threads=n_threads,
                      mask=ds_mask)
Example #12
def check_exported(with_seg=False, with_boundaries=False, scale=3):
    from heimdall import view, to_source
    from elf.wrapper.resized_volume import ResizedVolume

    path = '/g/arendt/EM_6dpf_segmentation/platy-browser-data/data/rawdata/sbem-6dpf-1-whole-raw.n5'
    key = 'setup0/timepoint0/s%i' % (scale + 1, )

    f = z5py.File(path, 'r')
    ds = f[key]
    ds.n_threads = 8
    raw = ds[:]
    shape = raw.shape
    data = [to_source(raw, name='raw')]

    path = './data.n5'
    key = 'volumes/segmentation2/s%i' % scale
    f = z5py.File(path, 'r')

    if with_seg:
        ds = f[key]
        ds.n_threads = 8
        seg = ds[:].astype('uint32')
        data.append(to_source(seg, name='segmentation'))

    key = 'volumes/clustering'
    ds = f[key]
    ds.n_threads = 8
    clustered = ResizedVolume(ds[:], shape=shape)[:]
    data.append(to_source(clustered, name='clustered'))

    path = '/g/arendt/EM_6dpf_segmentation/corrections_and_proofreading/data.n5'
    key = 'boundaries/s%i' % scale
    f = z5py.File(path, 'r')
    if with_boundaries:
        ds = f[key]
        ds.n_threads = 8
        bd = ds[:]
        data.append(to_source(bd, name='boundaries'))

    view(*data)
Example #13
    def test_downscale(self):
        from elf.wrapper.resized_volume import ResizedVolume
        shape = 3 * (256, )
        x = np.random.rand(*shape).astype('float32')

        out_shape = 3 * (128, )
        orders = (0, 3)
        indices = (np.s_[1:-1, 2:-2, 3:-3], np.s_[:64, :, 64:],
                   np.s_[:64, :48, 40:95])
        halo = 8
        for order in orders:
            out1 = vigra.sampling.resize(x, shape=out_shape, order=order)
            out2 = ResizedVolume(x, shape=out_shape, order=order)
            self.assertEqual(out1.shape, out2.shape)
            for index in indices:
                self._check_index(out1,
                                  out2,
                                  index,
                                  check_close=True,
                                  halo=halo)
            index = np.s_[32:96, 33:55, 70]
            self._check_index(out1, out2, index, check_close=False)
Example #14
    def test_upscale(self):
        from elf.wrapper.resized_volume import ResizedVolume
        shape = 3 * (128, )
        x = np.random.rand(*shape).astype('float32')

        out_shape = 3 * (256, )
        orders = (0, 3)
        indices = (np.s_[1:-1, 2:-2, 3:-3], np.s_[:128, :, 128:],
                   np.s_[:128, :97, 123:250])
        halo = 8
        for order in orders:
            out1 = vigra.sampling.resize(x, shape=out_shape, order=order)
            out2 = ResizedVolume(x, shape=out_shape, order=order)
            self.assertEqual(out1.shape, out2.shape)
            for index in indices:
                self._check_index(out1,
                                  out2,
                                  index,
                                  check_close=True,
                                  halo=halo)
            index = np.s_[64:107, 153:179, 93]
            self._check_index(out1, out2, index, check_close=False)
Example #15
def run_thresh(path, mask_path, n_jobs):

    f = open_file(path)
    ds = NormalizeWrapper(f['predictions/foreground'])
    threshold = .5

    ds_out = f.require_dataset('predictions/mask',
                               shape=ds.shape,
                               compression='gzip',
                               dtype='uint8',
                               chunks=ds.chunks)

    # with mask for the big volume; read it from the separate mask container
    mask_key = 'setup0/timepoint0/s0'
    ds_mask = open_file(mask_path, 'r')[mask_key]
    ds_mask = ResizedVolume(ds_mask, shape=ds.shape, order=0)

    n_threads = n_jobs
    greater_equal(ds,
                  threshold,
                  out=ds_out,
                  verbose=True,
                  n_threads=n_threads,
                  mask=ds_mask)
Example #16
def make_resin_mask_2d(z=None, scale=3):
    path = '../data.n5'
    raw_key = 'volumes/raw-samplexy/s%i' % scale
    mask_path = '../../data/rawdata/sbem-6dpf-1-whole-segmented-inside.n5'
    mask_key = 'setup0/timepoint0/s0'

    n_threads = 8

    f = z5py.File(path)
    ds_raw = f[raw_key]

    f_mask = z5py.File(mask_path)
    ds = f_mask[mask_key]
    ds.n_threads = n_threads
    mask = ds[:]
    mask = ResizedVolume(mask, ds_raw.shape, order=0)

    size_thresh = 5000

    def mask_2d(z):
        print(z, "/", ds_raw.shape[0])
        raw = ds_raw[z]
        maskz = np.logical_not(mask[z])
        maskz = np.logical_or(maskz, raw == 0)
        maskz = np.logical_or(maskz, raw == 255)

        # run otsu on the remaining data to get rid of the embedded silver
        masked = raw[maskz]
        thresh = threshold_otsu(masked)
        maskz = np.logical_and(maskz, raw > thresh)

        # get rid of upper quantile
        masked = raw[maskz]
        # thresh = threshold_otsu(masked)
        thresh = np.quantile(masked, .9)
        maskz = np.logical_and(maskz, raw < thresh)

        # only keep the biggest component
        ccs = vigra.analysis.labelImageWithBackground(maskz.astype('uint32'))
        ids, sizes = np.unique(ccs, return_counts=True)
        ids, sizes = ids[1:], sizes[1:]
        keep_ids = ids[sizes > size_thresh]
        maskz = np.isin(ccs, keep_ids)

        maskz = maskz.astype('uint8') * 255
        return maskz

    if z is not None:
        resin_mask = mask_2d(z)
        raw = ds_raw[z]
        mask = mask[z]
        view(raw, mask, resin_mask)

    else:
        print("Compute mask")
        with futures.ThreadPoolExecutor(n_threads) as tp:
            tasks = [tp.submit(mask_2d, z) for z in range(ds_raw.shape[0])]
            res = [t.result() for t in tasks]

        resin_mask = np.concatenate([re[None] for re in res], axis=0)
        print(resin_mask.shape)

        # save as bdv, why not
        # s0: .025, .02, .02
        # s1: .025, .08, .08
        # s2: .025, .16, .16
        # s3: .025, .32, .32
        res = [.025, .32, .32]
        make_bdv(resin_mask, 'sbem-6dpf-1-whole-segmented-resin.n5',
                 downscale_factors=[[1, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]],
                 resolution=res, unit='micrometer', downscale_mode='min')
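
A hypothetical way to run this: inspect a single plane first, then compute and export the full mask (the z argument selects the single-slice preview branch above):

# preview one plane in the viewer before committing to the full run
make_resin_mask_2d(z=100)
# compute all planes and export to bdv format
make_resin_mask_2d()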
Example #17
def check_ids(remaining_ids, saved_ids, false_merges, project_path, state):
    scale = 3
    pathr = '/g/kreshuk/data/arendt/platyneris_v1/data.n5'
    paths = os.path.join(
        '/g/arendt/EM_6dpf_segmentation/platy-browser-data/data/0.3.1',
        'segmentations/sbem-6dpf-1-whole-segmented-cells-labels.h5')
    table_path = os.path.join(
        '/g/arendt/EM_6dpf_segmentation/platy-browser-data/data/0.3.1',
        'tables/sbem-6dpf-1-whole-segmented-cells-labels/default.csv')

    table = pd.read_csv(table_path, sep='\t')
    res = scale_to_res(scale)

    fr = z5py.File(pathr, 'r')
    dsr = fr['volumes/raw/s%i' % scale]
    km = 'volumes/labels/muscle'
    dsm = fr[km]
    dsm = ResizedVolume(dsm, shape=dsr.shape)
    assert dsm.shape == dsr.shape

    check_fps, current_id = state['check_fps'], state['current_id']

    with h5py.File(paths, 'r') as fs:
        dss = fs['t00000/s00/%i/cells' % (scale - 1, )]

        for ii, fid in enumerate(remaining_ids):
            bb = get_bb(table, fid, res)
            if check_fps:
                print("Checking false positives - id:", fid)
            else:
                print("Checking false negatives - id:", fid)
            raw = dsr[bb]
            seg = dss[bb]
            muscle = dsm[bb]
            muscle = (muscle > 0).astype('uint32')
            mask = (seg == fid).astype('uint32')
            save_id, false_merge, save_state, done = view_candidate(
                raw, mask, muscle)

            if save_id:
                saved_ids.append(fid)
                print("Confirm id", fid, "we now have", len(saved_ids),
                      "confirmed ids.")

            if false_merge:
                print("Add id", fid, "to false merges")
                false_merges.append(fid)

            if save_state:
                print("Save current state to", project_path)
                with h5py.File(project_path, 'a') as f:
                    f.attrs['check_fps'] = check_fps

                    if 'false_merges' in f:
                        del f['false_merges']
                    if len(false_merges) > 0:
                        f.create_dataset('false_merges', data=false_merges)

                    g = f['false_positives'] if check_fps else f['false_negatives']
                    g.attrs['current_id'] = current_id + ii + 1
                    if 'proofread' in g:
                        del g['proofread']
                    if len(saved_ids) > 0:
                        g.create_dataset('proofread', data=saved_ids)

            if done:
                print("Quit")
                return False
    return True
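
The get_bb helper is referenced but not shown. A plausible sketch, assuming the default.csv table stores bounding boxes in micrometer with the usual platy-browser column names (label_id, bb_min_z, ..., bb_max_x); the real helper may differ:

def get_bb(table, seg_id, res):
    # hypothetical reconstruction: look up the row for this id and convert
    # the physical bounding box to a voxel slice at the given resolution
    row = table.loc[table['label_id'] == seg_id].iloc[0]
    mins = [row['bb_min_z'], row['bb_min_y'], row['bb_min_x']]
    maxs = [row['bb_max_z'], row['bb_max_y'], row['bb_max_x']]
    return tuple(slice(int(mi / re), int(ma / re) + 1)
                 for mi, ma, re in zip(mins, maxs, res))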