Example #1
def _skeletonize_id_block(blocking, block_id, ds_in, ds_out, sizes, bb_min,
                          bb_max, resolution, size_threshold, method):

    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)
    id_begin, id_end = block.begin[0], block.end[0]
    # we don't compute the skeleton for id 0, which is reserved for the ignore label
    id_begin = 1 if id_begin == 0 else id_begin

    # skeletonize ids in range and serialize skeletons
    for seg_id in range(id_begin, id_end):
        if size_threshold is not None:
            if sizes[seg_id] < size_threshold:
                continue
        bb = tuple(
            slice(mi, ma) for mi, ma in zip(bb_min[seg_id], bb_max[seg_id]))
        obj = ds_in[bb] == seg_id

        # try to skeletonize the object, skip if any exception is thrown
        try:
            nodes, edges = skel_impl(obj, resolution=resolution, method=method)
        except Exception:
            continue

        offsets = [b.start for b in bb]
        skelio.write_n5(ds_out, seg_id, nodes, edges, offsets)
    fu.log_block_success(block_id)
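
All of these helpers follow the same pattern: a blocking object decomposes a region of interest into blocks, and each job processes one block id. Here the blocked "volume" is the one-dimensional id space, which is why block.begin[0] and block.end[0] form an id range. A minimal demonstration, assuming the nifty.tools blocking these snippets appear to use:

import nifty.tools as nt

# split an id space of 1000 labels into chunks of 100 ids (example values)
blocking = nt.blocking(roiBegin=[0], roiEnd=[1000], blockShape=[100])
print(blocking.numberOfBlocks)    # -> 10

block = blocking.getBlock(3)
print(block.begin, block.end)     # -> [300] [400], i.e. ids 300 to 399
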
Example #2
def _compute_meshes_id_block(blocking, block_id, ds_in, output_path, sizes,
                             bb_min, bb_max, resolution, size_threshold,
                             smoothing_iterations, output_format):

    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)
    id_begin, id_end = block.begin[0], block.end[0]
    # we don't compute a mesh for id 0, which is reserved for the ignore label
    id_begin = 1 if id_begin == 0 else id_begin

    # compute meshes for ids in range and serialize them
    for seg_id in range(id_begin, id_end):
        if size_threshold is not None:
            if sizes[seg_id] < size_threshold:
                continue
        bb = tuple(
            slice(mi, ma) for mi, ma in zip(bb_min[seg_id], bb_max[seg_id]))
        obj = ds_in[bb] == seg_id

        # compute the mesh for the object
        verts, faces, normals = marching_cubes(
            obj,
            smoothing_iterations=smoothing_iterations,
            resolution=resolution)
        offsets = [b.start * res for b, res in zip(bb, resolution)]
        verts += np.array(offsets)

        if output_format == 'npy':
            out_path = os.path.join(output_path, '%i.npz' % seg_id)
            meshio.write_numpy(out_path, verts, faces, normals)
        else:
            out_path = os.path.join(output_path, '%i.obj' % seg_id)
            meshio.write_obj(out_path, verts, faces, normals)

    fu.log_block_success(block_id)
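
The offset applied to verts converts block-local mesh coordinates into the global physical frame by scaling the bounding-box start with the voxel resolution. A numpy-only illustration of that arithmetic (all values made up):

import numpy as np

resolution = (40, 4, 4)                  # voxel size in z, y, x
bb = np.s_[10:20, 100:200, 100:200]      # object bounding box in voxels
verts = np.array([[0., 0., 0.], [1., 2., 3.]])

offsets = [b.start * res for b, res in zip(bb, resolution)]
print(verts + np.array(offsets))         # -> [[400. 400. 400.] [401. 402. 403.]]
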
Example #3
def _cc_block(block_id, blocking, ds_in, ds_out, threshold, threshold_mode,
              channel):
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)

    bb = vu.block_to_bb(block)
    if channel is None:
        input_ = ds_in[bb]
    else:
        block_shape = tuple(b.stop - b.start for b in bb)
        input_ = np.zeros(block_shape, dtype=ds_in.dtype)
        channel_ = [channel] if isinstance(channel, int) else channel
        for chan in channel_:
            bb_inp = (slice(chan, chan + 1), ) + bb
            input_ += ds_in[bb_inp].squeeze()

    if threshold_mode == 'greater':
        input_ = input_ > threshold
    elif threshold_mode == 'less':
        input_ = input_ < threshold
    elif threshold_mode == 'equal':
        input_ = input_ == threshold
    else:
        raise RuntimeError("Thresholding Mode %s not supported" %
                           threshold_mode)

    if np.sum(input_) == 0:
        fu.log_block_success(block_id)
        return 0

    components = label(input_)
    ds_out[bb] = components
    fu.log_block_success(block_id)
    return int(components.max()) + 1
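
_cc_block returns the number of labels it used, so a driver can derive per-block offsets that make labels globally unique before stitching across block boundaries. One common way to compute those offsets, sketched with made-up counts (the second, merging pass is not shown here):

import numpy as np

block_counts = np.array([3, 0, 5, 2])    # return values of _cc_block (example)
block_offsets = np.roll(np.cumsum(block_counts), 1)
block_offsets[0] = 0
print(block_offsets)                     # -> [0 3 3 8]; added to block i's labels
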
Example #4
def _threshold_block(block_id, blocking, ds_in, ds_out, threshold,
                     threshold_mode, channel, sigma):
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)
    bb = vu.block_to_bb(block)

    if channel is None:
        input_ = ds_in[bb]
    else:
        channel_ = [channel] if isinstance(channel, int) else channel
        in_shape = (len(channel_), ) + tuple(b.stop - b.start for b in bb)
        input_ = np.zeros(in_shape, dtype=ds_in.dtype)
        for chan_id, chan in enumerate(channel_):
            bb_inp = (slice(chan, chan + 1), ) + bb
            input_[chan_id] = ds_in[bb_inp].squeeze()
        input_ = np.mean(input_, axis=0)

    input_ = vu.normalize(input_)
    if sigma > 0:
        input_ = vu.apply_filter(input_, 'gaussianSmoothing', sigma)
        input_ = vu.normalize(input_)

    if threshold_mode == 'greater':
        input_ = input_ > threshold
    elif threshold_mode == 'less':
        input_ = input_ < threshold
    elif threshold_mode == 'equal':
        input_ = input_ == threshold
    else:
        raise RuntimeError("Thresholding Mode %s not supported" %
                           threshold_mode)

    ds_out[bb] = input_.astype('uint8')
    fu.log_block_success(block_id)
Example #5
def _stitch_faces(block_id, blocking, halo, overlap_prefix, overlap_threshold,
                  offsets, empty_blocks, ignore_label):
    fu.log("start processing block %i" % block_id)
    if block_id in empty_blocks:
        fu.log_block_success(block_id)
        return None

    assignments = [
        _stitch_face(offsets, overlap_prefix, block_a, block_b, face_a, face_b,
                     overlap_threshold, ignore_label)
        for _, face_a, face_b, block_a, block_b in vu.iterate_faces(
            blocking, block_id, return_only_lower=True,
            empty_blocks=empty_blocks, halo=halo)
    ]
    assignments = [ass for ass in assignments if ass is not None]

    # all assignments might be None, so we need to check for that
    if assignments:
        assignments = np.concatenate(assignments, axis=0)
    else:
        assignments = None
    fu.log_block_success(block_id)
    return assignments
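
The returned assignments are pairs of label ids that touch across a block face and should be merged. A driver typically concatenates them over all blocks and computes connected components on the resulting label graph. A hedged sketch with scipy (the function name and the dense, non-negative id assumption are mine):

import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components

def merge_assignments(assignments, n_labels):
    # assignments: (N, 2) array of label pairs to merge
    u, v = assignments[:, 0], assignments[:, 1]
    graph = coo_matrix((np.ones(len(u)), (u, v)), shape=(n_labels, n_labels))
    n_comps, node_labels = connected_components(graph, directed=False)
    return node_labels    # node_labels[label_id] -> merged component id
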
Example #6
def _predict_block(block_id, blocking, input_path, input_key, output_prefix,
                   halo, ilastik_folder, ilastik_project):
    fu.log("Start processing block %i" % block_id)
    block = blocking.getBlockWithHalo(block_id, halo).outerBlock
    _predict_block_impl(block_id, block, input_path, input_key, output_prefix,
                        ilastik_folder, ilastik_project)
    fu.log_block_success(block_id)
Example #7
def _scale_block(block_id, blocking,
                 ds_in, ds_bd, ds_out,
                 offset, erode_by, erode_3d, channel):
    fu.log("start processing block %i" % block_id)
    # load the block with halo set to 'erode_by'
    halo = compute_halo(erode_by, erode_3d)
    block = blocking.getBlockWithHalo(block_id, halo)
    in_bb = vu.block_to_bb(block.outerBlock)
    out_bb = vu.block_to_bb(block.innerBlock)
    local_bb = vu.block_to_bb(block.innerBlockLocal)

    obj = ds_in[in_bb]
    # don't scale if block is empty
    if np.sum(obj != 0) == 0:
        fu.log_block_success(block_id)
        return

    # load boundary map and fit obj to it
    if ds_bd.ndim == 4:
        in_bb = (slice(channel, channel + 1),) + in_bb
    hmap = ds_bd[in_bb].squeeze()
    obj, _ = vu.fit_to_hmap(obj, hmap, erode_by, erode_3d)
    obj = obj[local_bb]

    fg_mask = obj != 0
    obj[fg_mask] += offset

    # load previous output volume, insert obj into it and save again
    out = ds_out[out_bb]
    out[fg_mask] += obj[fg_mask]
    ds_out[out_bb] = out
    # log block success
    fu.log_block_success(block_id)
Example #8
def _create_multiset_block(blocking, block_id, ds_in, ds_out):
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)
    bb = vu.block_to_bb(block)

    labels = ds_in[bb]

    # we can't encode the paintera ignore label
    paintera_ignore_label = 18446744073709551615
    pignore_mask = labels == paintera_ignore_label
    if pignore_mask.sum() > 0:
        labels[pignore_mask] = 0

    if labels.sum() == 0:
        fu.log("block %i is empty" % block_id)
        fu.log_block_success(block_id)
        return

    # compute multiset from input labels
    multiset = create_multiset_from_labels(labels)
    ser = serialize_multiset(multiset)

    chunk_id = tuple(bs // ch for bs, ch in zip(block.begin, ds_out.chunks))
    ds_out.write_chunk(chunk_id, ser, True)
    fu.log_block_success(block_id)
Example #9
def _upsample_block(block_id, blocking, halo,
                    ds_in, ds_out, ds_skel,
                    scale_factor, pixel_pitch):
    fu.log("start processing block %i" % block_id)
    if halo is None:
        block = blocking.getBlock(block_id)
        inner_bb = outer_bb = vu.block_to_bb(block)
        local_bb = np.s_[:]
    else:
        block = blocking.getBlockWithHalo(block_id, halo)
        inner_bb = vu.block_to_bb(block.innerBlock)
        outer_bb = vu.block_to_bb(block.outerBlock)
        local_bb = vu.block_to_bb(block.innerBlockLocal)

    # load the segmentation
    seg = ds_in[outer_bb]
    skels_out = np.zeros_like(seg, dtype='uint64')

    # find the bounding box for downsampled skeletons
    skel_bb = tuple(slice(b.start // scale,
                          int(ceil(b.stop / scale)))
                    for b, scale in zip(outer_bb, scale_factor))
    skels = ds_skel[skel_bb]

    # get ids of skeletons in this block (excluding zeros)
    ids = np.unique(skels)[1:]
    for skel_id in ids:
        upsampled_skel = _upsample_skeleton(skel_id, seg,
                                            skels, scale_factor)
        skels_out += upsampled_skel

    ds_out[inner_bb] = skels_out[local_bb]
    # log block success
    fu.log_block_success(block_id)
Example #10
def _embedding_distances_block(block_id, blocking,
                               input_datasets, ds, offsets,
                               norm):
    fu.log("start processing block %i" % block_id)
    halo = np.max(np.abs(offsets), axis=0)

    block = blocking.getBlockWithHalo(block_id, halo.tolist())
    outer_bb = vu.block_to_bb(block.outerBlock)
    inner_bb = (slice(None),) + vu.block_to_bb(block.innerBlock)
    local_bb = (slice(None),) + vu.block_to_bb(block.innerBlockLocal)

    bshape = tuple(ob.stop - ob.start for ob in outer_bb)
    # TODO support multi-channel input data
    n_inchannels = len(input_datasets)
    in_shape = (n_inchannels,) + bshape
    in_data = np.zeros(in_shape, dtype='float32')

    for chan, inds in enumerate(input_datasets):
        in_data[chan] = inds[outer_bb]

    # TODO support thresholding the embedding before distance calculation
    distance = compute_embedding_distances(in_data, offsets, norm)
    ds[inner_bb] = distance[local_bb]

    fu.log_block_success(block_id)
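
compute_embedding_distances is not shown here; a plausible reading is that, for each offset, it compares every voxel's embedding vector with the embedding at the shifted position. A numpy sketch of that semantics (the wrap-around at the borders caused by np.roll is a simplification a real implementation would mask out):

import numpy as np

def embedding_distances_sketch(embeddings, offsets, norm='l2'):
    # embeddings: (C, Z, Y, X) -> distances: (len(offsets), Z, Y, X)
    distances = []
    for off in offsets:
        shifted = np.roll(embeddings, shift=tuple(-o for o in off), axis=(1, 2, 3))
        diff = embeddings - shifted
        if norm == 'l2':
            dist = np.sqrt((diff ** 2).sum(axis=0))
        else:
            dist = np.abs(diff).sum(axis=0)
        distances.append(dist)
    return np.stack(distances)
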
Example #11
def _ws_block(blocking, block_id, ds_in, ds_out, config, pass_):
    fu.log("start processing block %i" % block_id)
    input_bb, inner_bb, output_bb = _get_bbs(blocking, block_id,
                                             config)
    input_ = _read_data(ds_in, input_bb, config)

    # apply distance transform
    dt = _apply_dt(input_, config)

    # get offset to make new seeds unique between blocks
    # (we need to relabel later to make processing efficient !)
    offset = block_id * np.prod(blocking.blockShape)

    # check which pass we are in and apply the according watershed
    if pass_ in (1, None):
        # single-pass watershed or first pass of two-pass watershed:
        # -> apply normal ws and write the results to the inner volume
        ws = _apply_watershed(input_, dt, offset, config)
        ds_out[output_bb] = ws[inner_bb]
    else:
        # second pass of two pass watershed -> apply ws with initial seeds
        # write the results to the inner volume
        if len(input_bb) == 4:
            input_bb = input_bb[1:]
        initial_seeds = ds_out[input_bb]
        ws = _apply_watershed_with_seeds(input_, dt,
                                         offset, initial_seeds, config)
        ds_out[output_bb] = ws[inner_bb]

    # log block success
    fu.log_block_success(block_id)
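
_apply_dt and _apply_watershed are defined elsewhere. As a rough orientation, a distance-transform watershed of this kind usually thresholds the boundary map, computes a distance transform, and seeds the watershed at high-distance regions. A hedged sketch with scipy/skimage; the helper names, parameters, and default values are assumptions (the real helpers read their parameters from config):

import numpy as np
from scipy import ndimage
from skimage.segmentation import watershed

def apply_dt_sketch(boundaries, threshold=0.5, sigma=2.0):
    # distance to the thresholded boundaries, lightly smoothed
    dt = ndimage.distance_transform_edt(boundaries < threshold)
    return ndimage.gaussian_filter(dt, sigma)

def apply_watershed_sketch(boundaries, dt, offset, seed_distance=10.):
    seeds, _ = ndimage.label(dt > seed_distance)
    seeds = seeds.astype('int64')
    seeds[seeds != 0] += offset       # make seeds unique across blocks
    return watershed(boundaries, markers=seeds)
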
Example #12
def _transform_block(ds_in,
                     ds_out,
                     transformation,
                     blocking,
                     block_id,
                     mask=None):
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)

    bb = vu.block_to_bb(block)
    if mask is not None:
        bb_mask = mask[bb].astype('bool')
        if bb_mask.sum() == 0:
            fu.log_block_success(block_id)
            return
    else:
        bb_mask = None

    data = ds_in[bb]
    if len(transformation) == 2:
        data = _transform_data(data, transformation['a'], transformation['b'],
                               bb_mask)
    else:
        z_offset = block.begin[0]
        for z in range(data.shape[0]):
            trafo = transformation[z + z_offset]
            data[z] = _transform_data(data[z], trafo['a'], trafo['b'],
                                      None if bb_mask is None else bb_mask[z])

    ds_out[bb] = data
    fu.log_block_success(block_id)
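
_transform_data is not shown; given the 'a'/'b' keys, it plausibly applies an affine intensity transform a*x + b, optionally restricted to a mask. A hypothetical stand-in:

import numpy as np

def transform_data_sketch(data, a, b, mask=None):
    # affine intensity transform, applied inside the mask only if one is given
    transformed = a * data.astype('float32') + b
    if mask is None:
        return transformed
    out = data.astype('float32')
    out[mask] = transformed[mask]
    return out
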
Example #13
def _failing_block(block_id, blocking, ds, n_retries):
    # fail for odd block ids if we are in the first try
    if n_retries == 0 and block_id % 2 == 1:
        raise RuntimeError("Fail")
    bb = vu.block_to_bb(blocking.getBlock(block_id))
    ds[bb] = 1
    fu.log_block_success(block_id)
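
_failing_block is clearly a test fixture for a retry mechanism: it fails on odd block ids in the first attempt and succeeds once n_retries is positive. A sketch of the driver loop such a fixture would exercise (all names here are mine):

def process_with_retries(block_list, max_retries, process_block):
    for n_retries in range(max_retries + 1):
        failed = []
        for block_id in block_list:
            try:
                process_block(block_id, n_retries)
            except RuntimeError:
                failed.append(block_id)
        if not failed:
            return
        block_list = failed    # retry only the blocks that failed
    raise RuntimeError("blocks still failing after %i retries" % max_retries)
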
Example #14
def _copy_blocks(ds_in, ds_out, blocking, block_list, roi_begin,
                 reduce_function):
    dtype = ds_out.dtype
    for block_id in block_list:
        fu.log("start processing block %i" % block_id)

        block = blocking.getBlock(block_id)
        bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
        if ds_in.ndim == 4:
            bb = (slice(None), ) + bb

        data = ds_in[bb]
        # don't write empty blocks
        if data.sum() == 0:
            fu.log_block_success(block_id)
            continue

        # if we have a roi begin, we need to subtract it
        # from the output bounding box, because in this case
        # the output shape has been fit to the roi
        if roi_begin is not None:
            bb = tuple(
                slice(b.start - off, b.stop - off)
                for b, off in zip(bb, roi_begin))

        if reduce_function is not None and data.ndim == 4:
            data = reduce_function(data[0:3], axis=0)
            bb = bb[1:]

        ds_out[bb] = cast_type(data, dtype)
        fu.log_block_success(block_id)
Example #15
def _morphology_for_block(block_id, blocking, ds_in,
                          output_path, output_key):
    fu.log("start processing block %i" % block_id)
    # read labels and input in this block
    block = blocking.getBlock(block_id)
    bb = vu.block_to_bb(block)
    seg = ds_in[bb]

    # check if segmentation block is empty
    if seg.sum() == 0:
        fu.log("block %i is empty" % block_id)
        fu.log_block_success(block_id)
        return

    chunk_id = tuple(beg // ch
                     for beg, ch in zip(block.begin,
                                        blocking.blockShape))
    # extract and save simple morphology:
    # - size of segments                 1
    # - center of mass of segments       2:5
    # - minimum coordinates of segments  5:8
    # - maximum coordinates of segments  8:11
    # [:,0] is the label id
    ndist.computeAndSerializeMorphology(seg, block.begin,
                                        output_path, output_key,
                                        chunk_id)
    fu.log_block_success(block_id)
Example #16
def _mws_block_pass1(block_id, blocking, ds_in, ds_out, mask, offsets, strides,
                     randomize_strides, halo, noise_level, max_block_id,
                     tmp_folder):
    fu.log("(Pass1) start processing block %i" % block_id)

    block = blocking.getBlockWithHalo(block_id, halo)
    in_bb = vu.block_to_bb(block.outerBlock)

    if mask is None:
        bb_mask = None
    else:
        bb_mask = mask[in_bb].astype('bool')
        if np.sum(bb_mask) == 0:
            fu.log_block_success(block_id)
            return

    aff_bb = (slice(None), ) + in_bb
    affs = vu.normalize(ds_in[aff_bb])

    seg = mutex_watershed(affs,
                          offsets,
                          strides=strides,
                          mask=bb_mask,
                          randomize_strides=randomize_strides,
                          noise_level=noise_level)

    out_bb = vu.block_to_bb(block.innerBlock)
    local_bb = vu.block_to_bb(block.innerBlockLocal)
    seg = seg[local_bb]

    # FIXME once vigra supports uint64 or we implement our own ...
    # seg = vigra.analysis.labelVolumeWithBackground(seg)

    # offset to make the ids unique between blocks
    offset_id = block_id * np.prod(blocking.blockShape)
    vigra.analysis.relabelConsecutive(seg,
                                      start_label=offset_id,
                                      keep_zeros=True,
                                      out=seg)
    ds_out[out_bb] = seg

    # get the state of the segmentation of this block
    grid_graph = compute_grid_graph(seg.shape, mask=bb_mask)
    affs = affs[(slice(None), ) + local_bb]
    # FIXME this function yields incorrect uv-ids !
    state_uvs, state_weights, state_attractive = grid_graph.compute_state_for_segmentation(
        affs, seg, offsets, n_attractive_channels=3, ignore_label=True)
    # serialize the states
    save_path = os.path.join(tmp_folder, 'seg_state_block%i.h5' % block_id)
    with vu.file_reader(save_path) as f:
        f.create_dataset('edges', data=state_uvs)
        f.create_dataset('weights', data=state_weights)
        f.create_dataset('attractive_edge_mask', data=state_attractive)

    # write max-id for the last block
    if block_id == max_block_id:
        _write_nlabels(ds_out, seg)
    # log block success
    fu.log_block_success(block_id)
Example #17
def _write_block_res(ds_in, ds_out, block_id, blocking, block_res):
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)
    bb = vu.block_to_bb(block)
    ws = ds_in[bb]

    seg = nt.takeDict(block_res, ws)
    ds_out[bb] = seg
    fu.log_block_success(block_id)
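
nt.takeDict maps every id in ws through the dictionary block_res. For orientation, a numpy-only equivalent under the assumption that the ids are non-negative and reasonably dense (the function name is mine):

import numpy as np

def take_dict_sketch(mapping, ws):
    # build a lookup table from the dict, then apply it with fancy indexing
    keys = np.fromiter(mapping.keys(), dtype='uint64')
    values = np.fromiter(mapping.values(), dtype='uint64')
    lookup = np.zeros(int(keys.max()) + 1, dtype='uint64')
    lookup[keys] = values
    return lookup[ws]
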
Example #18
def stack_block(block_id, blocking, ds_raw, ds_pred, ds_out, dtype):
    fu.log("start processing block %i" % block_id)
    bb = vu.block_to_bb(blocking.getBlock(block_id))
    raw = cast(ds_raw[bb], dtype)
    bb = (slice(None), ) + bb
    pred = cast(ds_pred[bb], dtype)
    out = np.concatenate([raw[None], pred], axis=0)
    ds_out[bb] = out
    fu.log_block_success(block_id)
Example #19
def _minfilter_block(block_id, blocking, halo, ds_in, ds_out, filter_shape):
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlockWithHalo(block_id, halo)
    outer_roi = vu.block_to_bb(block.outerBlock)
    inner_roi = vu.block_to_bb(block.innerBlock)
    local_roi = vu.block_to_bb(block.innerBlockLocal)
    mask = ds_in[outer_roi]
    min_filter_mask = minimum_filter(mask, size=filter_shape)
    ds_out[inner_roi] = min_filter_mask[local_roi]
    fu.log_block_success(block_id)
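
This function is the cleanest instance of the halo pattern used throughout these examples: read the outerBlock (block plus halo) so the filter sees enough context, then write only the innerBlock, cropped via innerBlockLocal. A small demonstration of the three bounding boxes, assuming nifty.tools; the printed values show the expected clipping at the volume border:

import nifty.tools as nt

blocking = nt.blocking([0, 0, 0], [100, 100, 100], [50, 50, 50])
block = blocking.getBlockWithHalo(7, [5, 5, 5])    # the last corner block

print(block.outerBlock.begin, block.outerBlock.end)            # [45,45,45] [100,100,100]
print(block.innerBlock.begin, block.innerBlock.end)            # [50,50,50] [100,100,100]
print(block.innerBlockLocal.begin, block.innerBlockLocal.end)  # [5,5,5] [55,55,55]
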
Example #20
def _insert_affinities_block(block_id, blocking, ds_in, ds_out, objects, offsets,
                             erode_by, erode_3d, zero_objects_list, dilate_by):
    fu.log("start processing block %i" % block_id)
    halo = np.max(np.abs(offsets), axis=0).tolist()
    if erode_3d:
        halo = [max(ha, erode_by) for ha in halo]
    else:
        halo = [ha if axis == 0 else max(ha, erode_by)
                for axis, ha in enumerate(halo)]

    block = blocking.getBlockWithHalo(block_id, halo)
    outer_bb = vu.block_to_bb(block.outerBlock)
    inner_bb = (slice(None),) + vu.block_to_bb(block.innerBlock)
    local_bb = (slice(None),) + vu.block_to_bb(block.innerBlockLocal)

    # load objects and check if we have any in this block
    # catch run-time error for singleton dimension
    try:
        objs = objects[outer_bb]
        obj_sum = objs.sum()
    except RuntimeError:
        obj_sum = 0

    # if we don't have objs, just copy the affinities
    if obj_sum == 0:
        ds_out[inner_bb] = ds_in[inner_bb]
        fu.log_block_success(block_id)
        return

    outer_bb = (slice(None),) + outer_bb
    affs = ds_in[outer_bb]

    # fit object to hmap derived from affinities via shrinking and watershed
    if erode_by > 0:
        objs, obj_ids = vu.fit_to_hmap(objs, affs[0].copy(), erode_by, erode_3d)
    else:
        obj_ids = np.unique(objs)
        if 0 in obj_ids:
            obj_ids = obj_ids[1:]

    # insert affinities to objs into the original affinities
    affs = _insert_affinities(affs, objs.astype('uint64'), offsets, dilate_by)

    # zero out some affs if necessary
    if zero_objects_list is not None:
        zero_ids = obj_ids[np.in1d(obj_ids, zero_objects_list)]
        if zero_ids.size:
            for zero_id in zero_ids:
                # erode the mask to avoid ugly boundary artifacts
                zero_mask = binary_erosion(objs == zero_id, iterations=4)
                affs[:, zero_mask] = 0

    ds_out[inner_bb] = affs[local_bb]
    fu.log_block_success(block_id)
Example #21
def _apply_filter(blocking, block_id, ds_in, ds_out,
                  halo, filter_name, sigma, apply_in_2d):
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlockWithHalo(block_id, halo)
    bb_in = vu.block_to_bb(block.outerBlock)
    input_ = vu.normalize(ds_in[bb_in])
    response = vu.apply_filter(input_, filter_name, sigma, apply_in_2d)
    bb_out = vu.block_to_bb(block.innerBlock)
    inner_bb = vu.block_to_bb(block.innerBlockLocal)
    ds_out[bb_out] = response[inner_bb]
    fu.log_block_success(block_id)
Example #22
def _block_features(block_id, blocking, ds_in, ds_labels, ds_out, ignore_label,
                    channel, feature_names):
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)
    bb = vu.block_to_bb(block)

    labels = ds_labels[bb]

    # check if we have an ignore label and return
    # if this block is purely ignore label
    if ignore_label is not None:
        if np.sum(labels != ignore_label) == 0:
            fu.log_block_success(block_id)
            return

    # get global normalization values
    min_val = 0
    max_val = 255. if ds_in.dtype == np.dtype('uint8') else 1.

    bb_in = bb if channel is None else (channel, ) + bb
    input_ = ds_in[bb_in]
    input_ = vu.normalize(input_, min_val, max_val)

    ids = np.unique(labels)
    if ids[0] == 0:
        feat_slice = np.s_[:]
        exp_len = len(ids)
    else:
        feat_slice = np.s_[1:]
        exp_len = len(ids) + 1

    # relabel consecutive in order to save memory
    labels = relabel_sequential(labels, ids)

    feats = vigra.analysis.extractRegionFeatures(input_,
                                                 labels.astype('uint32'),
                                                 features=feature_names,
                                                 ignoreLabel=ignore_label)
    assert len(feats['count']) == exp_len, "%i, %i" % (len(feats['count']), exp_len)

    # make serialization
    n_cols = len(feature_names) + 1
    data = np.zeros(n_cols * len(ids), dtype='float32')
    # write the ids
    data[::n_cols] = ids.astype('float32')
    # write all the features
    for feat_id, feat_name in enumerate(feature_names, 1):
        data[feat_id::n_cols] = feats[feat_name][feat_slice]

    chunks = blocking.blockShape
    chunk_id = tuple(b.start // ch for b, ch in zip(bb, chunks))
    ds_out.write_chunk(chunk_id, data, True)
    fu.log_block_success(block_id)
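
The serialization interleaves each label id with its feature values, so every chunk is a flat float32 array of length n_ids * (n_features + 1). A small decoder for that layout (the function name is mine):

import numpy as np

def decode_feature_chunk(data, feature_names):
    # invert the interleaved serialization: one row per label id
    n_cols = len(feature_names) + 1
    table = data.reshape(-1, n_cols)
    ids = table[:, 0].astype('uint64')
    feats = {name: table[:, col] for col, name in enumerate(feature_names, 1)}
    return ids, feats
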
Example #23
def _label_to_block_mapping(input_path, input_key, output_path, output_key,
                            blocking, block_list):
    for block_id in block_list:
        id_space_block = blocking.getBlock(block_id)
        id_start, id_stop = id_space_block.begin[0], id_space_block.end[0]
        fu.log("serializing ids in block %i with labels from %i to %i" %
               (block_id, id_start, id_stop))
        ndist.serializeBlockMapping(os.path.join(input_path, input_key),
                                    os.path.join(output_path, output_key),
                                    id_start, id_stop)
        fu.log_block_success(block_id)
Example #24
def _upsample_block(blocking, block_id, ds_in, ds_out, scale_factor, sampler):
    fu.log("start processing block %i" % block_id)

    # load the block (output dataset / upscaled) coordinates
    block = blocking.getBlock(block_id)
    local_bb = np.s_[:]
    in_bb = vu.block_to_bb(block)
    out_bb = vu.block_to_bb(block)
    out_shape = block.shape

    # upsample the input bounding box
    if isinstance(scale_factor, int):
        in_bb = tuple(slice(int(ib.start // scale_factor),
                            min(int(ceil(ib.stop / scale_factor)), sh))
                      for ib, sh in zip(in_bb, ds_in.shape))
    else:
        in_bb = tuple(slice(int(ib.start // sf),
                            min(int(ceil(ib.stop / sf)), sh))
                      for ib, sf, sh in zip(in_bb, scale_factor, ds_in.shape))

    x = ds_in[in_bb]

    # don't sample empty blocks
    if np.sum(x != 0) == 0:
        fu.log_block_success(block_id)
        return

    dtype = x.dtype
    if np.dtype(dtype) != np.dtype('float32'):
        x = x.astype('float32')

    if isinstance(scale_factor, int):
        out = sampler(x, shape=out_shape)
    else:
        out = np.zeros(out_shape, dtype='float32')
        for z in range(out_shape[0]):
            out[z] = sampler(x[z], shape=out_shape[1:])

    if np.dtype(dtype) in (np.dtype('uint8'), np.dtype('uint16')):
        max_val = np.iinfo(np.dtype(dtype)).max
        np.clip(out, 0, max_val, out=out)
        np.round(out, out=out)

    try:
        ds_out[out_bb] = out[local_bb].astype(dtype)
    except IndexError:
        raise IndexError("%s, %s, %s" %
                         (str(out_bb), str(local_bb), str(out.shape)))

    # log block success
    fu.log_block_success(block_id)
Example #25
def _merge_subblocks(block_id, blocking, previous_blocking, graph_path, scale):
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)
    input_key = 'sub_graphs/s%i/block_' % (scale - 1, )
    output_key = 'sub_graphs/s%i/block_%i' % (scale, block_id)
    block_list = previous_blocking.getBlockIdsInBoundingBox(
        roiBegin=block.begin, roiEnd=block.end, blockHalo=[0, 0, 0])
    ndist.mergeSubgraphs(graph_path,
                         blockPrefix=input_key,
                         blockIds=block_list.tolist(),
                         outKey=output_key)
    # log block success
    fu.log_block_success(block_id)
Example #26
def _write_block(ds_in, ds_out, blocking, block_id, node_labels):
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)
    bb = vu.block_to_bb(block)
    seg = ds_in[bb]
    # check if this block is empty and don't write if it is
    if np.sum(seg != 0) == 0:
        fu.log_block_success(block_id)
        return

    seg = _apply_node_labels(seg, node_labels)
    ds_out[bb] = seg
    fu.log_block_success(block_id)
Example #27
def uniques_in_block(block_id, blocking, ds, return_counts):
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)
    bb = vu.block_to_bb(block)
    labels = ds[bb]

    if return_counts:
        uniques, counts = np.unique(labels, return_counts=True)
        fu.log_block_success(block_id)
        return uniques, counts
    else:
        uniques = nt.unique(labels)
        fu.log_block_success(block_id)
        return uniques
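
A driver reduces the per-block return values into a global set of ids, e.g. by concatenating and deduplicating; with return_counts=True, the counts of duplicate ids would additionally be summed. A numpy one-liner for the id-only case (block results are made up):

import numpy as np

results = [np.array([0, 1, 4]), np.array([1, 2]), np.array([4, 7])]
print(np.unique(np.concatenate(results)))    # -> [0 1 2 4 7]
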
Example #28
def _merge_subblocks(block_id, blocking, previous_blocking, graph_path,
                     output_key, scale):
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)
    input_key = 's%i/sub_graphs' % (scale - 1, )
    block_list = previous_blocking.getBlockIdsInBoundingBox(
        roiBegin=block.begin, roiEnd=block.end, blockHalo=[0, 0, 0])
    ndist.mergeSubgraphs(graph_path,
                         subgraphKey=input_key,
                         blockIds=block_list.tolist(),
                         outKey=output_key,
                         serializeToVarlen=True)
    # log block success
    fu.log_block_success(block_id)
Example #29
def _write_block_with_offsets(ds_in, ds_out, blocking, block_id, node_labels,
                              offsets):
    fu.log("start processing block %i" % block_id)
    off = offsets[block_id]
    block = blocking.getBlock(block_id)
    bb = vu.block_to_bb(block)
    seg = ds_in[bb]
    seg[seg != 0] += off
    # choose the appropriate function for array or dictionary
    if isinstance(node_labels, np.ndarray):
        seg = nt.take(node_labels, seg)
    else:
        seg = nt.takeDict(node_labels, seg)
    ds_out[bb] = seg
    fu.log_block_success(block_id)
Example #30
def _uniques_default(ds, ds_out, blocking, block_list):
    for block_id in block_list:
        block_coord = blocking.getBlock(block_id).begin
        chunk_id = tuple(bl // ch for bl, ch in zip(block_coord, ds.chunks))

        labels = ds.read_chunk(chunk_id)
        if labels is None:
            # TODO can we skip blocks with only 0 as label in paintera format ??
            # fu.log_block_success(block_id)
            # return
            uniques = np.zeros(1, dtype='uint64')
        else:
            uniques = np.unique(labels)
        ds_out.write_chunk(chunk_id, uniques, True)
        fu.log_block_success(block_id)