Code example #1
def merge_region_features(job_id, config_path):
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    # get the config
    with open(config_path, 'r') as f:
        config = json.load(f)
    output_path = config['output_path']
    output_key = config['output_key']
    tmp_path = config['tmp_path']
    tmp_key = config['tmp_key']
    node_block_list = config['block_list']
    node_chunk_size = config['node_chunk_size']

    with vu.file_reader(output_path) as f,\
            vu.file_reader(tmp_path) as f_in:

        ds_in = f_in[tmp_key]
        ds = f[output_key]
        n_nodes = ds.shape[0]

        node_blocking = nt.blocking([0], [n_nodes], [node_chunk_size])
        node_begin = node_blocking.getBlock(node_block_list[0]).begin[0]
        node_end = node_blocking.getBlock(node_block_list[-1]).end[0]

        shape = list(ds_in.shape)
        chunks = list(ds_in.chunks)
        blocking = nt.blocking([0, 0, 0], shape, chunks)

        _extract_and_merge_region_features(blocking, ds_in, ds, node_begin, node_end)

    fu.log_job_success(job_id)
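
All of these examples revolve around nt.blocking from nifty.tools. A minimal sketch of the 1D pattern used above, with invented values for illustration (assuming nifty.tools is importable as nt, as in the snippets on this page):

import nifty.tools as nt

# nt.blocking partitions the node index range [0, n_nodes) into chunks of
# node_chunk_size; a job then converts its (contiguous) block list into a
# single [node_begin, node_end) node range, as merge_region_features does.
n_nodes = 25000
node_chunk_size = 10000
node_blocking = nt.blocking([0], [n_nodes], [node_chunk_size])
print(node_blocking.numberOfBlocks)  # 3: [0, 10000), [10000, 20000), [20000, 25000)

node_block_list = [1, 2]  # the blocks assigned to this job
node_begin = node_blocking.getBlock(node_block_list[0]).begin[0]  # 10000
node_end = node_blocking.getBlock(node_block_list[-1]).end[0]     # 25000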
Code example #2
File: test_blocking.py Project: cj401/nifty
def testBlockingBlocksPerAxis():

    roiBegin =   [0,0,0]
    roiEnd =     [5,6,7]
    blockShape = [3,3,3]
    blockShift = [0,0,0]

    blocking = nt.blocking(roiBegin=roiBegin,roiEnd=roiEnd,
                             blockShape=blockShape, blockShift=blockShift)


    blocksPerAxis = blocking.blocksPerAxis

    assert_equals(blocksPerAxis[0],2)
    assert_equals(blocksPerAxis[1],2)
    assert_equals(blocksPerAxis[2],3)



    roiBegin =   [0,0,0]
    roiEnd =     [5,6,7]
    blockShape = [3,3,3]
    blockShift = [0,1,0]

    blocking = nt.blocking(roiBegin=roiBegin,roiEnd=roiEnd,
                             blockShape=blockShape, blockShift=blockShift)


    blocksPerAxis = blocking.blocksPerAxis

    assert_equals(blocksPerAxis[0],2)
    assert_equals(blocksPerAxis[1],3)
    assert_equals(blocksPerAxis[2],3)
Code example #3
File: test_blocking.py Project: DerThorsten/nifty
def testBlockingBlocksPerAxis():

    roiBegin =   [0,0,0]
    roiEnd =     [5,6,7]
    blockShape = [3,3,3]
    blockShift = [0,0,0]

    blocking = nt.blocking(roiBegin=roiBegin,roiEnd=roiEnd,
                             blockShape=blockShape, blockShift=blockShift)


    blocksPerAxis = blocking.blocksPerAxis

    assert_equals(blocksPerAxis[0],2)
    assert_equals(blocksPerAxis[1],2)
    assert_equals(blocksPerAxis[2],3)



    roiBegin =   [0,0,0]
    roiEnd =     [5,6,7]
    blockShape = [3,3,3]
    blockShift = [0,1,0]

    blocking = nt.blocking(roiBegin=roiBegin,roiEnd=roiEnd,
                             blockShape=blockShape, blockShift=blockShift)


    blocksPerAxis = blocking.blocksPerAxis

    assert_equals(blocksPerAxis[0],2)
    assert_equals(blocksPerAxis[1],3)
    assert_equals(blocksPerAxis[2],3)
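
The blocksPerAxis values asserted in the two tests above follow from ceiling division of the ROI extent by the block shape; a positive blockShift shortens the first block along that axis, which can add one more block. A small sketch that reproduces the asserted counts (the exact blockShift semantics in nifty may differ in edge cases):

import math

def blocks_per_axis(roiBegin, roiEnd, blockShape, blockShift):
    # one extra "virtual" voxel per unit of shift along each axis
    return [math.ceil((end - begin + shift) / bs)
            for begin, end, bs, shift in zip(roiBegin, roiEnd, blockShape, blockShift)]

print(blocks_per_axis([0, 0, 0], [5, 6, 7], [3, 3, 3], [0, 0, 0]))  # [2, 2, 3]
print(blocks_per_axis([0, 0, 0], [5, 6, 7], [3, 3, 3], [0, 1, 0]))  # [2, 3, 3]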
Code example #4
def linear(job_id, config_path):
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)
    with open(config_path, 'r') as f:
        config = json.load(f)

    # read the input config
    input_path = config['input_path']
    input_key = config['input_key']

    block_shape = list(config['block_shape'])
    block_list = config['block_list']

    # read the output config and path to transformation
    output_path = config['output_path']
    output_key = config['output_key']
    trafo_file = config['transformation']

    mask_path = config['mask_path']
    mask_key = config['mask_key']

    # default to no mask; otherwise _transform_linear would be called
    # with an undefined name when no mask is configured
    mask = None
    if mask_path != '':
        assert mask_key != ''
        with vu.file_reader(input_path, 'r') as f:
            in_shape = f[input_key].shape
        mask = vu.load_mask(mask_path, mask_key, in_shape)

    same_file = input_path == output_path
    in_place = same_file and (input_key == output_key)

    # submit blocks
    if same_file:
        with vu.file_reader(input_path) as f:
            ds_in = f[input_key]
            ds_out = ds_in if in_place else f[output_key]

            shape = list(ds_in.shape)
            trafo = _load_transformation(trafo_file, shape)

            blocking = nt.blocking([0, 0, 0], shape, block_shape)
            _transform_linear(ds_in, ds_out, trafo, blocking, block_list, mask)

    else:
        with vu.file_reader(input_path,
                            'r') as f_in, vu.file_reader(output_path) as f_out:
            ds_in = f_in[input_key]
            ds_out = f_out[output_key]

            shape = list(ds_in.shape)
            trafo = _load_transformation(trafo_file, shape)

            blocking = nt.blocking([0, 0, 0], shape, block_shape)
            _transform_linear(ds_in, ds_out, trafo, blocking, block_list, mask)

    # log success
    fu.log_job_success(job_id)
Code example #5
def downscale_multiset(job_id, config_path):
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)
    with open(config_path, 'r') as f:
        config = json.load(f)

    # read the input config
    input_path = config['input_path']
    input_key = config['input_key']
    restrict_set = config['restrict_set']
    restrict_set = -1 if restrict_set is None else restrict_set
    scale_factor = config['scale_factor']
    scale_factor = scale_factor if isinstance(scale_factor,
                                              list) else [scale_factor] * 3

    block_shape = list(config['block_shape'])
    block_list = config['block_list']

    # read the output config
    output_path = config['output_path']
    output_key = config['output_key']
    shape = list(vu.get_shape(output_path, output_key))
    prev_shape = list(vu.get_shape(input_path, input_key))

    # NOTE for now, we assume that the block_shape stays constant throughout
    # the scale levels
    # get the blocking for this scale level
    blocking = nt.blocking([0, 0, 0], shape, block_shape)
    # get the blocking for the previous scale level
    blocking_prev = nt.blocking([0, 0, 0], prev_shape, block_shape)

    effective_scale_factor = config['effective_scale_factor']
    # we need the effective pixel size of the previous scale level here,
    # which we get by dividing the product of the current effective scale
    # factor by the product of this level's scale factor
    effective_pixel_size = int(
        np.prod(effective_scale_factor) / np.prod(scale_factor))

    # submit blocks
    with vu.file_reader(input_path,
                        'r') as f_in, vu.file_reader(output_path) as f_out:
        ds_in = f_in[input_key]
        ds_out = f_out[output_key]

        for block_id in block_list:
            _downscale_multiset_block(blocking, block_id, ds_in, ds_out,
                                      blocking_prev, scale_factor,
                                      restrict_set, effective_pixel_size)

        if job_id == 0:
            write_metadata(ds_out, restrict_set, effective_scale_factor)

    # log success
    fu.log_job_success(job_id)
Code example #6
def merge_region_features(job_id, config_path):
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    # get the config
    with open(config_path, 'r') as f:
        config = json.load(f)
    output_path = config['output_path']
    output_key = config['output_key']
    tmp_path = config['tmp_path']
    tmp_key = config['tmp_key']
    feature_list = config['feature_list']
    num_feature_vals = config['num_feature_vals']

    number_of_labels = read_number_of_labels(tmp_path, tmp_key)

    # require the output dataset
    node_chunk_size = min(10000, number_of_labels)

    with vu.file_reader(output_path) as f,\
            vu.file_reader(tmp_path) as f_in:

        f.require_dataset(output_key,
                          dtype='float32',
                          shape=(number_of_labels * num_feature_vals, ),
                          chunks=(node_chunk_size, ),
                          compression='gzip')
        ds_in = f_in[tmp_key]
        ds = f[output_key]
        ds.attrs['num_features'] = num_feature_vals
        n_nodes = ds.shape[0]

        shape = list(ds_in.shape)
        chunks = list(ds_in.chunks)
        blocking = nt.blocking([0, 0, 0], shape, chunks)

        if blocking.numberOfBlocks == 1:
            _extract_single_block_region_features(ds_in, ds, 0,
                                                  number_of_labels,
                                                  feature_list,
                                                  num_feature_vals)
        else:
            node_block_list = config['block_list']
            node_blocking = nt.blocking([0], [n_nodes], [node_chunk_size])
            node_begin = node_blocking.getBlock(node_block_list[0]).begin[0]
            node_end = node_blocking.getBlock(node_block_list[-1]).end[0]
            _extract_and_merge_region_features(blocking, ds_in, ds, node_begin,
                                               node_end, feature_list,
                                               num_feature_vals)

    fu.log_job_success(job_id)
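
Given the flat output shape (number_of_labels * num_feature_vals,) and the 'num_features' attribute written above, the feature values of each node are presumably stored contiguously. A hedged sketch of reading them back (node_id is an arbitrary example value):

with vu.file_reader(output_path, 'r') as f:
    ds = f[output_key]
    num_feats = ds.attrs['num_features']
    node_id = 42  # arbitrary example node
    # assumes node features are laid out contiguously per node
    node_feats = ds[node_id * num_feats:(node_id + 1) * num_feats]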
Code example #7
def check_sub_graphs(job_id, config_path):
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    # read the config
    with open(config_path) as f:
        config = json.load(f)
    ws_path = config['ws_path']
    ws_key = config['ws_key']
    graph_block_prefix = config['graph_block_prefix']
    block_shape = config['block_shape']
    block_list = config['block_list']
    tmp_folder = config['tmp_folder']

    with vu.file_reader(ws_path, 'r') as f:
        ds = f[ws_key]
        shape = list(ds.shape)
        blocking = nt.blocking([0, 0, 0], shape, block_shape)
        violating_blocks = [
            check_block(block_id, blocking, ds, graph_block_prefix)
            for block_id in block_list
        ]
        violating_blocks = [vb for vb in violating_blocks if vb is not None]
    save_path = os.path.join(tmp_folder, 'failed_blocks_job_%i.json' % job_id)
    with open(save_path, 'w') as f:
        json.dump(violating_blocks, f)

    # log success
    fu.log_job_success(job_id)
Code example #8
File: debug.py Project: mobie/platybrowser-datasets
    def check_block(block_id):
        labels = labels_to_blocks[block_id]
        if len(labels) == 0:
            return []

        assignment_mask = np.isin(assignments[:, 1], labels)
        assert assignment_mask.sum() > 0
        block_assignments = assignments[assignment_mask]
        block_ws_labels = block_assignments[:, 0]

        roi_begin, roi_end = rois_to_blocks[block_id]
        blocking_seg = nt.blocking(roi_begin, roi_end, check_block_shape)

        this_ids = set()
        for check_block_id in range(blocking_seg.numberOfBlocks):
            check_block = blocking_seg.getBlock(check_block_id)
            check_bb = tuple(
                slice(beg, end)
                for beg, end in zip(check_block.begin, check_block.end))
            seg = ds_seg[check_bb]
            ids = np.unique(seg)
            this_ids.update(ids.tolist())

        diff = set(block_ws_labels) - this_ids
        return list(diff)
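
The begin/end-to-slices pattern in check_block recurs in almost every example on this page; a tiny helper (hypothetical, not part of nifty) makes it explicit:

def block_to_bb(block):
    """Turn a nifty block into a tuple of slices usable as a numpy/z5py bounding box."""
    return tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))

# usage: seg = ds_seg[block_to_bb(blocking_seg.getBlock(check_block_id))]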
Code example #9
def _accumulate_with_filters(input_path, input_key, labels_path, labels_key,
                             output_path, graph_block_prefix, block_list,
                             block_shape, filters, sigmas, halo, apply_in_2d,
                             channel_agglomeration):

    fu.log("accumulate features with applying filters:")
    # TODO log filter and sigma values
    with vu.file_reader(input_path, 'r') as f:
        ds = f[input_key]
        input_dim = ds.ndim
        shape = ds.shape
        if input_dim == 4:
            shape = shape[1:]

    out_prefix = os.path.join(output_path, 'blocks', 'block_')
    blocking = nt.blocking([0, 0, 0], list(shape), list(block_shape))

    # determine if we have an ignore label
    with z5py.File(graph_block_prefix + str(block_list[0])) as f:
        ignore_label = f.attrs['ignoreLabel']

    with vu.file_reader(input_path) as f, vu.file_reader(labels_path) as f_l:
        ds_in = f[input_key]
        ds_labels = f_l[labels_key]
        for block_id in block_list:
            _accumulate_block(block_id, blocking, ds_in, ds_labels, out_prefix,
                              graph_block_prefix, filters, sigmas, halo,
                              ignore_label, apply_in_2d, channel_agglomeration)
Code example #10
def block_components(job_id, config_path):

    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    with open(config_path, 'r') as f:
        config = json.load(f)
    input_path = config['input_path']
    input_key = config['input_key']
    output_path = config['output_path']
    output_key = config['output_key']
    block_list = config['block_list']
    tmp_folder = config['tmp_folder']
    block_shape = config['block_shape']
    threshold = config['threshold']
    threshold_mode = config['threshold_mode']

    mask_path = config.get('mask_path', '')
    mask_key = config.get('mask_key', '')

    channel = config.get('channel', None)

    fu.log("Applying threshold %f with mode %s" % (threshold, threshold_mode))

    with vu.file_reader(input_path, 'r') as f_in,\
        vu.file_reader(output_path) as f_out:

        ds_in = f_in[input_key]
        ds_out = f_out[output_key]

        shape = ds_in.shape
        if channel is not None:
            shape = shape[1:]
        assert len(shape) == 3

        blocking = nt.blocking([0, 0, 0], list(shape), block_shape)

        if mask_path != '':
            # note that the mask is usually small enough to keep it
            # in memory (and we interpolate to get to the full volume)
            # if this does not hold, this code needs to change!
            mask = vu.load_mask(mask_path, mask_key, shape)
            offsets = [
                _cc_block_with_mask(block_id, blocking, ds_in, ds_out,
                                    threshold, threshold_mode, mask, channel)
                for block_id in block_list
            ]

        else:
            offsets = [
                _cc_block(block_id, blocking, ds_in, ds_out, threshold,
                          threshold_mode, channel) for block_id in block_list
            ]

    offset_dict = {block_id: off for block_id, off in zip(block_list, offsets)}
    save_path = os.path.join(tmp_folder,
                             'connected_components_offsets_%i.json' % job_id)
    with open(save_path, 'w') as f:
        json.dump(offset_dict, f)
    fu.log_job_success(job_id)
Code example #11
    def run_impl(self):
        # get the global config and init configs
        shebang, block_shape, roi_begin, roi_end, block_list_path = self.global_config_values(True)
        self.init(shebang)

        # get shape and make block config
        shape = vu.get_shape(self.input_path, self.input_key)
        if len(shape) == 4:
            shape = shape[1:]

        # load the watershed config
        ws_config = self.get_task_config()

        # require output dataset
        # TODO read chunks from config
        chunks = tuple(bs // 2 for bs in block_shape)
        with vu.file_reader(self.output_path) as f:
            f.require_dataset(self.output_key, shape=shape, chunks=chunks,
                              compression='gzip', dtype='uint64')

        # update the config with input and output paths and keys
        # as well as block shape
        ws_config.update({'input_path': self.input_path, 'input_key': self.input_key,
                          'output_path': self.output_path, 'output_key': self.output_key,
                          'block_shape': block_shape})

        blocking = nt.blocking([0, 0, 0], list(shape), list(block_shape))
        block_lists = vu.make_checkerboard_block_lists(blocking, roi_begin, roi_end)
        for pass_id, block_list in enumerate(block_lists):
            ws_config['pass'] = pass_id
            self._ws_pass(block_list, ws_config, 'pass_%i' % pass_id)
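
run_impl processes the watershed blocks in several passes via vu.make_checkerboard_block_lists, so that blocks touching each other never run in the same pass. A sketch of how such a partition can be derived from a blocking (the actual helper may differ, e.g. in how it honors the ROI):

def checkerboard_block_lists(blocking, block_shape):
    even, odd = [], []
    for block_id in range(blocking.numberOfBlocks):
        block = blocking.getBlock(block_id)
        # parity of the block's coordinate in the block grid; face-adjacent
        # blocks always have opposite parity
        parity = sum(beg // bs for beg, bs in zip(block.begin, block_shape)) % 2
        (even if parity == 0 else odd).append(block_id)
    return [even, odd]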
Code example #12
def unique_block_labels(job_id, config_path):
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    # read the config
    with open(config_path) as f:
        config = json.load(f)
    input_path = config['input_path']
    input_key = config['input_key']
    output_path = config['output_path']
    output_key = config['output_key']

    block_list = config['block_list']
    block_shape = config['block_shape']
    is_multiset = config['is_multiset']

    # open the input file
    with vu.file_reader(input_path,
                        'r') as f, vu.file_reader(output_path) as f_out:
        ds = f[input_key]
        ds_out = f_out[output_key]
        chunks = ds.chunks
        shape = ds.shape
        assert tuple(chunks) == tuple(block_shape),\
            "Chunks %s and block shape %s must agree" % (str(chunks), str(block_shape))

        blocking = nt.blocking([0, 0, 0], shape, block_shape)

        if is_multiset:
            _uniques_multiset(ds, ds_out, blocking, block_list)
        else:
            _uniques_default(ds, ds_out, blocking, block_list)

    # log success
    fu.log_job_success(job_id)
Code example #13
def merge_morphology(job_id, config_path):

    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    # get the config
    with open(config_path) as f:
        config = json.load(f)

    input_path = config['input_path']
    input_key = config['input_key']
    output_path = config['output_path']
    output_key = config['output_key']

    block_list = config['block_list']
    out_shape = config['out_shape']
    out_chunks = config['out_chunks']

    blocking = nt.blocking([0], out_shape[:1], out_chunks[:1])

    # merge and serialize the overlaps
    for block_id in block_list:
        block = blocking.getBlock(block_id)
        label_begin = block.begin[0]
        label_end = block.end[0]
        ndist.mergeAndSerializeMorphology(os.path.join(input_path, input_key),
                                          os.path.join(output_path, output_key),
                                          labelBegin=label_begin, labelEnd=label_end)
    fu.log_job_success(job_id)
Code example #14
def blocks_from_mask(job_id, config_path):

    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    with open(config_path, 'r') as f:
        config = json.load(f)

    mask_path = config['mask_path']
    mask_key = config['mask_key']
    output_path = config['output_path']
    shape = config['shape']
    block_shape = config['block_shape']
    n_threads = config.get('threads_per_job', 1)

    # NOTE we assume that the mask is small and will fit into memory
    with vu.file_reader(mask_path, 'r') as f:
        ds = f[mask_key]
        ds.n_threads = n_threads
        mask_data = ds[:]
    mask = ResizedVolume(mask_data, tuple(shape))

    blocking = nt.blocking([0, 0, 0], shape, list(block_shape))
    blocks_in_mask = _get_blocks_in_mask(mask, blocking, n_threads)

    with open(output_path, 'w') as f:
        json.dump(blocks_in_mask, f)

    fu.log_job_success(job_id)
Code example #15
def _accumulate_with_filters(input_path, input_key, labels_path, labels_key,
                             graph_path, subgraph_key, output_path, output_key,
                             block_list, block_shape, filters, sigmas, halo,
                             apply_in_2d, channel_agglomeration):

    fu.log("accumulate features with applying filters:")

    with vu.file_reader(input_path, 'r') as f,\
            vu.file_reader(labels_path, 'r') as fl,\
            vu.file_reader(graph_path, 'r') as fg,\
            vu.file_reader(output_path) as fo:

        g = fg[subgraph_key]
        shape = g.attrs['shape']
        ignore_label = g.attrs['ignore_label']
        ds_edges = g['edges']

        ds_in = f[input_key]
        ds_labels = fl[labels_key]
        ds_out = fo[output_key]

        blocking = nt.blocking([0, 0, 0], shape, block_shape)
        for block_id in block_list:
            n_feats = _accumulate_block(block_id, blocking, ds_in, ds_labels,
                                        ds_edges, ds_out, filters, sigmas,
                                        halo, ignore_label, apply_in_2d,
                                        channel_agglomeration)

    return n_feats
Code example #16
def segment_boutons():
    path = './data.n5'
    with z5py.File(path, 'a') as f:
        seg_neurons = f['segmentation/multicut']
        seg_ves = f['segmentation/vesicles']

        ds_out = f.require_dataset('segmentation/boutons',
                                   shape=seg_ves.shape,
                                   chunks=tuple(BLOCK_SHAPE),
                                   dtype='uint64',
                                   compression='gzip')

        roi_begin, roi_end = get_bounding_box(intersect_with_blocking=True,
                                              return_as_lists=True)
        blocking = nt.blocking(roi_begin, roi_end, BLOCK_SHAPE)
        n_blocks = blocking.numberOfBlocks

        n_threads = 32
        func = partial(intersect_segmentation,
                       seg_a=seg_neurons,
                       seg_b=seg_ves,
                       seg_out=ds_out,
                       blocking=blocking)

        with futures.ThreadPoolExecutor(n_threads) as tp:
            list(tqdm(tp.map(func, range(n_blocks)), total=n_blocks))
Code example #17
def block_faces(job_id, config_path):

    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    with open(config_path, 'r') as f:
        config = json.load(f)
    input_path = config['input_path']
    input_key = config['input_key']
    block_list = config['block_list']
    tmp_folder = config['tmp_folder']
    offsets_path = config['offsets_path']
    block_shape = config['block_shape']

    with open(offsets_path) as f:
        offsets = json.load(f)['offsets']

    with vu.file_reader(input_path, 'r') as f:
        ds = f[input_key]
        shape = list(ds.shape)

        blocking = nt.blocking([0, 0, 0], shape, block_shape)
        assignments = [
            _process_faces(block_id, blocking, ds, offsets)
            for block_id in block_list
        ]
    # filter out empty assignments
    assignments = [ass for ass in assignments if ass is not None]
    assignments = np.concatenate(assignments, axis=0)
    assignments = np.unique(assignments, axis=0)

    save_path = os.path.join(tmp_folder, 'assignments_%i.npy' % job_id)
    np.save(save_path, assignments)
    fu.log_job_success(job_id)
Code example #18
    def test_subresults(self):
        task = NodeLabelWorkflow(tmp_folder=self.tmp_folder,
                                 config_dir=self.config_folder,
                                 target=self.target,
                                 max_jobs=self.n_jobs,
                                 ws_path=self.path,
                                 ws_key=self.ws_key,
                                 input_path=self.path,
                                 input_key=self.input_key,
                                 output_path=self.output_path,
                                 output_key=self.output_key)

        ret = luigi.build([task], local_scheduler=True)
        self.assertTrue(ret)

        tmp_path = os.path.join(self.output_path, 'label_overlaps_')
        ws, inp = self.load_data()

        blocking = nt.blocking([0, 0, 0], ws.shape, self.block_shape)
        for block_id in range(blocking.numberOfBlocks):
            block = blocking.getBlock(block_id)
            chunk_id = tuple(
                start // bs
                for start, bs in zip(block.begin, self.block_shape))
            bb = tuple(
                slice(beg, end) for beg, end in zip(block.begin, block.end))

            wsb, inpb = ws[bb], inp[bb]

            overlaps, _ = ndist.deserializeOverlapChunk(tmp_path, chunk_id)
            overlaps_exp = self.compute_overlaps(wsb, inpb, False)

            ids = np.unique(wsb)
            self.check_overlaps(ids, overlaps, overlaps_exp)
Code example #19
def _submit_blocks(ds_in, ds_out, block_shape, block_list, scale_factor, halo,
                   library, library_kwargs, n_threads):

    # get the blocking
    shape = ds_out.shape
    if len(shape) == 4:
        shape = shape[1:]
    blocking = nt.blocking([0, 0, 0], shape, block_shape)
    if library == 'vigra':
        sampler = partial(_ds_vigra, **library_kwargs)
    elif library == 'skimage':
        sk_scale = (scale_factor, ) * 3 if isinstance(
            scale_factor, int) else tuple(scale_factor)
        ds_function = library_kwargs.get('function', 'mean')
        ds_function = getattr(np, ds_function)
        sampler = partial(_ds_skimage, block_size=sk_scale, func=ds_function)
    else:
        raise ValueError(
            "Invalid library %s, only vigra and skimage are supported" %
            library)

    if n_threads <= 1:
        for block_id in block_list:
            _ds_block(blocking, block_id, ds_in, ds_out, scale_factor, halo,
                      sampler)
    else:
        with futures.ThreadPoolExecutor(n_threads) as tp:
            tasks = [
                tp.submit(_ds_block, blocking, block_id, ds_in, ds_out,
                          scale_factor, halo, sampler)
                for block_id in block_list
            ]
            [t.result() for t in tasks]
Code example #20
def block_statistics(job_id, config_path):

    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    # get the config
    with open(config_path) as f:
        config = json.load(f)

    path = config['path']
    key = config['key']
    tmp_folder = config['tmp_folder']

    block_shape = config['block_shape']
    block_list = config['block_list']

    with vu.file_reader(path, 'r') as f:
        shape = f[key].shape

    blocking = nt.blocking([0, 0, 0], list(shape), list(block_shape))

    with vu.file_reader(path, 'r') as f_in:
        ds = f_in[key]
        block_stats = [
            _compute_block_stats(block_id, blocking, ds)
            for block_id in block_list
        ]

    save_path = os.path.join(tmp_folder,
                             'block_statistics_job%i.json' % job_id)
    job_stats = merge_stats(block_stats)
    with open(save_path, 'w') as f:
        json.dump(job_stats, f)

    fu.log_job_success(job_id)
Code example #21
def affinity_to_boundary(data_path, prediction_prefix, tmp_folder, target,
                         max_jobs):
    aff_key = os.path.join(prediction_prefix, 'affinities')
    bd_key = os.path.join(prediction_prefix, 'boundaries')

    with z5py.File(data_path, 'a') as f:
        if bd_key in f:
            return

        ds_affs = f[aff_key]
        shape = ds_affs.shape[1:]
        chunks = ds_affs.chunks[1:]
        ds_bd = f.require_dataset(bd_key,
                                  shape=shape,
                                  chunks=chunks,
                                  compression='gzip',
                                  dtype=ds_affs.dtype)

        blocking = nt.blocking([0, 0, 0], shape, chunks)

        def _block(block_id):
            block = blocking.getBlock(block_id)
            bb = tuple(
                slice(beg, end) for beg, end in zip(block.begin, block.end))

            bb_affs = (slice(None), ) + bb
            affs = ds_affs[bb_affs]

            bd = np.maximum(affs[1], affs[2])
            bd = np.maximum(bd, np.maximum(affs[4], affs[5]))
            ds_bd[bb] = bd.astype(ds_bd.dtype)

        with futures.ThreadPoolExecutor(8) as tp:
            tp.map(_block, range(blocking.numberOfBlocks))
Code example #22
def agglomerate(job_id, config_path):
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)
    with open(config_path, 'r') as f:
        config = json.load(f)

    # read the input config
    input_path = config['input_path']
    input_key = config['input_key']
    shape = list(vu.get_shape(input_path, input_key))
    if len(shape) == 4:
        shape = shape[1:]

    block_shape = list(config['block_shape'])
    block_list = config['block_list']

    # read the output config
    output_path = config['output_path']
    output_key = config['output_key']

    # get the blocking
    blocking = nt.blocking([0, 0, 0], shape, block_shape)

    # submit blocks
    with vu.file_reader(input_path, 'r') as f_in, vu.file_reader(output_path) as f_out:
        ds_in = f_in[input_key]
        assert ds_in.ndim in (3, 4)
        ds_out = f_out[output_key]
        assert ds_out.ndim == 3
        for block_id in block_list:
            _agglomerate_block(blocking, block_id, ds_in, ds_out, config)

    # log success
    fu.log_job_success(job_id)
Code example #23
def block_morphology(job_id, config_path):

    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    # get the config
    with open(config_path) as f:
        config = json.load(f)

    input_path = config['input_path']
    input_key = config['input_key']
    output_path = config['output_path']
    output_key = config['output_key']

    block_shape = config['block_shape']
    block_list = config['block_list']

    with vu.file_reader(input_path, 'r') as f:
        shape = f[input_key].shape

    blocking = nt.blocking([0, 0, 0],
                           list(shape),
                           list(block_shape))

    with vu.file_reader(input_path, 'r') as f_in:
        ds_in = f_in[input_key]
        for block_id in block_list:
            _morphology_for_block(block_id, blocking, ds_in,
                                  output_path, output_key)
    fu.log_job_success(job_id)
Code example #24
def region_features(job_id, config_path):

    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    # get the config
    with open(config_path, 'r') as f:
        config = json.load(f)

    block_list = config['block_list']
    input_path = config['input_path']
    input_key = config['input_key']
    labels_path = config['labels_path']
    labels_key = config['labels_key']
    output_path = config['output_path']
    output_key = config['output_key']
    block_shape = config['block_shape']
    ignore_label = config['ignore_label']

    with vu.file_reader(input_path) as f_in,\
            vu.file_reader(labels_path) as f_l,\
            vu.file_reader(output_path) as f_out:

        ds_in = f_in[input_key]
        ds_labels = f_l[labels_key]
        ds_out = f_out[output_key]

        shape = ds_out.shape
        blocking = nt.blocking([0, 0, 0], shape, block_shape)

        for block_id in block_list:
            _block_features(block_id, blocking, ds_in, ds_labels, ds_out,
                            ignore_label)

    fu.log_job_success(job_id)
Code example #25
def minfilter(job_id, config_path):
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)
    with open(config_path, 'r') as f:
        config = json.load(f)

    # input/output files
    input_path = config['input_path']
    input_key = config['input_key']
    output_path = config['output_path']
    output_key = config['output_key']

    # blocks and task config
    block_list = config['block_list']
    block_shape = config['block_shape']
    filter_shape = config['filter_shape']

    with vu.file_reader(input_path,
                        'r') as f_in, vu.file_reader(output_path) as f_out:
        ds_in = f_in[input_key]
        ds_out = f_out[output_key]
        shape = ds_in.shape

        blocking = nt.blocking(roiBegin=[0, 0, 0],
                               roiEnd=list(shape),
                               blockShape=list(block_shape))

        # TODO is half the filter shape really a large enough halo?
        halo = [fshape // 2 for fshape in filter_shape]
        for block_id in block_list:
            _minfilter_block(block_id, blocking, halo, ds_in, ds_out,
                             filter_shape)
    # log success
    fu.log_job_success(job_id)
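
Regarding the TODO above: for an odd filter window, a halo of filter_shape // 2 per side is in fact exactly what is needed for the min filter to be valid on the whole inner block. _minfilter_block is not shown, but the standard nifty halo pattern it presumably follows is (sketch; process stands in for the actual filtering):

block = blocking.getBlockWithHalo(block_id, halo)
outer_bb = tuple(slice(beg, end)
                 for beg, end in zip(block.outerBlock.begin, block.outerBlock.end))
inner_bb = tuple(slice(beg, end)
                 for beg, end in zip(block.innerBlock.begin, block.innerBlock.end))
local_bb = tuple(slice(beg, end)
                 for beg, end in zip(block.innerBlockLocal.begin, block.innerBlockLocal.end))

data = ds_in[outer_bb]               # read the enlarged region
result = process(data)               # hypothetical block-wise computation
ds_out[inner_bb] = result[local_bb]  # write back only the inner part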
Code example #26
File: debug.py Project: mobie/platybrowser-datasets
    def check_block(block_id):
        labels = label_block_mapping[block_id]
        if len(labels) == 0:
            return True

        block = blocking.getBlock(block_id - 1)
        bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))

        roi_begin = [b.start * scale_factor for b in bb]
        roi_end = [b.stop * scale_factor for b in bb]
        blocking_seg = nt.blocking(roi_begin, roi_end, check_block_shape)

        this_ids = set()
        for check_block_id in range(blocking_seg.numberOfBlocks):
            check_block = blocking_seg.getBlock(check_block_id)
            check_bb = tuple(
                slice(beg, end)
                for beg, end in zip(check_block.begin, check_block.end))
            seg = ds_seg[check_bb]
            ids = np.unique(seg)
            this_ids.update(ids.tolist())

        if len(set(labels) - this_ids) > 0:
            return False
        else:
            return True
Code example #27
def watershed_from_seeds(job_id, config_path):
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)
    with open(config_path, 'r') as f:
        config = json.load(f)

    # read the input config
    input_path = config['input_path']
    input_key = config['input_key']
    shape = list(vu.get_shape(input_path, input_key))
    if len(shape) == 4:
        shape = shape[1:]

    block_shape = list(config['block_shape'])
    block_list = config['block_list']

    # TODO seeds and output might be identical
    # in that case we would need in-place logic if we
    # want to support h5 (it's fine with n5 as is)
    # read the seed and output config
    seeds_path = config['seeds_path']
    seeds_key = config['seeds_key']
    output_path = config['output_path']
    output_key = config['output_key']

    # check if we have a mask
    with_mask = 'mask_path' in config
    if with_mask:
        mask_path = config['mask_path']
        mask_key = config['mask_key']

    # get the blocking
    blocking = nt.blocking([0, 0, 0], shape, block_shape)

    # submit blocks
    with vu.file_reader(input_path, 'r') as f_in,\
         vu.file_reader(seeds_path, 'r') as f_seeds,\
         vu.file_reader(output_path) as f_out:

        ds_in = f_in[input_key]
        assert ds_in.ndim in (3, 4)
        ds_seeds = f_seeds[seeds_key]
        assert ds_seeds.ndim == 3
        ds_out = f_out[output_key]
        assert ds_out.ndim == 3

        # note that the mask is usually small enough to keep it
        # in memory (and we interpolate to get to the full volume)
        # if this does not hold, this code needs to change!
        if with_mask:
            mask = vu.load_mask(mask_path, mask_key, shape)
            for block_id in block_list:
                _ws_block_masked(blocking, block_id, ds_in, ds_seeds, ds_out,
                                 mask, config)

        else:
            for block_id in block_list:
                _ws_block(blocking, block_id, ds_in, ds_seeds, ds_out, config)
    # log success
    fu.log_job_success(job_id)
Code example #28
    def run(self):
        with open_file(self.path) as f:
            ds_seg = f[self.seg_key]
            ds_fg = f[self.fg_key]
            shape = ds_seg.shape

            blocking = nt.blocking([0] * ds_seg.ndim, shape, self.block_shape)

            def filter_block(block_id):
                block = blocking.getBlock(block_id)
                bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
                seg = ds_seg[bb].astype('uint32')
                if seg.sum() == 0:
                    return

                inp = normalize(ds_fg[bb])
                mean_fg = vigra.analysis.extractRegionFeatures(inp, seg, features=['mean'])['mean']
                fg_ids = np.where(mean_fg > self.threshold)[0]
                filtered = np.isin(seg, fg_ids)
                ds_seg[bb] = filtered.astype(ds_seg.dtype)

            n_blocks = blocking.numberOfBlocks
            with futures.ThreadPoolExecutor(self.n_threads) as tp:
                list(tqdm(tp.map(filter_block, range(n_blocks)), total=n_blocks))

        # dummy output for luigi
        with open(self.out_path, 'w') as f:
            f.write("Success!")
Code example #29
def merge_predictions(job_id, config_path):

    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    # get the config
    with open(config_path, 'r') as f:
        config = json.load(f)

    output_path = config['output_path']
    output_key = config['output_key']
    tmp_prefix = config['tmp_prefix']
    halo = config['halo']
    n_channels = config['n_channels']

    shape = vu.get_shape(output_path, output_key)
    if len(shape) > 3:
        shape = shape[-3:]
    block_shape = config['block_shape']
    blocking = nt.blocking([0, 0, 0], shape, block_shape)

    # TODO we could parallelize this
    with vu.file_reader(output_path) as f:
        ds = f[output_key]
        for block_id in range(blocking.numberOfBlocks):
            _merge_block(block_id, blocking, ds, tmp_prefix, halo, n_channels)

    fu.log_job_success(job_id)
Code example #30
def prefilter_blocks(mask_path, mask_key,
                     shape, block_shape,
                     save_file, n_threads=48):
    if os.path.exists(save_file):
        print("Loading block list from file")
        with open(save_file) as f:
            return json.load(f)

    with open_file(mask_path, 'r') as f:
        ds = f[mask_key]
        mask = ResizedVolume(ds, shape=shape, order=0)

        blocking = nt.blocking([0, 0, 0], shape, block_shape)
        n_blocks = blocking.numberOfBlocks

        def check_block(block_id):
            block = blocking.getBlock(block_id)
            bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
            d = mask[bb]
            if d.sum() > 0:
                return block_id
            else:
                return None

        print("Computing block list ...")
        with futures.ThreadPoolExecutor(n_threads) as tp:
            blocks = list(tqdm(tp.map(check_block, range(n_blocks)), total=n_blocks))
        blocks = [bid for bid in blocks if bid is not None]

    with open(save_file, 'w') as f:
        json.dump(blocks, f)
    return blocks
Code example #31
def insert_affinities(job_id, config_path):

    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    with open(config_path, 'r') as f:
        config = json.load(f)
    affinity_path = config['affinity_path']
    affinity_key = config['affinity_key']
    objects_path = config['objects_path']
    objects_key = config['objects_key']

    block_list = config['block_list']
    block_shape = config['block_shape']
    offsets = config['offsets']

    with vu.file_reader(affinity_path) as f_in, vu.file_reader(
            objects_path) as f_obj:
        ds = f_in[affinity_key]
        shape = ds.shape[1:]

        # TODO actually check that objects are on a lower scale
        ds_objs = f_obj[objects_key]
        objects = vu.InterpolatedVolume(ds_objs, shape)

        blocking = nt.blocking([0, 0, 0], list(shape), block_shape)
        for block_id in block_list:
            _insert_affinities_block(block_id, blocking, ds, objects, offsets)

    fu.log_job_success(job_id)
Code example #32
File: test_blocking.py Project: constantinpape/nifty
    def testNeighbors(self):
        blocking = nt.blocking(roiBegin=[0, 0, 0],
                               roiEnd=[40, 35, 17],
                               blockShape=[5, 5, 5])
        neighbors = {}
        for block_id in range(blocking.numberOfBlocks):
            nbrs = [blocking.getNeighborId(block_id,
                                           axis=i // 2,
                                           lower=bool(i % 2)) for i in range(6)]
            neighbors[block_id] = [nbr for nbr in nbrs if nbr != -1]

        for block_id in range(blocking.numberOfBlocks):
            nbrs = neighbors[block_id]
            for nbr_id in nbrs:
                self.assertTrue(block_id in neighbors[nbr_id])
Code example #33
File: test_blocking.py Project: DerThorsten/nifty
def testBlocking2d():

    roiBegin =   [0,0]
    roiEnd =     [5,7]
    blockShape = [3,3]
    blockShift = [0,0]

    blocking = nt.blocking(roiBegin=roiBegin,roiEnd=roiEnd,
                             blockShape=blockShape, blockShift=blockShift)

    blocksPerAxis = blocking.blocksPerAxis


    assert_equals(blocksPerAxis[0],2)
    assert_equals(blocksPerAxis[1],3)


    blocks = [blocking.getBlock(i) for i in range(blocking.numberOfBlocks)]


    assert_equals(blocks[0].begin,[0,0])
    assert_equals(blocks[0].end,  [3,3])

    assert_equals(blocks[1].begin,[0,3])
    assert_equals(blocks[1].end,  [3,6])

    assert_equals(blocks[2].begin,[0,6])
    assert_equals(blocks[2].end,  [3,7])


    assert_equals(blocks[3].begin,[3,0])
    assert_equals(blocks[3].end,  [5,3])

    assert_equals(blocks[4].begin,[3,3])
    assert_equals(blocks[4].end,  [5,6])

    assert_equals(blocks[5].begin,[3,6])
    assert_equals(blocks[5].end,  [5,7])
Code example #34
File: test_blocking.py Project: constantinpape/nifty
    def testNeighborsToyExample(self):
        blocking = nt.blocking(roiBegin=[0, 0],
                               roiEnd=[10, 10],
                               blockShape=[5, 5])

        neighbors = {}
        for b in range(blocking.numberOfBlocks):
            nbrs = [blocking.getNeighborId(b, i // 2, bool(i % 2)) for i in range(4)]
            nbrs = [nbr for nbr in nbrs if nbr != -1]
            self.assertEqual(len(nbrs), 2)
            nbrs.sort()
            neighbors[b] = nbrs

        self.assertEqual(neighbors[0][0], 1)
        self.assertEqual(neighbors[0][1], 2)

        self.assertEqual(neighbors[1][0], 0)
        self.assertEqual(neighbors[1][1], 3)

        self.assertEqual(neighbors[2][0], 0)
        self.assertEqual(neighbors[2][1], 3)

        self.assertEqual(neighbors[3][0], 1)
        self.assertEqual(neighbors[3][1], 2)
Code example #35
File: test_blocking.py Project: stuarteberg/nifty
def testBlocking2d():

    roiBegin =   [0,0]
    roiEnd =     [5,7]
    blockShape = [3,3]
    blockShift = [0,0]

    blocking = nt.blocking(roiBegin=roiBegin,roiEnd=roiEnd,
                             blockShape=blockShape, blockShift=blockShift)

    blocksPerAxis = blocking.blocksPerAxis


    assert_equals(blocksPerAxis[0],2)
    assert_equals(blocksPerAxis[1],3)


    halo = [2,2]

    blocks = [blocking.getBlock(i) for i in range(blocking.numberOfBlocks)]
    blocksWithHalo = [blocking.getBlockWithHalo(i, halo) for i in range(blocking.numberOfBlocks)]


    assert_equals(blocks[0].begin,[0,0])
    assert_equals(blocks[0].end,  [3,3])


    print(blocksWithHalo[0].outerBlock)

    assert_equals(blocksWithHalo[0].innerBlock.begin,[0,0])
    assert_equals(blocksWithHalo[0].innerBlock.end,  [3,3])
    assert_equals(blocksWithHalo[0].outerBlock.begin,[0,0])
    assert_equals(blocksWithHalo[0].outerBlock.end,  [5,5])


    assert_equals(blocks[1].begin,[0,3])
    assert_equals(blocks[1].end,  [3,6])
    assert_equals(blocksWithHalo[1].innerBlock.begin,[0,3])
    assert_equals(blocksWithHalo[1].innerBlock.end,  [3,6])
    assert_equals(blocksWithHalo[1].outerBlock.begin,[0,1])
    assert_equals(blocksWithHalo[1].outerBlock.end,  [5,7])

    assert_equals(blocks[2].begin,[0,6])
    assert_equals(blocks[2].end,  [3,7])
    assert_equals(blocksWithHalo[2].innerBlock.begin,[0,6])
    assert_equals(blocksWithHalo[2].innerBlock.end,  [3,7])
    assert_equals(blocksWithHalo[2].outerBlock.begin,[0,4])
    assert_equals(blocksWithHalo[2].outerBlock.end,  [5,7])

    assert_equals(blocks[3].begin,[3,0])
    assert_equals(blocks[3].end,  [5,3])
    assert_equals(blocksWithHalo[3].innerBlock.begin,[3,0])
    assert_equals(blocksWithHalo[3].innerBlock.end,  [5,3])
    assert_equals(blocksWithHalo[3].outerBlock.begin,[1,0])
    assert_equals(blocksWithHalo[3].outerBlock.end,  [5,5])

    assert_equals(blocks[4].begin,[3,3])
    assert_equals(blocks[4].end,  [5,6])
    assert_equals(blocksWithHalo[4].innerBlock.begin,[3,3])
    assert_equals(blocksWithHalo[4].innerBlock.end,  [5,6])
    assert_equals(blocksWithHalo[4].outerBlock.begin,[1,1])
    assert_equals(blocksWithHalo[4].outerBlock.end,  [5,7])

    assert_equals(blocks[5].begin,[3,6])
    assert_equals(blocks[5].end,  [5,7])
    assert_equals(blocksWithHalo[5].innerBlock.begin,[3,6])
    assert_equals(blocksWithHalo[5].innerBlock.end,  [5,7])
    assert_equals(blocksWithHalo[5].outerBlock.begin,[1,4])
    assert_equals(blocksWithHalo[5].outerBlock.end,  [5,7])
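
The outerBlock values asserted above are simply the inner block grown by the halo and clipped to the ROI. A sketch of that rule, checked against block 1 of the test:

def grow_and_clip(inner_begin, inner_end, halo, roiBegin, roiEnd):
    # grow by the halo on each side, then clip to the ROI bounds
    outer_begin = [max(rb, b - h) for b, h, rb in zip(inner_begin, halo, roiBegin)]
    outer_end = [min(re, e + h) for e, h, re in zip(inner_end, halo, roiEnd)]
    return outer_begin, outer_end

print(grow_and_clip([0, 3], [3, 6], [2, 2], [0, 0], [5, 7]))  # ([0, 1], [5, 7])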