Example #1
def visualize_skeletons(lauritzen_block_id,
                        seg_key,
                        out_key,
                        n_threads,
                        radius=10):
    path = '/nrs/saalfeld/lauritzen/0%i/workspace.n5' % lauritzen_block_id
    key1 = '/'.join(('filtered', 'segmentations', seg_key))
    label_file = os.path.join(path, key1)
    skeleton_file = os.path.join(path, 'skeletons')

    # FIXME this is pretty inefficient, because we repeat the same computation twice,
    # but for now these things are fast enough
    metrics = build_skeleton_metrics(label_file, skeleton_file, n_threads)
    non_empty_chunks, skeletons_to_blocks = metrics.groupSkeletonBlocks(
        n_threads)

    f = z5py.File(path)
    shape = f[key1].shape
    chunks = f[key1].chunks

    blocking = nifty.tools.blocking([0, 0, 0], list(shape), list(chunks))

    if out_key not in f:
        ds = f.create_dataset(out_key,
                              dtype='uint64',
                              compression='gzip',
                              shape=shape,
                              chunks=chunks)
    else:
        ds = f[out_key]

    def visualize_block(block_id):
        print("Writing block", block_id)
        block = blocking.getBlock(block_id)
        bshape = tuple(block.shape)
        vol = np.zeros(bshape, dtype='uint64')
        skeletons = skeletons_to_blocks[block_id]

        # rasterize each skeleton node as a small filled circle in its z-slice
        for skel_id, coords in skeletons.items():
            for coord in coords:
                _, z, y, x = coord
                # `circle` comes from skimage.draw (replaced by skimage.draw.disk in newer scikit-image)
                rr, cc = circle(y, x, radius, shape=bshape[1:])
                vol[z, rr, cc] = skel_id

        bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
        ds[bb] = vol

    with futures.ThreadPoolExecutor(n_threads) as tp:
        tasks = [
            tp.submit(visualize_block, block_id)
            for block_id in non_empty_chunks
        ]
        [t.result() for t in tasks]
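
A minimal usage sketch for the function above. The block id and dataset keys are hypothetical, and the module-level imports the snippet relies on (os, numpy as np, z5py, nifty, concurrent.futures, skimage.draw.circle and cremi_tools.skeletons.build_skeleton_metrics) are assumed to be in place:

if __name__ == '__main__':
    # hypothetical arguments; adjust to an existing Lauritzen block and segmentation key
    visualize_skeletons(lauritzen_block_id=2,
                        seg_key='multicut',
                        out_key='skeletons/visualization',
                        n_threads=8)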
Example #2
def setUp(self):
    from cremi_tools.skeletons import build_skeleton_metrics
    assert os.path.exists('./test_skeletons.n5')
    assert os.path.exists('./test_ws.n5')
    self.metrics = build_skeleton_metrics('./test_ws.n5/watershed',
                                          './test_skeletons.n5')
    self.node_assignment = self.metrics.getNodeAssignments()
    self.skeleton_ids = [
        int(sk) for sk in os.listdir('./test_skeletons.n5')
        if sk.isdigit()
    ]
    self.skeleton_ids.sort()
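
A hypothetical companion test that only uses the attributes prepared in setUp; the method name and assertions are illustrative and not part of the original test suite:

def test_skeleton_ids(self):
    # skeleton ids are the numeric group names inside the skeleton container
    self.assertGreater(len(self.skeleton_ids), 0)
    self.assertEqual(self.skeleton_ids, sorted(self.skeleton_ids))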
Example #3
def evaluation_on_testdata(label_file, skeleton_file):
    from cremi_tools.skeletons import build_skeleton_metrics
    print("Building metrics...")
    m = build_skeleton_metrics(label_file, skeleton_file, 8)
    print("... done")

    # print("Computing split score ...")
    # split_summary(m)
    # print("... done")

    # print("Compting explicit merges ...")
    # explicit_merge_summary(m)
    # print("... done")

    print("Comuting heuristic merges ...")
    heuristic_merge_summary(m)
    print("... done")
Example #4
def compute_google_scores(block_id, seg_key, skeleton_postfix):
    path = '/nrs/saalfeld/lauritzen/0%i/workspace.n5' % block_id
    label_file = os.path.join(path, 'raw', 'segmentations', 'results', seg_key)
    skel_group = 'neurons_of_interest' if skeleton_postfix == '' else 'for_eval_%s' % skeleton_postfix

    skeleton_file = os.path.join(path, 'skeletons', skel_group)

    n_threads = 40
    metrics = build_skeleton_metrics(label_file, skeleton_file, n_threads)

    correct, split, merge, n_merges = metrics.computeGoogleScore(n_threads)
    print("Overall edge scores:")
    print("Correct:     ", correct)
    print("Split:       ", split)
    print("Merge:       ", merge)
    print("Merge Points:", n_merges)

    return {'correct': correct, 'split': split, 'merge': merge, 'n_merges': n_merges}
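
A usage sketch; the block id, segmentation key and postfix are hypothetical. An empty postfix selects the 'neurons_of_interest' skeleton group:

if __name__ == '__main__':
    scores = compute_google_scores(block_id=2, seg_key='multicut', skeleton_postfix='')
    print(scores)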
Example #5
def compute_skeleton_evaluation(path, seg_key, skeleton_keys, n_threads,
                                tmp_folder):

    label_file = os.path.join(path, seg_key)

    results = {}
    for skel_key in skeleton_keys:
        skeleton_file = os.path.join(path, skel_key)
        metrics = build_skeleton_metrics(label_file, skeleton_file, n_threads)
        correct, split, merge, n_merges = metrics.computeGoogleScore(n_threads)
        res = {
            'correct': correct,
            'split': split,
            'merge': merge,
            'n_merges': n_merges
        }
        results[skel_key] = res

    res_path = os.path.join(tmp_folder, 'skeleton_eval_res.json')
    with open(res_path, 'w') as f:
        json.dump(results, f)
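
A usage sketch with hypothetical container path and keys; the results are written to skeleton_eval_res.json inside the given temporary folder:

if __name__ == '__main__':
    os.makedirs('./tmp', exist_ok=True)
    compute_skeleton_evaluation('./workspace.n5', 'segmentation/multicut',
                                ['skeletons/neurons_of_interest'],
                                n_threads=8, tmp_folder='./tmp')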
Example #6
def evaluate_segmentation(block_id, seg_key, n_threads):
    path = '/nrs/saalfeld/lauritzen/0%i/workspace.n5' % block_id
    label_file = os.path.join(path, 'filtered', 'segmentations', seg_key)
    skeleton_file = os.path.join(path, 'skeletons')

    t0 = time.time()
    print("Building skeleton metrics in...")
    metrics = build_skeleton_metrics(label_file, skeleton_file, n_threads)
    print("... in %f s" % (time.time() - t0, ))
    print()

    false_split_score(metrics)
    print()
    explicit_merge_score(metrics)
    print()
    # distance_statistics(metrics)

    correct, split, merge, n_merges = metrics.computeGoogleScore(n_threads)
    print("Overall edge scores:")
    print("Correct:     ", correct)
    print("Split:       ", split)
    print("Merge:       ", merge)
    print("Merge Points:", n_merges)
Example #7
def merge_along_skeletons(block_id, in_key, out_key, n_threads):
    path = '/nrs/saalfeld/lauritzen/0%i/workspace.n5' % block_id
    key1 = '/'.join(('filtered', 'segmentations', in_key))
    label_file = os.path.join(path, key1)

    # find false splits according to skeletons and the nodes that have to
    # be merged to fix it
    skeleton_file = os.path.join(path, 'skeletons')
    metrics = build_skeleton_metrics(label_file, skeleton_file, n_threads)
    skeleton_merges = metrics.mergeFalseSplitNodes(n_threads)

    n_labels = z5py.File(label_file).attrs['maxId'] + 1

    # get new node labeling with ufd
    ufd = nifty.ufd.ufd(n_labels)
    for _, merge_nodes in skeleton_merges.items():
        merge_nodes = np.array([mn for mn in merge_nodes])
        ufd.merge(merge_nodes)
    node_labels = ufd.elementLabeling()
    # relabel consecutively in-place; keep_zeros=True guarantees that label 0 stays 0
    vigra.analysis.relabelConsecutive(node_labels,
                                      out=node_labels,
                                      keep_zeros=True,
                                      start_label=1)

    labels = nz5.datasetWrapper('uint64', label_file)
    block_shape = [25, 256, 256]
    rag_file = './rag.npy'
    if not os.path.exists(rag_file):
        print("Computing RAG...")
        rag = nrag.gridRagZ5(labels,
                             numberOfLabels=int(n_labels),
                             numberOfThreads=n_threads,
                             dtype='uint64',
                             blockShape=block_shape)
        np.save(rag_file, rag.serialize())
        print("... done")
    else:
        ragser = np.load(rag_file)
        rag = nrag.gridRagZ5(labels,
                             numberOfLabels=int(n_labels),
                             serialization=ragser,
                             dtype='uint64')

    f_out = z5py.File(path)
    key2 = '/'.join(('filtered', 'segmentations', out_key))
    if key2 not in f_out:
        f_out.create_dataset(key2,
                             dtype='uint64',
                             compression='gzip',
                             shape=f_out[key1].shape,
                             chunks=f_out[key1].chunks)

    out_file = os.path.join(path, key2)
    out = nz5.datasetWrapper('uint64', out_file)

    print("Projecting to pixels...")
    nrag.projectScalarNodeDataToPixels(graph=rag,
                                       nodeData=node_labels,
                                       pixelData=out,
                                       blockShape=block_shape,
                                       numberOfThreads=n_threads)
    print("... done")
    # after merging and consecutive relabeling the true max id is the largest value in node_labels
    z5py.File(path)[key2].attrs['maxId'] = int(node_labels.max())
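
A usage sketch; the keys are hypothetical. The function writes the merged segmentation as a new dataset in the same workspace and caches the RAG serialization in ./rag.npy between runs:

if __name__ == '__main__':
    merge_along_skeletons(block_id=2,
                          in_key='multicut',
                          out_key='multicut_skeleton_merged',
                          n_threads=8)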