def skeleton_evaluation(job_id, config_path):
    """Evaluate a segmentation against skeletons for a single job.

    Reads the job configuration (json) from ``config_path``, builds
    ``nskel.SkeletonMetrics`` for all skeletons found in the skeleton
    container and writes the google-score results as json to the
    configured output path.

    Arguments:
        job_id [int] - id of this job (used for logging only)
        config_path [str] - path to the job's json configuration
    """
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)
    with open(config_path, 'r') as f:
        config = json.load(f)

    # read the input config
    input_path = config['input_path']
    input_key = config['input_key']
    skeleton_path = config['skeleton_path']
    skeleton_key = config['skeleton_key']
    output_path = config['output_path']
    # currently unread below, but kept so a missing key still raises KeyError
    # TODO adapt nskel.SkeletonMetrics to new n5 skeleton format
    skeleton_format = config['skeleton_format']
    n_threads = config.get('threads_per_job', 1)

    # list the skeleton ids stored in the skeleton container.
    # FIX: this used `os.listdir(skeleton_file)`, but `skeleton_file` is not
    # defined anywhere in this function (NameError); the skeleton folder is
    # skeleton_path/skeleton_key, exactly what is passed to SkeletonMetrics.
    skeleton_folder = os.path.join(skeleton_path, skeleton_key)
    skeleton_ids = [int(sk) for sk in os.listdir(skeleton_folder) if sk.isdigit()]
    skeleton_ids.sort()

    metrics = nskel.SkeletonMetrics(os.path.join(input_path, input_key),
                                    skeleton_folder, skeleton_ids, n_threads)

    # TODO expose parameters for different eval options
    correct, split, merge, n_merges = metrics.computeGoogleScore(n_threads)
    res = {'correct': correct, 'split': split, 'merge': merge, 'n_merges': n_merges}
    with open(output_path, 'w') as f:
        json.dump(res, f)

    # log success
    fu.log_job_success(job_id)
def build_skeleton_metrics(label_file, skeleton_file, n_threads=-1):
    """Construct a ``nskel.SkeletonMetrics`` for all skeletons on disk.

    Arguments:
        label_file [str] - path to the segmentation labels
        skeleton_file [str] - path to the skeleton container; entries with
            all-digit names are interpreted as skeleton ids
        n_threads [int] - number of threads (default: -1)
    """
    assert os.path.exists(label_file), label_file
    assert os.path.exists(skeleton_file), skeleton_file
    # collect the integer skeleton ids in ascending order
    ids = sorted(int(name) for name in os.listdir(skeleton_file) if name.isdigit())
    return nskel.SkeletonMetrics(label_file, skeleton_file, ids, n_threads)
def load_skeleton_metrics(label_file, skeleton_file, serialization_file):
    """Load a ``nskel.SkeletonMetrics`` from a previous serialization.

    Arguments:
        label_file [str] - path to the segmentation labels
        skeleton_file [str] - path to the skeleton container; entries with
            all-digit names are interpreted as skeleton ids
        serialization_file [str] - path to the serialized metrics state
    """
    for path in (label_file, skeleton_file, serialization_file):
        assert os.path.exists(path), path
    # collect the integer skeleton ids in ascending order
    ids = sorted(int(name) for name in os.listdir(skeleton_file) if name.isdigit())
    return nskel.SkeletonMetrics(label_file, skeleton_file, ids, serialization_file)
def test_nodes(self):
    """Check the node assignments computed for the two test skeletons."""
    import nifty.skeletons as nskel
    metrics = nskel.SkeletonMetrics('./tmp/seg.n5/seg', './tmp/skels.n5', [1, 2], 1)
    assignments = metrics.getNodeAssignments()
    self.assertEqual(list(assignments.keys()), [1, 2])
    # per skeleton: the node labels, read in sorted node order
    for skel_id, expected in ((1, 6 * [3]), (2, [0, 0, 0, 0, 2, 2])):
        node_labels = [assignments[skel_id][node]
                       for node in sorted(assignments[skel_id].keys())]
        self.assertEqual(node_labels, expected)