def _train_model(self, model_file=None):
    """Build the GALA feature manager and obtain a trained RF classifier.

    If ``model_file`` names an existing file, the classifier is loaded from
    disk; otherwise one is trained from the ground-truth, membrane and
    superpixel volumes referenced on ``self`` and, when a path was given,
    saved back to it.  The resulting merge-priority policy is stored on
    ``self.model`` and the feature manager on ``self.fm``.
    """
    print("Creating GALA feature manager...")
    # Histogram parameters follow the numbers recommended in the repo.
    managers = [
        features.moments.Manager(),
        features.histogram.Manager(25, 0, 1, [0.1, 0.5, 0.9]),
        features.graph.Manager(),
        features.contact.Manager(),
    ]
    self.fm = features.base.Composite(children=managers)
    if model_file is not None and os.path.isfile(model_file):
        print('Loading model from path ...')
        forest = classify.load_classifier(model_file)
    else:
        # Read the three training volumes from their HDF5 paths.
        gt_vol = imio.read_h5_stack(self.gt)
        pr_vol = imio.read_h5_stack(self.mem)
        sp_vol = imio.read_h5_stack(self.sp)
        print("Creating training RAG...")
        training_rag = agglo.Rag(sp_vol, pr_vol, feature_manager=self.fm)
        print("Learning agglomeration...")
        X, y, w, merges = training_rag.learn_agglomerate(
            gt_vol, self.fm,
            learning_mode='permissive',
            min_num_epochs=self.min_ep)[0]
        # Only the first label column is used as the training target.
        forest = classify.DefaultRandomForest().fit(X, y[:, 0])
        # Persist the freshly trained classifier when a path was requested.
        if model_file is not None:
            classify.save_classifier(forest, model_file)
    self.model = agglo.classifier_probability(self.fm, forest)
def test_learned_agglo_4channel():
    """End-to-end learned agglomeration on the 4-channel probability map.

    Loads a pre-trained random forest, wraps it as a merge-priority policy,
    agglomerates the shared test watershed to threshold 0.5, and checks the
    segmentation against the stored reference volume.
    """
    forest = classify.load_classifier('example-data/rf4.joblib')
    policy = agglo.classifier_probability(fc, forest)
    rag = agglo.Rag(ws_test, p4_test, policy, feature_manager=fc)
    rag.agglomerate(0.5)
    produced = rag.get_segmentation()
    expected = imio.read_h5_stack('example-data/test-seg4.lzf.h5')
    assert_array_equal(produced, expected)
def test_learned_agglo_1channel():
    """End-to-end learned agglomeration on the 1-channel probability map.

    Mirrors the 4-channel test: load a saved classifier, agglomerate the
    test watershed to 0.5, and compare against the reference segmentation.
    """
    classifier = classify.load_classifier('example-data/rf1.joblib')
    merge_policy = agglo.classifier_probability(fc, classifier)
    graph = agglo.Rag(ws_test, pr_test, merge_policy, feature_manager=fc)
    graph.agglomerate(0.5)
    result = graph.get_segmentation()
    reference = imio.read_h5_stack('example-data/test-seg1.lzf.h5')
    assert_array_equal(result, reference)
def test_segment_with_classifier_4_channel():
    """Segment with a saved classifier and require VI == 0 vs the reference.

    The classifier artifact differs by interpreter: a plain joblib file on
    Python 2, and a tar-wrapped py3 artifact extracted via ``tar_extract``
    otherwise.
    """
    if PYTHON_VERSION == 2:
        classifier = classify.load_classifier(
            os.path.join(rundir, "example-data/rf-4.joblib"))
    else:
        archive = os.path.join(rundir, "example-data/rf4-py3.joblib")
        with tar_extract(archive) as extracted:
            classifier = joblib.load(extracted)
    policy = agglo.classifier_probability(fc, classifier)
    rag = agglo.Rag(ws_test, p4_test, policy, feature_manager=fc)
    rag.agglomerate(0.5)
    produced = rag.get_segmentation()
    reference = imio.read_h5_stack(
        os.path.join(rundir, "example-data/test-seg-4.lzf.h5"))
    # Variation of information of zero means the segmentations agree exactly.
    assert_allclose(ev.vi(produced, reference), 0.0)
def test_segment_with_classifier_4_channel():
    """Check that a saved 4-channel classifier reproduces the reference
    segmentation (variation of information must be exactly zero)."""
    # Pick the classifier artifact matching the running interpreter.
    if PYTHON_VERSION != 2:
        tarball = os.path.join(rundir, 'example-data/rf4-py3.joblib')
        with tar_extract(tarball) as payload:
            rf = joblib.load(payload)
    else:
        rf = classify.load_classifier(
            os.path.join(rundir, 'example-data/rf-4.joblib'))
    merge_policy = agglo.classifier_probability(fc, rf)
    test_graph = agglo.Rag(ws_test, p4_test, merge_policy,
                           feature_manager=fc)
    test_graph.agglomerate(0.5)
    segmentation = test_graph.get_segmentation()
    expected = imio.read_h5_stack(
        os.path.join(rundir, 'example-data/test-seg-4.lzf.h5'))
    assert_allclose(ev.vi(segmentation, expected), 0.0)
def testAggloRFBuild(self):
    """Build a RAG with a saved RF merge policy and check the node counts
    before agglomeration, after agglomerating to 0.1, and after removing
    inclusions."""
    from gala import agglo
    from gala import features
    from gala import classify
    # Test data lives next to the installed gala package.
    package_dir = os.path.dirname(sys.modules["gala"].__file__)
    self.datadir = os.path.abspath(package_dir) + "/testdata/"
    classifier = classify.load_classifier(
        self.datadir + "agglomclassifier.rf.h5")
    # The feature-manager configuration is serialized on the classifier.
    feature_spec = json.loads(str(classifier.feature_description))
    manager = features.io.create_fm(feature_spec)
    policy = agglo.classifier_probability(manager, classifier)
    watershed, _, prediction = self.gen_watershed()
    rag = agglo.Rag(watershed, prediction, policy,
                    feature_manager=manager, nozeros=True)
    self.assertEqual(rag.number_of_nodes(), 3630)
    rag.agglomerate(0.1)
    self.assertEqual(rag.number_of_nodes(), 88)
    rag.remove_inclusions()
    self.assertEqual(rag.number_of_nodes(), 86)
def testNPRFBuild(self):
    """Build a stack_np Stack from a saved RF classifier and verify node
    counts before agglomeration, after agglomerating to 0.1, and after
    removing inclusions.

    Requires the optional numpy/neuroproof-backed ``stack_np`` module.
    """
    if not np_installed:
        # Bug fix: `self.assertTrue(np_installed)` reported a *failure*
        # when the optional backend is absent; a missing optional
        # dependency should be reported as a *skip* instead.
        self.skipTest("stack_np backend is not installed")
    from gala import stack_np
    from gala import classify
    # Test data ships inside the installed gala package.
    self.datadir = os.path.abspath(
        os.path.dirname(sys.modules["gala"].__file__)) + "/testdata/"
    cl = classify.load_classifier(self.datadir + "agglomclassifier_np.rf.h5")
    # Feature-manager configuration is serialized on the classifier.
    fm_info = json.loads(str(cl.feature_description))
    watershed, boundary, prediction = self.gen_watershed()
    stack = stack_np.Stack(watershed, prediction, single_channel=False,
                           classifier=cl, feature_info=fm_info)
    self.assertEqual(stack.number_of_nodes(), 3629)
    stack.agglomerate(0.1)
    self.assertEqual(stack.number_of_nodes(), 80)
    stack.remove_inclusions()
    self.assertEqual(stack.number_of_nodes(), 78)
def testAggloRFBuild(self):
    """Agglomeration regression test: a RAG driven by a stored random
    forest must hit the expected node counts at each stage."""
    from gala import agglo
    from gala import features
    from gala import classify
    self.datadir = os.path.abspath(
        os.path.dirname(sys.modules["gala"].__file__)) + "/testdata/"
    # Load the saved classifier and rebuild its feature manager from the
    # JSON description stored alongside it.
    saved_rf = classify.load_classifier(
        self.datadir + "agglomclassifier.rf.h5")
    fm_description = json.loads(str(saved_rf.feature_description))
    feature_mgr = features.io.create_fm(fm_description)
    merge_priority = agglo.classifier_probability(feature_mgr, saved_rf)
    watershed, dummy, prediction = self.gen_watershed()
    stack = agglo.Rag(watershed, prediction, merge_priority,
                      feature_manager=feature_mgr, nozeros=True)
    # Expected counts: initial graph, after agglomerating to 0.1,
    # and after inclusion removal.
    self.assertEqual(stack.number_of_nodes(), 3630)
    stack.agglomerate(0.1)
    self.assertEqual(stack.number_of_nodes(), 88)
    stack.remove_inclusions()
    self.assertEqual(stack.number_of_nodes(), 86)
def testNPRFBuild(self):
    """Regression test for the stack_np backend: expected node counts at
    build time, after agglomerating to 0.1, and after inclusion removal.

    Requires the optional numpy/neuroproof-backed ``stack_np`` module.
    """
    if not np_installed:
        # Bug fix: the original `self.assertTrue(np_installed)` turned a
        # missing optional dependency into a test *failure*; skipping is
        # the correct outcome.
        self.skipTest("stack_np backend is not installed")
    from gala import stack_np
    from gala import classify
    self.datadir = os.path.abspath(
        os.path.dirname(sys.modules["gala"].__file__)) + "/testdata/"
    cl = classify.load_classifier(self.datadir + "agglomclassifier_np.rf.h5")
    # Feature-manager configuration travels with the saved classifier.
    fm_info = json.loads(str(cl.feature_description))
    watershed, boundary, prediction = self.gen_watershed()
    stack = stack_np.Stack(watershed, prediction, single_channel=False,
                           classifier=cl, feature_info=fm_info)
    self.assertEqual(stack.number_of_nodes(), 3629)
    stack.agglomerate(0.1)
    self.assertEqual(stack.number_of_nodes(), 80)
    stack.remove_inclusions()
    self.assertEqual(stack.number_of_nodes(), 78)
# Load the oversegmentation (watershed) cube from the .mat file given as the
# seventh CLI argument, put the z axis first, and cast to int64 label ids.
mat_contents3 = scipy.io.loadmat(sys.argv[7])
ws = mat_contents3['cube']
ws = ws.transpose([2,0,1])
ws = ws.astype('int64')
print "unique labels in ws:",np.unique(ws).size
print "Creating RAG..."
# create graph and obtain a training dataset
if algo == 1: #run gala
    # Composite feature manager: moments, histogram (25 bins over [0, 1]
    # with the 0.1/0.5/0.9 percentiles), graph topology, and contact
    # features — matching the configuration used in the gala examples.
    fc = features.base.Composite(children=[features.moments.Manager(),
        features.histogram.Manager(25, 0, 1, [0.1, 0.5, 0.9]),
        features.graph.Manager(),
        features.contact.Manager([0.1, 0.5, 0.9]) ])
    # Pre-trained random forest supplied as the sixth CLI argument; wrap it
    # as a merge-priority policy for the RAG.
    rf = classify.load_classifier(sys.argv[6])
    learned_policy = agglo.classifier_probability(fc, rf)
    print 'applying classifier...'
    # NOTE(review): `membrane`, `thresh` and `algo` are defined earlier in
    # this script (not visible in this chunk).
    g_test = agglo.Rag(ws, membrane, learned_policy, feature_manager=fc)
    print 'choosing best operating point...'
    g_test.agglomerate(thresh) # best expected segmentation
    cube = g_test.get_segmentation()
    # Relabel segments to consecutive ids starting from one.
    cube, _, _ = evaluate.relabel_from_one(cube)
    print "Completed Gala Run"
# gala allows implementation of other agglomerative algorithms, including
# the default, mean agglomeration
if algo == 2: #mean agglomeration
# Report the size of the label set in the watershed volume before building
# the region adjacency graph.
print "unique labels in ws:", np.unique(ws).size
print "Creating RAG..."
# create graph and obtain a training dataset
if algo == 1: #run gala
    # Composite feature manager (moments, histogram, graph and contact
    # features) matching the gala example configuration.
    fc = features.base.Composite(children=[
        features.moments.Manager(),
        features.histogram.Manager(25, 0, 1, [0.1, 0.5, 0.9]),
        features.graph.Manager(),
        features.contact.Manager([0.1, 0.5, 0.9])
        ])
    # Random forest classifier path comes from the sixth CLI argument.
    rf = classify.load_classifier(sys.argv[6])
    learned_policy = agglo.classifier_probability(fc, rf)
    print 'applying classifier...'
    # NOTE(review): `ws`, `membrane`, `thresh` and `algo` are defined
    # earlier in this script (outside this chunk).
    g_test = agglo.Rag(ws, membrane, learned_policy, feature_manager=fc)
    print 'choosing best operating point...'
    g_test.agglomerate(thresh) # best expected segmentation
    cube = g_test.get_segmentation()
    # Relabel segments to consecutive ids starting from one.
    cube, _, _ = evaluate.relabel_from_one(cube)
    print "Completed Gala Run"
# gala allows implementation of other agglomerative algorithms, including
# the default, mean agglomeration
if algo == 2: #mean agglomeration
    print 'mean agglomeration step...'
from gala import evaluate

# Wall-clock timer for the whole run.
start = time.time()

# Watershed / preprocessing parameters for the segmentation pipeline below.
min_seed_size = 2
connectivity = 2
smooth_thresh = 0.02
override = 0

# ISBI AC4 test volumes: raw EM image, membrane probability map, and
# ground-truth labels, each stored as a MATLAB .mat file.
inFileImageTest = 'isbi_em_ac4.mat'
inFileMembraneTest = 'isbi_membrane_ac4.mat'
inFileTruthTest = 'isbi_labels_ac4.mat'

# Composite feature manager: moments, histogram (25 bins over [0, 1] with
# the 0.1/0.5/0.9 percentiles), graph topology, and contact features.
fc = features.base.Composite(children=[features.moments.Manager(),
    features.histogram.Manager(25, 0, 1, [0.1, 0.5, 0.9]),
    features.graph.Manager(),
    features.contact.Manager([0.1, 0.5, 0.9]) ])

# Random forest trained on the AC3 volume, wrapped as a merge-priority
# policy for agglomeration.
rf = classify.load_classifier('ac3_full_classifier.rf')
learned_policy = agglo.classifier_probability(fc, rf)

# Load the three test volumes from their respective .mat containers.
mat_contents = scipy.io.loadmat(inFileImageTest)
imTest = mat_contents['im']
mat_contents2 = scipy.io.loadmat(inFileMembraneTest)
membraneTest = mat_contents2['membrane']
mat_contents3 = scipy.io.loadmat(inFileTruthTest)
gt_test = mat_contents3['truth']

# NOTE(review): assumes the image volume is 3-D (x, y, z) — confirm against
# the .mat contents.
xdim, ydim, zdim = (imTest.shape)
# Pre-allocate the watershed output volume; filled later in the script.
ws = np.zeros(membraneTest.shape)
imTest = imTest.astype('int32')