def test_create_with_float_metric(self):
    write_smt_txt(1.239, "tests", metric="xy")
    with open(self.outcome_path, "r") as outcome_file:
        outcome = json.load(outcome_file)
    self.assertEqual(outcome["text_outcome"], "| xy: 1.24")
def test_new_outcome_keys(self):
    write_smt_txt("Awesome", "tests")
    with open(self.outcome_path, "r") as outcome_file:
        outcome = json.load(outcome_file)
    self.assertEqual(len(outcome), 2)
    self.assertIn("text_outcome", outcome)
    self.assertIn("numeric_outcome", outcome)
def test_append_with_metric_inline(self):
    write_smt_txt("Awesome", "tests")
    write_smt_txt(22, "tests", metric="xy", inline=True)
    with open(self.outcome_path, "r") as outcome_file:
        outcome = json.load(outcome_file)
    self.assertEqual(outcome["text_outcome"], "| Awesome xy: 22")
def test_appending_content_inline(self):
    write_smt_txt("Awesome", "tests")
    write_smt_txt("Great", "tests", inline=True)
    with open(self.outcome_path, "r") as outcome_file:
        outcome = json.load(outcome_file)
    self.assertEqual(outcome["text_outcome"], "| Awesome Great")
def score(self, trk_file=None, y=None, args=None):
    if isinstance(trk_file, list):
        trk_file = trk_file[0]
    if trk_file is None:
        # No tractogram given: run prediction on the Tractometer data first.
        TM_DATA = [
            "/local/entrack/data/tractometer/125mm/FODl4.nii.gz",
            "/local/entrack/data/tractometer/125mm/wm_aligned.nii.gz"
        ]
        args.file_name = "tm_fibers.trk"
        self.predict(TM_DATA, args)
        trk_file = os.path.join(self.save_path, args.file_name)
    TM_PATH = ("./.tractometer/ismrm_2015_tractography_challenge_scoring/"
               "score_tractogram.py")
    SCORING_DATA = ("./.tractometer/ismrm_2015_tractography_challenge_"
                    "scoring/scoring_data/")
    scoring_cmd = "python {command} {tracts} {base} {out}".format(
        command=TM_PATH,
        tracts=trk_file,
        base=SCORING_DATA,
        out=self.save_path)
    # The scoring script runs inside its own conda environment.
    subprocess.run([
        "bash", "-c",
        "source activate entrack_tm && {}".format(scoring_cmd)
    ])
    eval_path = os.path.join(self.save_path, "scores", "tm_fibers.json")
    with open(eval_path) as eval_file:
        eval_data = json.load(eval_file)
    for metric in [
            "mean_OL", "mean_OR", "VC", "NC", "IC", "VB", "IB", "mean_F1"
    ]:
        write_smt_txt(eval_data[metric],
                      self.save_path,
                      metric=metric,
                      inline=True)
    if "score" in args:
        return eval_data[args.score]
def save_scalars(self,
                 trk_file,
                 nii_file,
                 min_pts_per_fiber=2,
                 every_n_fibers=1,
                 file_name="scalars.trk"):
    scalars, tracks, trk_hdr = self.fvm_scalars(trk_file, nii_file,
                                                min_pts_per_fiber,
                                                every_n_fibers)
    # Log the 25th/50th/75th percentile of each scalar; only the first
    # percentile of a metric starts a new entry (inline=q > 25).
    for metric in scalars.keys():
        for q in [25, 50, 75]:
            write_smt_txt(flat_percentile(scalars[metric], q),
                          self.save_path,
                          metric=metric + "_" + str(q),
                          inline=q > 25)
    save_fibers(tracks,
                trk_hdr,
                os.path.join(self.save_path, file_name),
                scalars=scalars)
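# Hypothetical sketch of the flat_percentile helper used above, inferred
# only from its name and call site: flatten a (possibly ragged) collection
# of per-fiber scalar arrays and take the q-th percentile. The real helper
# may differ.
import numpy as np

def flat_percentile(values, q):
    # Flatten each per-fiber array, join them, and compute the percentile.
    flat = np.concatenate([np.ravel(v) for v in values])
    return float(np.percentile(flat, q))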
def test_new_outcome_content(self):
    write_smt_txt("Awesome", "tests")
    with open(self.outcome_path, "r") as outcome_file:
        outcome = json.load(outcome_file)
    self.assertEqual(outcome["text_outcome"], "| Awesome")
def test_new_outcome_is_dict(self):
    write_smt_txt("Awesome", "tests")
    with open(self.outcome_path, "r") as outcome_file:
        outcome = json.load(outcome_file)
    self.assertIsInstance(outcome, dict)
def test_create_new_outcome(self):
    write_smt_txt("Awesome", "tests")
    self.assertTrue(os.path.exists(self.outcome_path))
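# A minimal sketch of write_smt_txt, reconstructed from the tests above.
# Hypothetical: the outcome file name ("outcome.json") and how
# "numeric_outcome" is populated are assumptions not pinned down by these
# tests; the real implementation may differ. Relies on the module's
# existing json/os imports.
def write_smt_txt(value, out_dir, metric=None, inline=False):
    outcome_path = os.path.join(out_dir, "outcome.json")  # assumed name
    if os.path.exists(outcome_path):
        with open(outcome_path, "r") as outcome_file:
            outcome = json.load(outcome_file)
    else:
        outcome = {"text_outcome": "", "numeric_outcome": {}}
    # Floats are rounded to two decimals ("| xy: 1.24"); ints stay as-is.
    if isinstance(value, float):
        value = round(value, 2)
    entry = "{}: {}".format(metric, value) if metric is not None else str(value)
    # A non-inline call starts a new "| "-prefixed segment; an inline call
    # appends to the current segment with a single space.
    if inline:
        outcome["text_outcome"] += " " + entry
    else:
        outcome["text_outcome"] += "| " + entry
    if metric is not None:
        outcome["numeric_outcome"][metric] = value  # assumption
    with open(outcome_path, "w") as outcome_file:
        json.dump(outcome, outcome_file)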