Example #1
    def test_create_result_report(self):
        metrics = [multiclass_recall, multiclass_precision, multiclass_f1]
        r = ResultReport(approach=Approach.load(self.approach.id),
                         metrics=metrics)
        df = r.as_dataframe()
        for m in [f.__name__ for f in metrics]:
            self.assertTrue(m in df.columns)
Example #2
    def test_create_runpool(self):
        # Force reload runs from database
        self.approach = Approach.load(self.approach.id)
        runpool = RunPool(self.approach.runs)

        for run in runpool.iteruns():
            self.assertTrue(isinstance(run, Run))
Example #3
    def test_iterate_all_runs_runpool(self):
        self.approach = Approach.load(self.approach.id)
        runpool = RunPool(self.approach.runs)

        i = 0
        for run in runpool.iteruns():
            self.assertTrue(isinstance(run, Run))
            run.status = "finished"
            i += 1

        self.assertEqual(i, runpool.iter)
Example #4
File: cli.py  Project: mfarre/DriftAI
def status(approach_id):
    if not _is_running_in_project():
        print("You must use driftai CLI inside an driftai project directory")
        return
    print("Loading approach data...")
    stat = Approach.load(approach_id).status
    if not stat["done"]:
        print("Approach {} is still running".format(approach_id))
        print(stat["progress_bar"] + " Done runs: " + str(stat["done_runs"]) + " Total runs: " + str(stat["total_runs"]))
    else:
        print("There are no left runs for Approach {approach_id}!".format(approach_id))
Example #5
    def test_get_subdataset_runs(self):
        runnable = import_from("test.lr.logistic_regression",
                               "LogisticRegressionApproach")
        runs = RunGenerator.from_runnable_approach(runnable())
        for run in runs:
            run.save()

        runs = Approach.load(self.approach.id).runs
        self.assertTrue(len(runs) > 0)
        for run in runs:
            self.assertIsInstance(run, Run)
        return runs
Example #6
File: cli.py  Project: mfarre/DriftAI
def evaluate(approach_id, metric):
    if not _is_running_in_project():
        print("You must use driftai CLI inside an driftai project directory")
        return
    if not Approach.collection().exists(approach_id):
        print("Approach with id {} does not exist.".format(approach_id))
        return

    approach = Approach.load(approach_id)
    r = ResultReport(approach=approach, metrics=[str_to_metric_fn[m] for m in metric])
    r.as_dataframe()\
        .to_csv(approach_id + "_evaluation.csv", index=False)
Example #7
    def test_generate_runs_from_subdataset(self):
        # Trick to load runnable approach
        LogisticRegressionApproach = import_from("test.lr.logistic_regression",
                                                 "LogisticRegressionApproach")
        ra = LogisticRegressionApproach()

        # Generate the runs
        run_gens = RunGenerator.from_runnable_approach(ra)

        # Write runs to database
        ra.approach.runs = run_gens
        ra.approach.update()

        # Reload approach to test if runs were correctly stored
        approach = Approach.load(ra.approach.id)
        self.assertEqual(len(approach.runs), len(run_gens))
Example #8
    def test_using_sklearn_metrics(self):
        from sklearn.metrics import classification_report
        r = ResultReport(approach=Approach.load(self.approach.id),
                         metrics=[classification_report])
        df = r.as_dataframe()
        self.assertIsNotNone(df.classification_report[0])
Example #9
    def test_create_result_report(self):
        # The string names must match the metric functions' __name__ attributes,
        # since those become the report's column names (see Example #1)
        metrics = ["recall", "precision", "f1"]
        r = ResultReport(approach=Approach.load(self.approach.id),
                         metrics=[recall, precision, f1])
        df = r.as_dataframe()
        self.assertTrue(all(m in df.columns for m in metrics))
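
Putting the snippets together, a minimal end-to-end sketch might load an approach, walk its runs through a RunPool and export a ResultReport; the import paths, approach id and output file name are assumptions based on the examples above rather than on driftai's documentation:

from sklearn.metrics import classification_report
from driftai import Approach                      # import paths are assumptions
from driftai.run import Run, RunPool
from driftai.result_report import ResultReport

approach = Approach.load("my_approach_id")        # placeholder id

# Walk the stored runs, as the RunPool tests above do
pool = RunPool(approach.runs)
for run in pool.iteruns():
    assert isinstance(run, Run)

# Build a report over the runs and export it, as the evaluate command does
report = ResultReport(approach=approach, metrics=[classification_report])
report.as_dataframe().to_csv("my_approach_id_evaluation.csv", index=False)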