Example 1
def test_F1_binary(self):
    results = Results(
        domain=Domain([], DiscreteVariable(name="y", values="01")),
        actual=[0, 1, 1, 1, 0, 0, 1, 0, 0, 1])
    results.predicted = np.array([[0, 1, 1, 1, 0, 0, 1, 0, 0, 1],
                                  [0, 1, 1, 1, 0, 0, 1, 1, 1, 1]])
    res = F1(results)
    self.assertEqual(res[0], 1.)
    self.assertAlmostEqual(res[1], 5 / 6)
    res_target = F1(results, target=1)
    self.assertEqual(res[0], res_target[0])
    self.assertEqual(res[1], res_target[1])
    res_target = F1(results, target=0)
    self.assertEqual(res_target[0], 1.)
    self.assertAlmostEqual(res_target[1], 3 / 4)
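The asserted values can be checked by hand: in the second row of predictions, class 1 has 5 true positives, 2 false positives and no false negatives, while class 0 has 3 true positives, no false positives and 2 false negatives. A minimal sketch (plain Python, independent of Orange; f1_for_target is a hypothetical helper written only for this check) reproduces the 5/6 and 3/4 scores:

# Recompute the per-target F1 scores asserted above (hypothetical helper,
# shown only to make the 5/6 and 3/4 values explicit).
def f1_for_target(actual, predicted, target):
    tp = sum(a == target and p == target for a, p in zip(actual, predicted))
    fp = sum(a != target and p == target for a, p in zip(actual, predicted))
    fn = sum(a == target and p != target for a, p in zip(actual, predicted))
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    return 2 * precision * recall / (precision + recall)

actual = [0, 1, 1, 1, 0, 0, 1, 0, 0, 1]
model2 = [0, 1, 1, 1, 0, 0, 1, 1, 1, 1]
print(f1_for_target(actual, model2, target=1))  # 0.8333... == 5 / 6
print(f1_for_target(actual, model2, target=0))  # 0.75      == 3 / 4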
Example 2
def test_F1_multiclass(self):
    results = Results(
        domain=Domain([], DiscreteVariable(name="y", values="01234")),
        actual=[0, 4, 4, 1, 2, 0, 1, 2, 3, 2])
    results.predicted = np.array([[0, 1, 4, 1, 1, 0, 0, 2, 3, 1],
                                  [0, 4, 4, 1, 2, 0, 1, 2, 3, 2]])
    res = F1(results)
    self.assertAlmostEqual(res[0], 0.61)
    self.assertEqual(res[1], 1.)
def test_F1_target(self):
    results = Results(
        domain=Domain([], DiscreteVariable(name="y", values="01234")),
        actual=[0, 4, 4, 1, 2, 0, 1, 2, 3, 2])
    results.predicted = np.array([[0, 1, 4, 1, 1, 0, 0, 2, 3, 1],
                                  [0, 4, 4, 1, 2, 0, 1, 2, 3, 2]])

    for target, prob in ((0, 4 / 5), (1, 1 / 3), (2, 1 / 2), (3, 1.),
                         (4, 2 / 3)):
        res = F1(results, target=target)
        self.assertEqual(res[0], prob)
        self.assertEqual(res[1], 1.)
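The 0.61 asserted in test_F1_multiclass for the first model is the support-weighted average of the per-class scores checked in test_F1_target (the classes occur 2, 2, 3, 1 and 2 times in actual). A quick check of that arithmetic, assuming this weighted averaging is what the plain F1(results) call does for multiclass data:

# Per-class F1 of the first model (values from test_F1_target), weighted by
# how often each class occurs in 'actual'.
per_class_f1 = {0: 4 / 5, 1: 1 / 3, 2: 1 / 2, 3: 1., 4: 2 / 3}
support = {0: 2, 1: 2, 2: 3, 3: 1, 4: 2}
weighted = (sum(per_class_f1[c] * support[c] for c in support)
            / sum(support.values()))
print(round(weighted, 2))  # 0.61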
@classmethod
def setUpClass(cls):
    cls.iris = Table('iris')
    cls.score = F1()
Example 5
# Imports needed by this snippet ('wineData', 'learners', 'category' and
# 'evaluator' are assumed to be defined earlier in the original script).
from collections import defaultdict

import pandas as pd
from tqdm import tqdm
from Orange.evaluation.testing import CrossValidation
from Orange.evaluation.scoring import CA, Precision, Recall, F1

# Train each model with cross-validation and produce results.
# Initialize the CrossValidation() object from the 'testing' library and call it
# with 1) the dataset and 2) the learner.
# Note that the 'learner' argument must be passed as a list, i.e. '[learner]'.
cv = CrossValidation(k=10, random_state=44)
results = {}
print('Training')
for name, learner in tqdm(learners.items()):
    results[name] = cv(data=wineData, learners=[learner])

# The required metrics are available from the 'evaluation.scoring' library.
# The 'average' parameter of each metric controls how the per-class scores are
# combined (micro- vs. macro-averaging); use either 'micro' or 'macro'
# (preferably 'macro', at least for final results).
# =============================================================================

# Evaluate each model and collect the scores (printed/saved below).
scores = defaultdict(dict)
print('Testing')
for name, res in tqdm(results.items()):
    scores[name]['acc'] = CA(results=res)[0]
    scores[name]['prec'] = Precision(results=res, average='macro')[0]
    scores[name]['rec'] = Recall(results=res, average='macro')[0]
    scores[name]['f1'] = F1(results=res, average='macro')[0]
    # Re-fit each learner on the full data set to extract its induced rule list
    classifier = learners[name](data=wineData)
    rules = '\n'.join([str(rule) for rule in classifier.rule_list])
    scores[name]['rules'] = rules

scores_df = pd.DataFrame.from_dict(data=scores).transpose()
scores_df.to_csv(category + '_' + evaluator + '.csv')
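For a self-contained variant of the same pipeline, the sketch below runs cross-validation on the bundled iris data set (the same Table used in the setUpClass fixtures) and compares 'micro' and 'macro' averaging. It assumes the call style used above, where CrossValidation is instantiated first and then called with the data and learners; TreeLearner is only a stand-in for whatever learners the original script defined.

from Orange.data import Table
from Orange.classification import TreeLearner
from Orange.evaluation.testing import CrossValidation
from Orange.evaluation.scoring import CA, F1

iris = Table('iris')
cv = CrossValidation(k=5, random_state=0)
res = cv(data=iris, learners=[TreeLearner()])  # one Results object covering all learners

print('CA        :', CA(results=res)[0])                    # index 0 -> first (and only) learner
print('F1 (macro):', F1(results=res, average='macro')[0])
print('F1 (micro):', F1(results=res, average='micro')[0])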