def test_Recall(self):
    data = Table('iris')
    learner = LogisticRegressionLearner()
    results = TestOnTrainingData(data, [learner])
    self.assertGreater(Recall(results)[0], 0.9)
    self.assertEqual(round(Recall(results)[0], 3), 0.927)
def test_Recall(self):
    data = Table('iris')
    learner = LogisticRegressionLearner(preprocessors=[])
    results = TestOnTrainingData(data, [learner])
    self.assertAlmostEqual(Recall(results)[0], 0.960, 3)
# then train the model, and produce results.
# So, simply initialize the CrossValidation() object from the 'testing' library
# and call it with input arguments 1) the dataset and 2) the learner.
# Note that the 'learner' argument should be in array form, i.e. '[learner]'.
cv = CrossValidation(k=10, random_state=44)
results = {}
print('Training')
for name, learner in tqdm(learners.items()):
    results[name] = cv(data=wineData, learners=[learner])

# As for the required metrics, you can get them using the 'evaluation.scoring' library.
# The 'average' parameter of each metric is used while measuring scores to perform
# a type of averaging on the data. DON'T WORRY MUCH ABOUT THAT JUST YET (AGAIN). USE EITHER
# 'MICRO' OR 'MACRO' (preferably 'macro', at least for final results).
# =============================================================================
# ADD COMMANDS TO EVALUATE YOUR MODEL HERE (AND PRINT ON CONSOLE)
scores = defaultdict(dict)
print('Testing')
for name, res in tqdm(results.items()):
    scores[name]['acc'] = CA(results=res)[0]
    scores[name]['prec'] = Precision(results=res, average='macro')[0]
    scores[name]['rec'] = Recall(results=res, average='macro')[0]
    scores[name]['f1'] = F1(results=res, average='macro')[0]
    classifier = learners[name](data=wineData)
    rules = '\n'.join([str(rule) for rule in classifier.rule_list])
    scores[name]['rules'] = rules

scores_df = pd.DataFrame.from_dict(data=scores).transpose()
scores_df.to_csv(category + '_' + evaluator + '.csv')
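To make the 'macro' remark above more concrete, here is a minimal sketch that recomputes macro-averaged recall by hand from a Results object, using only its `actual` and `predicted` arrays and NumPy. The `results` dict is the one built in the loop above; the helper name `macro_recall` and the `learner_idx` parameter are illustrative, not part of the original script or of Orange's scoring API.

```python
import numpy as np

def macro_recall(res, learner_idx=0):
    """Recompute macro-averaged recall from an evaluation Results object.

    Macro averaging: compute recall for each class separately, then take
    the unweighted mean, so every class counts equally regardless of how
    many samples it has (unlike micro averaging, which pools all samples).
    """
    actual = np.asarray(res.actual, dtype=int)                      # true class indices
    predicted = np.asarray(res.predicted[learner_idx], dtype=int)   # predictions of one learner
    per_class = []
    for cls in np.unique(actual):
        mask = actual == cls
        per_class.append(np.mean(predicted[mask] == cls))           # recall for this class
    return float(np.mean(per_class))

# Usage sketch, assuming 'results' from the training loop above:
# for name, res in results.items():
#     print(name, macro_recall(res))   # should roughly match Recall(results=res, average='macro')[0]
```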
def setUpClass(cls):
    cls.iris = Table('iris')
    cls.score = Recall()
def setUpClass(cls): cls.iris = Table("iris") cls.score = Recall()