def test_Precision(self):
    data = Table('iris')
    learner = LogisticRegressionLearner()
    results = TestOnTrainingData(data, [learner])
    self.assertGreater(Precision(results)[0], 0.9)
    self.assertEqual(round(Precision(results)[0], 3), 0.928)
@classmethod
def setUpClass(cls):
    cls.iris = Table('iris')
    cls.score = Precision()
def test_Precision(self):
    data = Table('iris')
    learner = LogisticRegressionLearner(preprocessors=[])
    results = TestOnTrainingData(data, [learner])
    self.assertAlmostEqual(Precision(results)[0], 0.962, 3)
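The test fragments above come from a unittest-style test case and are not runnable on their own. Below is a minimal sketch of the scaffolding they assume: the Orange3 imports, the test-case class, and the main guard are my additions, and the sketch keeps the older Orange3 call style used in the fragments, where constructing TestOnTrainingData(data, [learner]) runs the evaluation and returns a Results object. Exact score values depend on the installed Orange3 and scikit-learn versions, so only the threshold assertion is kept.

# Minimal sketch of the assumed scaffolding (not part of the original fragments).
import unittest

from Orange.data import Table
from Orange.classification import LogisticRegressionLearner
from Orange.evaluation import TestOnTrainingData, Precision


class PrecisionTest(unittest.TestCase):
    def test_precision_on_iris(self):
        data = Table('iris')
        learner = LogisticRegressionLearner()
        # Older Orange3 API: constructing TestOnTrainingData with the data and a
        # list of learners runs the evaluation and returns a Results object.
        results = TestOnTrainingData(data, [learner])
        # Precision(results) returns one score per learner; index 0 is ours.
        self.assertGreater(Precision(results)[0], 0.9)


if __name__ == '__main__':
    unittest.main()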
# Train each model and produce results: initialize a CrossValidation() object
# from the 'testing' module and call it with 1) the dataset and 2) the learner.
# Note that the 'learners' argument must be a list, i.e. '[learner]'.
# (The imports and the 'learners'/'wineData' setup this fragment relies on are
# sketched after it.)
cv = CrossValidation(k=10, random_state=44)
results = {}
print('Training')
for name, learner in tqdm(learners.items()):
    results[name] = cv(data=wineData, learners=[learner])

# The metrics come from the 'evaluation.scoring' module. The 'average' parameter
# controls how per-class scores are averaged; use either 'micro' or 'macro'
# (preferably 'macro', at least for the final results).

# Evaluate each model: collect accuracy, precision, recall, F1 and the induced
# rule list, then print the scores and write them to a CSV file.
scores = defaultdict(dict)
print('Testing')
for name, res in tqdm(results.items()):
    scores[name]['acc'] = CA(results=res)[0]
    scores[name]['prec'] = Precision(results=res, average='macro')[0]
    scores[name]['rec'] = Recall(results=res, average='macro')[0]
    scores[name]['f1'] = F1(results=res, average='macro')[0]
    # Refit the learner on the full dataset to extract the induced rules.
    classifier = learners[name](data=wineData)
    rules = '\n'.join(str(rule) for rule in classifier.rule_list)
    scores[name]['rules'] = rules

scores_df = pd.DataFrame.from_dict(data=scores).transpose()
scores_df.to_csv(category + '_' + evaluator + '.csv')
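The fragment above starts mid-script: wineData, learners, category, and evaluator are defined earlier and the imports are not shown. The following is a minimal sketch of one plausible setup, assuming Orange3. The CN2 rule learners are a guess based on the fragment reading classifier.rule_list (only rule-based classifiers expose it), and the dataset and file-name values are placeholders, not the original script's choices.

# Minimal sketch of the assumed setup (all concrete values are assumptions).
from collections import defaultdict

import pandas as pd
from tqdm import tqdm

from Orange.data import Table
from Orange.classification import CN2Learner, CN2UnorderedLearner
from Orange.evaluation.testing import CrossValidation
from Orange.evaluation.scoring import CA, Precision, Recall, F1

wineData = Table('wine')              # assumed: a wine dataset loadable by Orange
learners = {                          # assumed: rule learners that expose rule_list
    'CN2': CN2Learner(),
    'CN2-unordered': CN2UnorderedLearner(),
}
category = 'wine'                     # assumed prefix for the output CSV name
evaluator = 'cv10'                    # assumed suffix for the output CSV name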