Example #1
 def test_ce(self):
     self.assertAlmostEqual(metrics.ce([1,1,1,0,0,0],
                                       [1,1,1,0,0,0]), 0)
     self.assertAlmostEqual(metrics.ce([1,1,1,0,0,0],
                                       [1,1,1,1,0,0]), 1.0/6)
     self.assertAlmostEqual(metrics.ce([1,2,3,4],
                                       [1,2,3,3]), 0.25)
     self.assertAlmostEqual(metrics.ce(["cat", "dog", "bird"],
                                       ["cat", "dog", "fish"]), 1.0/3)
     self.assertAlmostEqual(metrics.ce(["cat", "dog", "bird"],
                                       ["caat", "doog", "biird"]), 1)
Example #2
 def test_ce(self):
     self.assertAlmostEqual(
         metrics.ce([1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0]), 0)
     self.assertAlmostEqual(
         metrics.ce([1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 0, 0]), 1.0 / 6)
     self.assertAlmostEqual(metrics.ce([1, 2, 3, 4], [1, 2, 3, 3]), 0.25)
     self.assertAlmostEqual(
         metrics.ce(["cat", "dog", "bird"], ["cat", "dog", "fish"]),
         1.0 / 3)
     self.assertAlmostEqual(
         metrics.ce(["cat", "dog", "bird"], ["caat", "doog", "biird"]), 1)
	def run(self, hyper_classifier, training_data, training_target, testing_data, testing_target):
		'''
		Fit a classifier built by hyper_classifier.make_classifier() with this run's
		parameterization, predict the testing split, and return a results dict with
		the predictions and evaluation metrics (or the exception that was raised).
		'''
		results = {'name': self.name, 'parameterization': self.parameterization, 'exception': None}
		try:
			self.classifier = hyper_classifier.make_classifier(training_data, training_target, **self.parameterization)
			self.classifier.fit(training_data, training_target)
			results['predicted'] = self.classifier.predict(testing_data)
		except MemoryError:
			# Running out of memory is not recoverable here; re-raise for the caller.
			raise
		except Exception as e:
			print(repr(e))
			results['exception'] = e
		else:
			# Drop the fitted classifier to free memory before computing metrics.
			self.classifier = None

			results['ml_metric_ce'] = ml_metrics.ce(testing_target, results['predicted'])
			results['ml_metric_rmse'] = ml_metrics.rmse(testing_target, results['predicted'])
			results['sklearn_metric_accuracy'] = sklearn.metrics.accuracy_score(testing_target, results['predicted'])
			# Note: f1/precision/recall use sklearn's default average='binary';
			# multiclass targets need an explicit `average` argument.
			results['sklearn_metric_f1'] = sklearn.metrics.f1_score(testing_target, results['predicted'])
			results['sklearn_metric_precision'] = sklearn.metrics.precision_score(testing_target, results['predicted'])
			results['sklearn_metric_recall'] = sklearn.metrics.recall_score(testing_target, results['predicted'])

			# Per-label one-vs-rest AUC: binarize the targets and predictions for each class.
			results['ml_metric_auc'] = {}
			results['sklearn_metric_auc'] = {}
			for label in set(testing_target):
				binary_testing_target = np.array([1 if x == label else 0 for x in testing_target])
				binary_predicted = np.array([1 if x == label else 0 for x in results['predicted']])
				results['ml_metric_auc'][label] = ml_metrics.auc(binary_testing_target, binary_predicted)
				results['sklearn_metric_auc'][label] = sklearn.metrics.roc_auc_score(binary_testing_target, binary_predicted)

		return results
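
The hand-rolled binarization in the else branch can also be expressed with sklearn's label_binarize helper. The sketch below is self-contained; the toy labels are invented purely for illustration, and it computes the same kind of per-label one-vs-rest AUC from hard 0/1 predictions via roc_auc_score.

import numpy as np
import sklearn.metrics
from sklearn.preprocessing import label_binarize

# Toy labels, made up for illustration only.
testing_target = np.array(['cat', 'dog', 'bird', 'cat', 'dog', 'bird'])
predicted = np.array(['cat', 'dog', 'bird', 'dog', 'dog', 'cat'])

labels = sorted(set(testing_target))
true_bin = label_binarize(testing_target, classes=labels)
pred_bin = label_binarize(predicted, classes=labels)

auc_per_label = {}
for i, label in enumerate(labels):
    # One-vs-rest ROC AUC for this class, computed from hard 0/1 predictions.
    auc_per_label[label] = sklearn.metrics.roc_auc_score(true_bin[:, i], pred_bin[:, i])

print(auc_per_label)

Passing predicted probabilities instead of hard labels would give a more informative ranking-based AUC, but the hard-label version mirrors what run() computes.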