def test_num_classes(self):
    """vowpal_util.num_classes should report 2 classes for the vowpal sample."""
    task = TestTask()
    task.set_input(0, self.vowpal_example)
    vowpal_util.num_classes(task)
    class_count = int(task.outputs[0].read())
    self.assertEqual(class_count, 2)
def test_convert_format(self):
    """Converting the libsvm sample must reproduce the vowpal sample exactly."""
    task = TestTask()
    task.set_input(0, self.libsvm_example)
    vowpal_util.convert_libsvm_format_to_vowpal(task)
    converted = task.outputs[0].read()
    self.assertEqual(converted, self.vowpal_example)
def _process_task(self, data):
    """Run the segment_without_label_bias rule over `data` and return the task.

    `data` is an iterable of rows; each row's fields are space-joined and
    the rows newline-joined (with a trailing newline) to form input 0.
    """
    rows = [' '.join(str(field) for field in row) for row in data]
    task = TestTask()
    task.set_input(0, '\n'.join(rows) + '\n')
    task.outputs.setsize(3)
    rule = rules.segment_without_label_bias(self.weights)
    rule.fun(task)
    return task
def test_min(self):
    """rules.min selects the input with the smallest `key1`, carrying its params."""
    task = TestTask()
    task.source_parameters = [{"param1": 0}, {"param1": 1}]
    task.set_input_by_json(0, {"key1": 10, "key2": 20})
    task.set_input_by_json(1, {"key1": 5, "key2": 30})
    rules.min("key1").fun(task)
    # Input 1 wins (key1 == 5), so its params and values appear in the output.
    self.assertEqual(task.json_output(0), {"param1": 1, "key1": 5, "key2": 30})
def test_multilabel_evaluation(self):
    """Check the full set of multiclass-classification statistics.

    The four label pairs (1,1), (1,2), (2,2), (2,2) give 3 of 4 correct,
    which fixes every per-class and micro/macro-averaged metric below.
    """
    task = TestTask()
    # BUG FIX: the original passed map(self.label, ...) directly. Under
    # Python 3, map() returns a lazy iterator, which json serialization
    # rejects; materialize the labels with a list comprehension instead.
    pairs = [(1, 1), (1, 2), (2, 2), (2, 2)]
    task.set_input_by_json(0, [self.label(p) for p in pairs])
    rules.calculate_stats_multiclass_classification(task)
    result = task.json_output(0)
    self.assertEqual(result["accuracy"], 3. / 4)
    self.assertEqual(result["average_accuracy"], 3. / 4)
    self.assertEqual(result["error_rate"], 1. / 4)
    self.assertEqual(result["1-precision"], 1. / 2)
    self.assertEqual(result["2-precision"], 1)
    self.assertEqual(result["1-recall"], 1)
    self.assertEqual(result["2-recall"], 2. / 3)
    self.assertEqual(result["1-F1"], 2. / 3)
    self.assertEqual(result["precision-micro"], 3. / 4)
    self.assertEqual(result["recall-micro"], 3. / 4)
    self.assertEqual(result["precision-macro"], 3. / 4)