    def test_recall(self):
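        # Recall = TP / (TP + FN), computed independently for each label.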
        predicted_results = array([[1], [1], [0], [0], [1]])
        actual_results = array([[1], [1], [0], [1], [1]])
        labels = [0, 1]
        metric = Recall()

        evaluation = Evaluation(predicted_results, actual_results, labels)
        results = evaluation.run(metric)

        expected_1 = 0.75  # 3 of the 4 actual 1s are predicted correctly
        expected_0 = 1     # the single actual 0 is predicted correctly

        self.assertEqual(expected_1, results[1])
        self.assertEqual(expected_0, results[0])

        predicted_results = array([[0], [0], [1], [0], [0]])
        actual_results = array([[1], [1], [0], [1], [1]])

        evaluation = Evaluation(predicted_results, actual_results, labels)
        results = evaluation.run(metric)

        expected_1 = 0  # every actual 1 is missed
        expected_0 = 0  # the single actual 0 is predicted as 1

        self.assertEqual(expected_1, results[1])
        self.assertEqual(expected_0, results[0])

    def test_accuracy(self):
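        # The expected 0.625 for both labels matches balanced accuracy:
        # (sensitivity + specificity) / 2 = (0.75 + 0.5) / 2.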
        predicted_results = array([[1], [1], [0], [1], [0], [1]])
        actual_results = array([[1], [1], [1], [0], [0], [1]])
        labels = [0, 1]
        metric = Accuracy()

        evaluation = Evaluation(predicted_results, actual_results, labels)
        results = evaluation.run(metric)

        expected_1, expected_0 = 0.625, 0.625

        self.assertAlmostEqual(expected_1, results[1])
        self.assertAlmostEqual(expected_0, results[0])

    def test_f2_score(self):
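        # F-score with beta=2 weights recall over precision:
        # (1 + 2**2) * P * R / (2**2 * P + R); for label 1, P = 0.75 and R = 0.6.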
        predicted_results = array([[1], [1], [0], [1], [0], [1]])
        actual_results = array([[1], [1], [1], [0], [1], [1]])
        labels = [0, 1]
        metric = F_score(2)

        evaluation = Evaluation(predicted_results, actual_results, labels)
        results = evaluation.run(metric)

        expected_1 = 0.625
        expected_0 = 0

        self.assertAlmostEqual(expected_1, results[1])
        self.assertAlmostEqual(expected_0, results[0])

    def test_simple_accuracy(self):
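        # SimpleAccuracy reports the overall fraction of matching predictions,
        # so both labels share the same value of 3/4.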
        predicted_results = array([[1], [1], [0], [0]])
        actual_results = array([[1], [1], [0], [1]])
        labels = [0, 1]
        metric = SimpleAccuracy()

        evaluation = Evaluation(predicted_results, actual_results, labels)
        results = evaluation.run(metric)

        expected_1 = 0.75
        expected_0 = 0.75

        self.assertEqual(expected_1, results[1])
        self.assertEqual(expected_0, results[0])

    def test_fpr(self):
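        # FPR = FP / (FP + TN); label 1 has 2 false positives against 2 true negatives.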
        predicted_results = array([[1], [1], [0], [1], [0]])
        actual_results = array([[0], [0], [0], [1], [0]])
        labels = [0, 1]
        metric = FPR()

        evaluation = Evaluation(predicted_results, actual_results, labels)
        results = evaluation.run(metric)

        expected_1 = 0.5
        expected_0 = 0

        self.assertEqual(expected_1, results[1])
        self.assertEqual(expected_0, results[0])

    def test_precision(self):
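        # Precision = TP / (TP + FP), computed per label.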
        predicted_results = array([[1], [1], [0], [0]])
        actual_results = array([[1], [1], [0], [1]])
        labels = [0, 1]
        metric = Precision()

        evaluation = Evaluation(predicted_results, actual_results, labels)
        results = evaluation.run(metric)

        expected_1 = 1
        expected_0 = 0.5

        self.assertEqual(expected_1, results[1])
        self.assertEqual(expected_0, results[0])

        predicted_results = array([[0], [0], [0], [0]])

        evaluation = Evaluation(predicted_results, actual_results, labels)
        results = evaluation.run(metric)

        expected_1 = 0     # no 1s are predicted, so precision for label 1 falls back to 0
        expected_0 = 0.25  # one of the four predicted 0s is actually 0

        self.assertEqual(expected_1, results[1])
        self.assertEqual(expected_0, results[0])

    def create_evaluation(self, predicted_values, real_values, labels):
        # Helper that builds the Evaluation instance under test.
        return Evaluation(predicted_values, real_values, labels)