def test_accuracy_metric_4_multilabel_classification(self):
        """Accuracy metric on 10x3 multilabel targets: perfect, inverted, mixed."""
        # Perfect prediction (float copy of the targets) scores exactly 1.
        y_true = np.array([[0, 1, 1], [0, 1, 1], [1, 0, 0], [1, 0, 0],
                           [0, 1, 1], [1, 0, 0], [0, 1, 1], [1, 0, 0],
                           [0, 1, 1], [1, 0, 0]])
        y_pred = y_true.copy()
        self.assertEqual(
            1,
            acc_metric(y_true, y_pred.astype(float),
                       task=MULTILABEL_CLASSIFICATION))

        # Complement every label via (y - 1) * -1: fully wrong, scores -1.
        y_pred = (y_pred.copy() - 1) * -1
        self.assertAlmostEqual(
            -1,
            acc_metric(y_true, y_pred.astype(float),
                       task=MULTILABEL_CLASSIFICATION))

        # Five all-zero rows followed by five all-one rows: near-chance score.
        y_pred = np.array(5 * [[0.0, 0.0, 0.0]] + 5 * [[1.0, 1.0, 1.0]])
        self.assertAlmostEqual(
            -0.0666666666,
            acc_metric(y_true, y_pred, task=MULTILABEL_CLASSIFICATION))
    def test_accuracy_metric_4_multiclass_classification(self):
        """Accuracy metric on 3-class one-hot predictions: perfect, inverted, cyclic."""
        y_true = np.array([1, 1, 0, 0, 1, 0, 2, 0, 2, 1])

        # One-hot rows matching y_true exactly -> perfect score of 1.
        one_hot = {0: [1.0, 0.0, 0.0], 1: [0.0, 1.0, 0.0], 2: [0.0, 0.0, 1.0]}
        y_pred = np.array([one_hot[label] for label in y_true])
        self.assertEqual(
            1, acc_metric(y_true, y_pred, task=MULTICLASS_CLASSIFICATION))

        # Invert the one-hot matrix: each row now marks the two wrong classes.
        y_pred = (y_pred.copy() - 1) * -1
        self.assertAlmostEqual(
            -0.5, acc_metric(y_true, y_pred, task=MULTICLASS_CLASSIFICATION))

        # Classes guessed cyclically 0, 1, 2, 0, ... regardless of the target.
        y_pred = np.array([one_hot[i % 3] for i in range(10)])
        self.assertAlmostEqual(
            0.1, acc_metric(y_true, y_pred, task=MULTICLASS_CLASSIFICATION))
    # --- Exemplo n.º 3 (scrape artifact; vote count 0) ---
    def test_accuracy_metric_4_binary_classification(self):
        """Accuracy metric on hard binary labels: perfect, inverted, chance."""
        y_true = np.array([0, 1, 1, 1, 0, 0, 1, 1, 1, 0]).reshape((-1, 1))

        # Identical labels -> perfect score of 1.
        self.assertEqual(1, acc_metric(y_true, y_true.copy()))

        # Complemented labels via (y - 1) * -1 -> fully wrong, score -1.
        self.assertAlmostEqual(-1, acc_metric(y_true, (y_true.copy() - 1) * -1))

        # Five zeros then five ones -> chance-level score of 0.
        self.assertAlmostEqual(
            0, acc_metric(y_true, np.array([0] * 5 + [1] * 5)))
    # --- Exemplo n.º 4 (scrape artifact; vote count 0) ---
    def test_accuracy_metric_4_binary_classification(self):
        """Accuracy metric on hard 0/1 labels without an explicit task."""
        expected = np.array([0, 1, 1, 1, 0, 0, 1, 1, 1, 0]).reshape((-1, 1))

        # Case 1: prediction equals the target exactly.
        prediction = expected.copy()
        self.assertEqual(1, acc_metric(expected, prediction))

        # Case 2: every label flipped via 1 - y.
        prediction = (expected.copy() - 1) * -1
        self.assertAlmostEqual(-1, acc_metric(expected, prediction))

        # Case 3: first half zeros, second half ones.
        prediction = np.concatenate([np.zeros(5, dtype=int),
                                     np.ones(5, dtype=int)])
        self.assertAlmostEqual(0, acc_metric(expected, prediction))
    def test_accuracy_metric_4_binary_classification(self):
        """Accuracy metric on two-column one-hot binary predictions."""
        y_true = np.array([0, 1, 1, 1, 0, 0, 1, 1, 1, 0]).reshape((-1, 1))

        # Row-wise one-hot encoding of y_true -> perfect score of 1.
        y_pred = np.eye(2)[y_true.ravel()]
        self.assertEqual(
            1, acc_metric(y_true, y_pred, task=BINARY_CLASSIFICATION))

        # Invert the one-hot matrix (swaps the two columns) -> score -1.
        y_pred = (y_pred.copy() - 1) * -1
        self.assertAlmostEqual(
            -1, acc_metric(y_true, y_pred, task=BINARY_CLASSIFICATION))

        # Five [1, 0] rows then five [0, 1] rows -> chance-level score of 0.
        y_pred = np.eye(2)[np.array([0] * 5 + [1] * 5)]
        self.assertAlmostEqual(
            0, acc_metric(y_true, y_pred, task=BINARY_CLASSIFICATION))
    # --- Exemplo n.º 6 (scrape artifact; vote count 0) ---
    def test_accuracy_metric_4_multilabel_classification(self):
        """Accuracy metric on a 3x10 multilabel matrix (default task)."""
        y_true = np.array([[0, 0, 1, 1, 0, 1, 0, 1, 0, 1],
                           [1, 1, 0, 0, 1, 0, 1, 0, 1, 0],
                           [1, 1, 0, 0, 1, 0, 1, 0, 1, 0]])

        # Exact copy of the targets -> perfect score of 1.
        self.assertEqual(1, acc_metric(y_true, y_true.copy()))

        # Complement of every label via (y - 1) * -1 -> score -1.
        self.assertAlmostEqual(-1, acc_metric(y_true, (y_true.copy() - 1) * -1))

        # The same half-zero/half-one pattern repeated for all three rows.
        y_pred = np.tile([0] * 5 + [1] * 5, (3, 1))
        self.assertAlmostEqual(-0.0666666666, acc_metric(y_true, y_pred))
    # --- Exemplo n.º 7 (scrape artifact; vote count 0) ---
    def test_accuracy_metric_4_multilabel_classification(self):
        """Accuracy metric on a repeated-row 3x10 multilabel matrix."""
        row_a = [0, 0, 1, 1, 0, 1, 0, 1, 0, 1]
        row_b = [1, 1, 0, 0, 1, 0, 1, 0, 1, 0]
        truth = np.array([row_a, row_b, row_b])

        # Perfect agreement with the truth.
        self.assertEqual(1, acc_metric(truth, truth.copy()))

        # Complement of the truth: fully incorrect.
        self.assertAlmostEqual(-1, acc_metric(truth, (truth.copy() - 1) * -1))

        # One fixed half-zero/half-one pattern repeated for all three rows.
        guess = np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1]] * 3)
        self.assertAlmostEqual(-0.0666666666, acc_metric(truth, guess))
    def test_accuracy_metric_4_binary_classification(self):
        """Accuracy metric with an explicit BINARY_CLASSIFICATION task."""
        labels = np.array([0, 1, 1, 1, 0, 0, 1, 1, 1, 0]).reshape((-1, 1))

        # One-hot rows matching the labels exactly -> score of 1.
        probs = np.array([[1.0 - y, float(y)] for y in labels.ravel()])
        self.assertEqual(
            1, acc_metric(labels, probs, task=BINARY_CLASSIFICATION))

        # Invert the matrix (swaps the two columns): every row wrong.
        probs = (probs.copy() - 1) * -1
        self.assertAlmostEqual(
            -1, acc_metric(labels, probs, task=BINARY_CLASSIFICATION))

        # Five [1, 0] rows followed by five [0, 1] rows: chance level.
        probs = np.array([[1.0, 0.0]] * 5 + [[0.0, 1.0]] * 5)
        self.assertAlmostEqual(
            0, acc_metric(labels, probs, task=BINARY_CLASSIFICATION))
    def test_accuracy_metric_4_multilabel_classification(self):
        """Accuracy metric on 10x3 multilabel targets with explicit task."""
        on, off = [0, 1, 1], [1, 0, 0]
        truth = np.array([on, on, off, off, on, off, on, off, on, off])

        # Exact float copy of the truth -> perfect score of 1.
        self.assertEqual(
            1,
            acc_metric(truth, truth.astype(float),
                       task=MULTILABEL_CLASSIFICATION))

        # Complement of every entry, passed as floats -> score -1.
        flipped = (truth.copy() - 1) * -1
        self.assertAlmostEqual(
            -1,
            acc_metric(truth, flipped.astype(float),
                       task=MULTILABEL_CLASSIFICATION))

        # Five all-zero rows then five all-one rows -> near-chance score.
        guess = np.vstack([np.zeros((5, 3)), np.ones((5, 3))])
        self.assertAlmostEqual(
            -0.0666666666,
            acc_metric(truth, guess, task=MULTILABEL_CLASSIFICATION))
    def test_accuracy_metric_4_multiclass_classification(self):
        """Accuracy metric for three-class one-hot predictions."""
        labels = np.array([1, 1, 0, 0, 1, 0, 2, 0, 2, 1])
        identity = np.eye(3)

        # Row-wise one-hot of the labels: everything correct, score 1.
        scores = identity[labels]
        self.assertEqual(
            1, acc_metric(labels, scores, task=MULTICLASS_CLASSIFICATION))

        # Inverted one-hot matrix: nothing correct.
        scores = (scores.copy() - 1) * -1
        self.assertAlmostEqual(
            -0.5, acc_metric(labels, scores, task=MULTICLASS_CLASSIFICATION))

        # Classes predicted cyclically 0, 1, 2, 0, 1, ... down the rows.
        scores = identity[[i % 3 for i in range(10)]]
        self.assertAlmostEqual(
            0.1, acc_metric(labels, scores, task=MULTICLASS_CLASSIFICATION))