Example #1
    def test_binary(self):
        c = ConfusionMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        bs = c.binary()
        self.assertTrue(
            np.all(bs[0].asarray() == np.array([[1, 5], [11, 28]])))
        self.assertTrue(
            np.all(bs[1].asarray() == np.array([[5, 10], [10, 20]])))
        self.assertTrue(
            np.all(bs[2].asarray() == np.array([[9, 15], [9, 12]])))
        for b in bs:
            # self.assertIsInstance(b, BinaryConfusionMatrix)
            self.assertTrue(isinstance(b, BinaryConfusionMatrix))
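The expected arrays pin down the one-vs-rest convention: rows hold ground truth, columns hold detections, and for class k the 2x2 matrix is [[TP, FN], [FP, TN]]. A minimal numpy sketch of that reduction (binary_decomposition is an illustrative name, not the library's API):

import numpy as np

def binary_decomposition(m):
    # For each class k: TP is the diagonal entry, FN the rest of row k,
    # FP the rest of column k, and TN everything else.
    m = np.asarray(m)
    out = []
    for k in range(m.shape[0]):
        tp = m[k, k]
        fn = m[k, :].sum() - tp
        fp = m[:, k].sum() - tp
        tn = m.sum() - tp - fn - fp
        out.append(np.array([[tp, fn], [fp, tn]]))
    return out

print(binary_decomposition([[1, 2, 3], [4, 5, 6], [7, 8, 9]])[0])
# [[ 1  5]
#  [11 28]]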
Example #2
    def test_rates(self):
        c = ConfusionMatrix([[8, 2, 1], [1, 4, 3], [1, 2, 7]])
        self.assertTrue(np.all(c.classification_rates == [8.0 / 11, 0.5, 0.7]))
        self.assertTrue(np.all(c.error_rates == [3.0 / 11, 0.5, 0.3]))
        self.assertAlmostEqual(c.balanced_error_rate,
                               np.mean([3.0 / 11, 0.5, 0.3]))
        self.assertAlmostEqual(c.balanced_classification_rate,
                               np.mean([8.0 / 11, 0.5, 0.7]))
        self.assertAlmostEqual(c.classification_accuracy, 19.0 / 29.0)
        self.assertAlmostEqual(c.classification_error_rate, 10.0 / 29.0)
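The expected values imply that classification_rates is per-class recall (the diagonal divided by row totals) and that classification_accuracy is the trace over the grand total; the arithmetic checks out directly:

import numpy as np

m = np.array([[8, 2, 1], [1, 4, 3], [1, 2, 7]], dtype=float)
recall = np.diag(m) / m.sum(axis=1)  # [8/11, 0.5, 0.7]
print(recall)
print((1 - recall).mean())           # balanced error rate
print(np.trace(m) / m.sum())         # classification accuracy: 19/29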
Example #3
    def test_constructor_labels(self):
        c = ConfusionMatrix([[1, 2], [3, 4]])
        self.assertEqual(c.labels, [0, 1])

        c = ConfusionMatrix([[1, 2], [3, 4]], labels=['a', 'b'])
        self.assertEqual(c.labels, ['a', 'b'])

        # with self.assertRaises(RuntimeError) as cm:
        #     c = ConfusionMatrix([[1, 2], [3, 4]], labels=[1, 2, 3])
        # self.assertEqual(cm.exception.message, "Number of class labels does not equal number of classes - number of labels is 3, number of classes is 2")
        def func():
            c = ConfusionMatrix([[1, 2], [3, 4]], labels=[1, 2, 3])

        self.assertRaises(RuntimeError, func)

        # with self.assertRaises(RuntimeError) as cm:
        #     c = ConfusionMatrix([[1, 2], [3, 4]], labels=[1, 1])
        # self.assertEqual(cm.exception.message, "Class labels are not unique - labels are [1, 1]")
        def func():
            c = ConfusionMatrix([[1, 2], [3, 4]], labels=[1, 1])

        self.assertRaises(RuntimeError, func)
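The commented-out messages spell out the contract the test still enforces. A sketch of the validation the constructor presumably performs (not the library's actual code):

def validate_labels(num_classes, labels):
    # Checks implied by the error messages quoted above.
    if len(labels) != num_classes:
        raise RuntimeError(
            "Number of class labels does not equal number of classes"
            " - number of labels is %d, number of classes is %d"
            % (len(labels), num_classes))
    if len(set(labels)) != len(labels):
        raise RuntimeError("Class labels are not unique - labels are %s" % labels)

validate_labels(2, ['a', 'b'])    # passes silently
# validate_labels(2, [1, 2, 3])  # would raise: wrong label count
# validate_labels(2, [1, 1])     # would raise: duplicate labels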
Example #4
    def test_from_samples(self):
        i = [
            np.atleast_2d(np.array([0, 1])).T,
            np.atleast_2d(np.array([1, 2, 2])).T
        ]
        t = [
            np.atleast_2d(np.array([0, 0])).T,
            np.atleast_2d(np.array([2, 2, 2])).T
        ]
        c = ConfusionMatrix.from_samples(3, i, t)

        self.assertTrue(np.all(c.truth == [0.5, 0, 0.5]))
        self.assertTrue(
            np.all(
                c.detections == [0.5 / 2, (0.5 + 1.0 / 3) / 2, (2.0 / 3) / 2]))
        self.assertTrue(np.all(c.correct == [0.5 / 2, 0, (2.0 / 3) / 2]))

        for result, truth in zip(c.incorrect, [0.5 / 2, 0, 1.0 / 6]):
            self.assertAlmostEqual(result, truth)
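The expected fractions imply that from_samples builds one confusion matrix per sample, normalises it by that sample's length, and then averages over samples: c.truth is the mean of [1, 0, 0] (two class-0 targets in the first sample) and [0, 0, 1]. A sketch of that computation (from_samples_sketch is an illustrative name):

import numpy as np

def from_samples_sketch(n, inputs, targets):
    # One length-normalised matrix per sample, then the mean over samples.
    acc = np.zeros((n, n))
    for i, t in zip(inputs, targets):
        m = np.zeros((n, n))
        for det, truth in zip(i.ravel(), t.ravel()):
            m[int(truth), int(det)] += 1
        acc += m / len(i)
    return acc / len(inputs)

c = from_samples_sketch(3, [np.array([0, 1]), np.array([1, 2, 2])],
                        [np.array([0, 0]), np.array([2, 2, 2])])
print(c.sum(axis=1))  # truth:      [0.5, 0, 0.5]
print(c.sum(axis=0))  # detections: [0.25, 0.4166..., 0.3333...]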
Example #5
    def test_from_data(self):
        # with self.assertRaises(RuntimeError) as cm:
        #     i = np.atleast_2d(np.array([0, 1])).T
        #     t = np.atleast_2d(np.array([2])).T
        #     c = ConfusionMatrix.from_data(3, i, t)
        # self.assertEqual(cm.exception.message, "Output and target data should have the same shape")
        def func():
            i = np.atleast_2d(np.array([0, 1])).T
            t = np.atleast_2d(np.array([2])).T
            c = ConfusionMatrix.from_data(3, i, t)

        self.assertRaises(RuntimeError, func)

        i = np.atleast_2d(np.array([0, 0, 1, 2, 0, 0, 2, 2, 1, 2])).T
        t = np.atleast_2d(np.array([0, 0, 0, 1, 1, 0, 2, 2, 1, 1])).T
        c = ConfusionMatrix.from_data(3, i, t)

        self.assertTrue(np.all(c.truth == [4, 4, 2]))
        self.assertTrue(np.all(c.detections == [4, 2, 4]))
        self.assertTrue(np.all(c.correct == [3, 1, 2]))
        self.assertTrue(np.all(c.incorrect == [1, 3, 0]))
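The from_data counts can be reproduced by hand, with rows indexing the true label and columns the detected one (a plain numpy sketch, not the library's implementation):

import numpy as np

i = np.array([0, 0, 1, 2, 0, 0, 2, 2, 1, 2])  # detections
t = np.array([0, 0, 0, 1, 1, 0, 2, 2, 1, 1])  # targets
m = np.zeros((3, 3), dtype=int)
for det, truth in zip(i, t):
    m[truth, det] += 1                 # rows: truth, columns: detection
print(m.sum(axis=1))                   # truth:      [4 4 2]
print(m.sum(axis=0))                   # detections: [4 2 4]
print(np.diag(m))                      # correct:    [3 1 2]
print(m.sum(axis=1) - np.diag(m))      # incorrect:  [1 3 0]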
Example #6
    def test_asarray(self):
        c = ConfusionMatrix([[1, 2], [3, 4]])
        self.assertTrue(np.all(c.asarray() == np.array([[1, 2], [3, 4]])))
Example #7
def func():
    c = ConfusionMatrix([[1, 2], [3, 4]], labels=[1, 1])
Example #8
def func():
    c = ConfusionMatrix([[1, 2, 3], [4, 5, 6]])
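This fragment pairs with an assertRaises check elsewhere in the suite: a 2x3 argument is presumably rejected because a confusion matrix must be square, with one row and one column per class.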
Example #9
        ytest.append(flow(xtest))

    pylab.subplot(n_subplots_x, n_subplots_y, 2)
    pylab.plot(reservoir.inspect()[0])
    pylab.title("Sample reservoir states")
    pylab.xlabel("Timestep")
    pylab.ylabel("Activation")

    print "Error : " + str(mdp.numx.mean([loss_01_time(sample, target) for (sample, target) in zip(ytest, outputs[n_train_samples:])]))

    ymean = sp.array([sp.argmax(mdp.numx.atleast_2d(mdp.numx.mean(sample, axis=0))) for sample in
                      outputs[n_train_samples:]])
    ytestmean = sp.array([sp.argmax(mdp.numx.atleast_2d(mdp.numx.mean(sample, axis=0))) for sample in ytest])

    # use ConfusionMatrix to compute some more information about the classification results
    confusion_matrix = ConfusionMatrix.from_data(10, ytestmean, ymean) # 10 classes
    print "Error rate: %.4f" % confusion_matrix.error_rate # this comes down to 0-1 loss
    print "Balanced error rate: %.4f" % confusion_matrix.ber
    print

    # compute precision and recall for each class vs. all others
    print "Per-class precision and recall"
    binary_confusion_matrices = confusion_matrix.binary()
    for c in range(10):
        m = binary_confusion_matrices[c]
        print "label %d - precision: %.2f, recall %.2f" % (c, m.precision, m.recall)
    print

    # properties of the ConfusionMatrix and BinaryConfusionMatrix classes can also be used
    # as error measure functions, as follows:
    ber = ConfusionMatrix.error_measure('ber', 10) # 10-class balanced error rate
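The object returned by error_measure is a callable; judging from test_error_measure (Example #17 below), it can be applied directly to output/target arrays, e.g. err = ber(ytestmean, ymean).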
Example #10
    def test_subsets(self):
        c = ConfusionMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        self.assertTrue(np.all(c.correct == [1, 5, 9]))
        self.assertTrue(np.all(c.incorrect == [5, 10, 15]))
        self.assertTrue(np.all(c.detections == [12, 15, 18]))
        self.assertTrue(np.all(c.ground_truth == [6, 15, 24]))
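All four properties are plain reductions of the count matrix, with rows as ground truth and columns as detections; checked directly:

import numpy as np

m = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(np.diag(m))                  # correct:      [1 5 9]
print(m.sum(axis=1) - np.diag(m))  # incorrect:    [ 5 10 15]
print(m.sum(axis=0))               # detections:   [12 15 18]
print(m.sum(axis=1))               # ground truth: [ 6 15 24]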
Example #11
    def test_balance(self):
        c = ConfusionMatrix([[1, 3], [2, 2]])
        cb = c.balance()
        self.assertTrue(
            np.all(cb.asarray() == np.array([[0.25, 0.75], [0.5, 0.5]])))
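balance() evidently rescales each row (each true class) to sum to one, so classes with unequal support weigh equally; the expected array is just the row-normalised input:

import numpy as np

m = np.array([[1, 3], [2, 2]], dtype=float)
print(m / m.sum(axis=1, keepdims=True))  # [[0.25 0.75], [0.5 0.5]]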
Example #12
def func():
    i = np.atleast_2d(np.array([0, 1])).T
    t = np.atleast_2d(np.array([2])).T
    c = ConfusionMatrix.from_data(3, i, t)
Example #13
File: task2.py Project: grezesf/Research
# testout = flow(test_inputs[0])
# trainout = flow(train_inputs[0])
# testout = flow(test_inputs[0])
# print "shape of testout: ", numpy.shape(testout)

import scipy as sp
from Oger.utils import ConfusionMatrix, plot_conf

ytest = []
for xtest in test_inputs:
    ytest.append(opt_flow(xtest))
ymean = sp.atleast_2d(
    sp.array([
        sp.argmax(mdp.numx.atleast_2d(mdp.numx.mean(sample, axis=0)))
        for sample in test_targets
    ])).T
ytestmean = sp.atleast_2d(
    sp.array([
        sp.argmax(mdp.numx.atleast_2d(mdp.numx.mean(sample, axis=0)))
        for sample in ytest
    ])).T

# use ConfusionMatrix to compute some more information about the classification results
confusion_matrix = ConfusionMatrix.from_data(61, ytestmean, ymean) # 61 classes
print "Error rate: %.4f" % confusion_matrix.error_rate # this comes down to 0-1 loss
print "Balanced error rate: %.4f" % confusion_matrix.ber
print

# compute precision and recall for each class vs. all others
print "Per-class precision and recall"
binary_confusion_matrices = confusion_matrix.binary()
for c in range(61):
    m = binary_confusion_matrices[c]
    print "label %d - precision: %.2f, recall %.2f" % (c, m.precision, m.recall)
print

# properties of the ConfusionMatrix and BinaryConfusionMatrix classes can also be used
# as error measure functions, as follows:
ber = ConfusionMatrix.error_measure('ber', 61) # 61-class balanced error rate
Example #14
            for (sample, target) in zip(ytest, outputs[n_train_samples:])
        ]))

    ymean = sp.atleast_2d(
        sp.array([
            sp.argmax(mdp.numx.atleast_2d(mdp.numx.mean(sample, axis=0)))
            for sample in outputs[n_train_samples:]
        ])).T
    ytestmean = sp.atleast_2d(
        sp.array([
            sp.argmax(mdp.numx.atleast_2d(mdp.numx.mean(sample, axis=0)))
            for sample in ytest
        ])).T

    # use ConfusionMatrix to compute some more information about the classification results
    confusion_matrix = ConfusionMatrix.from_data(10, ytestmean,
                                                 ymean)  # 10 classes
    print "Error rate: %.4f" % confusion_matrix.error_rate  # this comes down to 0-1 loss
    print "Balanced error rate: %.4f" % confusion_matrix.ber
    print

    # compute precision and recall for each class vs. all others
    print "Per-class precision and recall"
    binary_confusion_matrices = confusion_matrix.binary()
    for c in range(10):
        m = binary_confusion_matrices[c]
        print "label %d - precision: %.2f, recall %.2f" % (c, m.precision,
                                                           m.recall)
    print

    # properties of the ConfusionMatrix and BinaryConfusionMatrix classes can also be used
    # as error measure functions, as follows:
    ber = ConfusionMatrix.error_measure('ber', 10)  # 10-class balanced error rate
Example #15
    def test_add(self):
        c1 = ConfusionMatrix([[1, 2], [3, 4]])
        c2 = ConfusionMatrix([[3, 4], [1, 2]])
        s = c1 + c2
        t = ConfusionMatrix([[4, 6], [4, 6]])
        self.assertTrue(np.all(s.asarray() == t.asarray()))
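Addition is element-wise on the counts; one natural use (an assumption, not shown in the source) is accumulating a single total matrix across cross-validation folds before computing rates.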
Example #16
    def test_normalise(self):
        c = ConfusionMatrix([[1, 2], [3, 4]])
        cn = c.normalise()
        self.assertTrue(
            np.all(cn.asarray() == np.array([[0.1, 0.2], [0.3, 0.4]])))
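normalise(), in contrast with balance() above, divides by the grand total so that all entries sum to one:

import numpy as np

m = np.array([[1, 2], [3, 4]], dtype=float)
print(m / m.sum())  # [[0.1 0.2], [0.3 0.4]]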
Example #17
    def test_error_measure(self):
        ber = ConfusionMatrix.error_measure('ber', 3)
        i = np.atleast_2d(np.array([0, 0, 1, 2, 0, 0, 2, 2, 1, 2])).T
        t = np.atleast_2d(np.array([0, 0, 0, 1, 1, 0, 2, 2, 1, 1])).T
        self.assertAlmostEqual(ber(i, t), 1.0 / 3)
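The expected value can be hand-checked: this data yields the confusion matrix [[3, 1, 0], [1, 1, 2], [0, 0, 2]] from test_from_data above, whose per-class error rates are 1/4, 3/4 and 0:

import numpy as np

m = np.array([[3, 1, 0], [1, 1, 2], [0, 0, 2]], dtype=float)
per_class_error = (m.sum(axis=1) - np.diag(m)) / m.sum(axis=1)
print(per_class_error.mean())  # 1/3, the balanced error rate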
Example #18
    def test_properties(self):
        c = ConfusionMatrix([[1, 2], [3, 4]])
        cn = c.normalise()
        self.assertEqual(c.num_classes, 2)
        self.assertEqual(c.total, 10)
        self.assertEqual(cn.total, 1)
Example #19
    def test_sparse(self):
        c = ConfusionMatrix([[5, 0, 0], [0, 0, 0], [1, 0, 3]])
        self.assertAlmostEqual(c.ber, 0.25 / 3)
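The expected value implies that a class with no ground-truth samples (the all-zero middle row) contributes an error rate of 0 rather than NaN, so the mean over the three classes is (0 + 0 + 0.25) / 3.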
Example #20
def func():
    c = ConfusionMatrix([1, 2, 3])
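Here the rejected input is one-dimensional; the constructor presumably requires a 2-D square array of counts, matching the other fragments above.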
Example #21
ytest = []
for xtest in test_inputs:
    ytest.append(opt_flow(xtest))
ymean = sp.atleast_2d(
    sp.array([
        sp.argmax(mdp.numx.atleast_2d(mdp.numx.mean(sample, axis=0)))
        for sample in test_targets
    ])).T
ytestmean = sp.atleast_2d(
    sp.array([
        sp.argmax(mdp.numx.atleast_2d(mdp.numx.mean(sample, axis=0)))
        for sample in ytest
    ])).T

# use ConfusionMatrix to compute some more information about the classification results
confusion_matrix = ConfusionMatrix.from_data(61, ytestmean,
                                             ymean)  # 61 classes
print "Error rate: %.4f" % confusion_matrix.error_rate  # this comes down to 0-1 loss
print "Balanced error rate: %.4f" % confusion_matrix.ber
print

# compute precision and recall for each class vs. all others
print "Per-class precision and recall"
binary_confusion_matrices = confusion_matrix.binary()
for c in range(61):
    m = binary_confusion_matrices[c]
    print "label %d - precision: %.2f, recall %.2f" % (c, m.precision,
                                                       m.recall)
print

# properties of the ConfusionMatrix and BinaryConfusionMatrix classes can also be used
# as error measure functions, as follows:
ber = ConfusionMatrix.error_measure('ber', 61)  # 61-class balanced error rate