  def test_whenNoPredictionIsCorrectAccuracyIs0(self):
    rewardClasses = [0,1]
    confusionMatrix = ConfusionMatrix(rewardClasses)
    realReward = 0
    predictedReward = 1
    confusionMatrix.addObservation(realReward, predictedReward)

    self.assertEqual(0, confusionMatrix.accuracy())

  def test_whenAllPredictionsAreCorrectAccuracyIs100(self):
    rewardClasses = [0,1]
    confusionMatrix = ConfusionMatrix(rewardClasses)
    realReward = 0
    predictedReward = 0
    confusionMatrix.addObservation(realReward, predictedReward)

    self.assertEqual(100, confusionMatrix.accuracy())

  def test_whenObservationIsAddedCountIncreasesByOne(self):
    rewardClasses = [0,1]
    confusionMatrix = ConfusionMatrix(rewardClasses)
    realReward = 0
    predictedReward = 0

    confusionMatrix.addObservation(realReward, predictedReward)

    self.assertEqual(1, confusionMatrix.confusionMatrix[realReward][predictedReward])
Example #4
def test_forest_list(forest_list, cv, database, debug=False):
    """Evaluate each forest on its matching cross-validation fold and return the mean F1-measure."""
    classes = database.get_target_attribute_values()
    f1measure = 0

    for i, forest in enumerate(forest_list):
        test_set = cv.folds[i]

        (real_labels, pred_labels) = forest.test(test_set, debug)
        confusion_matrix = ConfusionMatrix.create_confusion_matrix(
            real_labels, pred_labels, classes)
        f1measure += Metrics.f1measure(confusion_matrix)

        if debug:
            print('Testing the forest with the dataset:')
            print(test_set)
            print('Test Results:')

            print('%d: ' % (i + 1))
            print(confusion_matrix)

    f1measure /= len(forest_list)
    if debug:
        print('Mean F1-Measure: ', f1measure)

    return f1measure
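ConfusionMatrix.create_confusion_matrix is not defined in these snippets. Judging only from its call sites (two parallel label lists plus an ordered class list, later compared against sklearn's matrix), it presumably builds a square count matrix in the given class order; a minimal standalone sketch under that assumption, not the actual implementation:

def create_confusion_matrix(real_labels, pred_labels, classes):
    # Illustrative sketch only, inferred from the call sites above;
    # rows follow the real class, columns the predicted class, in `classes` order.
    index = {c: i for i, c in enumerate(classes)}
    matrix = [[0] * len(classes) for _ in classes]
    for real, pred in zip(real_labels, pred_labels):
        matrix[index[real]][index[pred]] += 1
    return matrix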
  def test_whenConfusionMatrixAreMergedCountsAreAdded(self):
    rewardClasses = [0,1]
    confusionMatrix1 = ConfusionMatrix(rewardClasses)
    confusionMatrix2 = ConfusionMatrix(rewardClasses)
    realReward = 0
    predictedReward = 0
    confusionMatrix2.addObservation(realReward, predictedReward)

    confusionMatrix1.merge(confusionMatrix2)

    self.assertEqual(1, confusionMatrix1.confusionMatrix[realReward][predictedReward])
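The ConfusionMatrix class exercised by these unit tests is not shown here. A minimal sketch that satisfies the behaviour they assert (nested per-class counts, an accuracy() that returns a percentage, and merge() adding counts cell by cell) could look like the following; it is an illustration, not necessarily the real implementation under test:

class ConfusionMatrix:
    # Illustrative sketch matching the assertions in the tests above.

    def __init__(self, rewardClasses):
        # confusionMatrix[real][predicted] holds the observation count.
        self.confusionMatrix = {
            real: {pred: 0 for pred in rewardClasses} for real in rewardClasses
        }

    def addObservation(self, realReward, predictedReward):
        self.confusionMatrix[realReward][predictedReward] += 1

    def accuracy(self):
        # Percentage of observations on the diagonal (correct predictions).
        total = sum(sum(row.values()) for row in self.confusionMatrix.values())
        if total == 0:
            return 0
        correct = sum(self.confusionMatrix[c][c] for c in self.confusionMatrix)
        return 100 * correct / total

    def merge(self, other):
        # Add the other matrix's counts into this one, cell by cell.
        for real, row in other.confusionMatrix.items():
            for pred, count in row.items():
                self.confusionMatrix[real][pred] += count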
Example #6
    real_classes = [
        'c1', 'c1', 'c1', 'c1', 'c1', 'c1', 'c1', 'c1', 'c1', 'c1', 'c1', 'c1',
        'c1', 'c1', 'c1', 'c2', 'c2', 'c2', 'c2', 'c2', 'c3', 'c3', 'c3', 'c3',
        'c3', 'c3', 'c3', 'c3', 'c3', 'c3'
    ]

    pred_classes_array = [
        'c1', 'c1', 'c1', 'c1', 'c1', 'c1', 'c1', 'c1', 'c1', 'c1', 'c1', 'c2',
        'c3', 'c3', 'c3', 'c2', 'c2', 'c2', 'c2', 'c1', 'c3', 'c3', 'c3', 'c3',
        'c3', 'c3', 'c1', 'c1', 'c2', 'c2'
    ]

    classes = ['c1', 'c2', 'c3']

    confusion_matrix = ConfusionMatrix.create_confusion_matrix(
        real_classes, pred_classes_array, classes)
    sklearn_confusion_matrix = metrics.confusion_matrix(
        real_classes, pred_classes_array, labels=classes)

    print(confusion_matrix)
    print(sklearn_confusion_matrix)

    custom_f1measure = f1measure(confusion_matrix)  # avoid rebinding the f1measure function
    sklearn_f1measure = metrics.precision_recall_fscore_support(
        real_classes, pred_classes_array, average='macro')

    print('f1measure\t', custom_f1measure)
    print('sk fb_score\t', sklearn_f1measure[2])
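Neither f1measure nor Metrics.f1measure is defined in these snippets, but the comparison with sklearn's average='macro' result suggests a macro-averaged F1 over the per-class precision and recall of a rows-real, columns-predicted count matrix. A sketch under that assumption:

def macro_f1(confusion_matrix):
    # Illustrative sketch only: macro-averaged F1 for a square count matrix
    # whose rows are real classes and columns are predicted classes.
    n = len(confusion_matrix)
    per_class_f1 = []
    for i in range(n):
        tp = confusion_matrix[i][i]
        predicted_total = sum(confusion_matrix[r][i] for r in range(n))  # column sum
        real_total = sum(confusion_matrix[i])                            # row sum
        precision = tp / predicted_total if predicted_total else 0.0
        recall = tp / real_total if real_total else 0.0
        per_class_f1.append(
            2 * precision * recall / (precision + recall) if precision + recall else 0.0)
    return sum(per_class_f1) / n

On the 30-sample example above, a function of this form should agree with sklearn's macro F-score, i.e. the third element returned by precision_recall_fscore_support, up to floating-point rounding.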