def test_Compare(self):
     mxKFoldRun = MxKFoldRun(5, 2)
     combined5x2t = Combined5x2t()
     experimentPerformance1 = mxKFoldRun.execute(
         Experiment(C45(), C45Parameter(1, True, 0.2), self.iris))
     experimentPerformance2 = mxKFoldRun.execute(
         Experiment(LinearPerceptron(),
                    LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100),
                    self.iris))
     self.assertAlmostEqual(
         0.186,
         combined5x2t.compare(experimentPerformance1,
                              experimentPerformance2).getPValue(), 3)
     experimentPerformance1 = mxKFoldRun.execute(
         Experiment(C45(), C45Parameter(1, True, 0.2), self.tictactoe))
     experimentPerformance2 = mxKFoldRun.execute(
         Experiment(Bagging(), BaggingParameter(1, 50), self.tictactoe))
     self.assertAlmostEqual(
         0.0000059,
         combined5x2t.compare(experimentPerformance1,
                              experimentPerformance2).getPValue(), 7)
     experimentPerformance1 = mxKFoldRun.execute(
         Experiment(Lda(), Parameter(1), self.dermatology))
     experimentPerformance2 = mxKFoldRun.execute(
         Experiment(LinearPerceptron(),
                    LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100),
                    self.dermatology))
     self.assertAlmostEqual(
         0.9819,
         combined5x2t.compare(experimentPerformance1,
                              experimentPerformance2).getPValue(), 4)
     experimentPerformance1 = mxKFoldRun.execute(
         Experiment(Dummy(), Parameter(1), self.nursery))
     experimentPerformance2 = mxKFoldRun.execute(
         Experiment(NaiveBayes(), Parameter(1), self.nursery))
     self.assertAlmostEqual(
         0.0,
         combined5x2t.compare(experimentPerformance1,
                              experimentPerformance2).getPValue(), 4)
     experimentPerformance1 = mxKFoldRun.execute(
         Experiment(NaiveBayes(), Parameter(1), self.car))
     experimentPerformance2 = mxKFoldRun.execute(
         Experiment(Bagging(), BaggingParameter(1, 50), self.car))
     self.assertAlmostEqual(
         0.00043,
         combined5x2t.compare(experimentPerformance1,
                              experimentPerformance2).getPValue(), 5)
     experimentPerformance1 = mxKFoldRun.execute(
         Experiment(Knn(), KnnParameter(1, 3, EuclidianDistance()),
                    self.bupa))
     experimentPerformance2 = mxKFoldRun.execute(
         Experiment(Lda(), Parameter(1), self.bupa))
     self.assertAlmostEqual(
         0.0663,
         combined5x2t.compare(experimentPerformance1,
                              experimentPerformance2).getPValue(), 4)
 def test_LinearPerceptron(self):
     linearPerceptron = LinearPerceptron()
     linearPerceptronParameter = LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100)
     pca = Pca(self.iris)
     pca.convert()
     linearPerceptron.train(self.iris.getInstanceList(), linearPerceptronParameter)
     self.assertAlmostEqual(1.33, 100 * linearPerceptron.test(self.iris.getInstanceList()).getErrorRate(), 2)
     linearPerceptronParameter = LinearPerceptronParameter(1, 0.01, 0.99, 0.2, 100)
     pca = Pca(self.bupa)
     pca.convert()
     linearPerceptron.train(self.bupa.getInstanceList(), linearPerceptronParameter)
     self.assertAlmostEqual(27.54, 100 * linearPerceptron.test(self.bupa.getInstanceList()).getErrorRate(), 2)
     pca = Pca(self.dermatology)
     pca.convert()
     linearPerceptron.train(self.dermatology.getInstanceList(), linearPerceptronParameter)
     self.assertAlmostEqual(3.28, 100 * linearPerceptron.test(self.dermatology.getInstanceList()).getErrorRate(), 2)
    def train(self, trainSet: InstanceList,
              parameters: LinearPerceptronParameter):
        """
        Training algorithm for the linear perceptron. 20 percent of the data is set aside as cross-validation
        data and used for selecting the best weights; the remaining 80 percent is used to train the linear
        perceptron with gradient descent.

        PARAMETERS
        ----------
        trainSet : InstanceList
            Training data given to the algorithm
        parameters : LinearPerceptronParameter
            Parameters of the linear perceptron.
        """
        # Split off the crossValidationRatio portion for validation: partition.get(1) is the
        # training part, partition.get(0) the held-out validation part.
        partition = Partition(trainSet, parameters.getCrossValidationRatio(),
                              parameters.getSeed(), True)
        self.model = LinearPerceptronModel(partition.get(1), partition.get(0),
                                           parameters)
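
For reference, a minimal sketch of how this train/test pair is typically used. The import paths are assumptions about the package layout, and the argument order of LinearPerceptronParameter (seed, learningRate, etaDecrease, crossValidationRatio, epoch) is inferred from the accessor calls and docstrings above; treat it as a sketch, not a definitive recipe.

# Hedged usage sketch -- module paths below are assumptions, verify them against your installation.
from Classification.Classifier.LinearPerceptron import LinearPerceptron
from Classification.Parameter.LinearPerceptronParameter import LinearPerceptronParameter

def report_training_error(dataSet):
    # Assumed argument order: seed, learningRate, etaDecrease, crossValidationRatio, epoch.
    parameters = LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100)
    classifier = LinearPerceptron()
    # train() splits off the crossValidationRatio part (20% here) internally, as described above.
    classifier.train(dataSet.getInstanceList(), parameters)
    # Error rate on the full instance list, as a percentage (matching the tests above).
    return 100 * classifier.test(dataSet.getInstanceList()).getErrorRate()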
 def test_LinearPerceptron(self):
     linearPerceptron = LinearPerceptron()
     linearPerceptronParameter = LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100)
     discreteToIndexed = LaryToBinary(self.car)
     discreteToIndexed.convert()
     linearPerceptron.train(self.car.getInstanceList(), linearPerceptronParameter)
     self.assertAlmostEqual(29.98, 100 * linearPerceptron.test(self.car.getInstanceList()).getErrorRate(), 2)
     discreteToIndexed = LaryToBinary(self.tictactoe)
     discreteToIndexed.convert()
     linearPerceptron.train(self.tictactoe.getInstanceList(), linearPerceptronParameter)
     self.assertAlmostEqual(34.66, 100 * linearPerceptron.test(self.tictactoe.getInstanceList()).getErrorRate(), 2)
    def __init__(self, trainSet: InstanceList, validationSet: InstanceList,
                 parameters: LinearPerceptronParameter):
        """
        Constructor that takes the trainSet and validationSet as InstanceLists. It first allocates the layer
        weights, then, in each epoch, builds an input vector for every training instance and applies a
        gradient-descent update. After each epoch the classifier is evaluated on the validationSet, and the
        weight matrix with the best validation accuracy is kept at the end.

        PARAMETERS
        ----------
        trainSet : InstanceList
            InstanceList that is used to train.
        validationSet : InstanceList
            InstanceList that is used to validate.
        parameters : LinearPerceptronParameter
            Linear perceptron parameters: learningRate, etaDecrease, crossValidationRatio, epoch.
        """
        super().__init__(trainSet)
        self.W = self.allocateLayerWeights(self.K, self.d + 1,
                                           parameters.getSeed())
        bestW = copy.deepcopy(self.W)
        bestClassificationPerformance = ClassificationPerformance(0.0)
        epoch = parameters.getEpoch()
        learningRate = parameters.getLearningRate()
        for i in range(epoch):
            trainSet.shuffle(parameters.getSeed())
            for j in range(trainSet.size()):
                # Build the input vector for this instance and apply the
                # gradient-descent update deltaW = learningRate * (r - y) * x^T.
                self.createInputVector(trainSet.get(j))
                rMinusY = self.calculateRMinusY(trainSet.get(j), self.x, self.W)
                deltaW = Matrix(rMinusY, self.x)
                deltaW.multiplyWithConstant(learningRate)
                self.W.add(deltaW)
            # After each epoch, evaluate on the validation set and keep the
            # weights with the best accuracy seen so far.
            currentClassificationPerformance = self.testClassifier(validationSet)
            if currentClassificationPerformance.getAccuracy() > bestClassificationPerformance.getAccuracy():
                bestClassificationPerformance = currentClassificationPerformance
                bestW = copy.deepcopy(self.W)
            # Decay the learning rate once per epoch.
            learningRate *= parameters.getEtaDecrease()
        self.W = bestW
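
In other words, the inner loop applies the per-instance update below, where r is the target vector, y is the output that calculateRMinusY subtracts from it (the exact form of y is an inference from the variable names), x is the input vector and eta the learning rate:

W \leftarrow W + \eta\,(r - y)\,x^{\top},
\qquad
\eta \leftarrow \eta \cdot \text{etaDecrease} \quad \text{(once per epoch)}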
 def test_Compare(self):
     kFoldRun = KFoldRun(10)
     pairedt = Pairedt()
     experimentPerformance1 = kFoldRun.execute(Experiment(C45(), C45Parameter(1, True, 0.2), self.iris))
     experimentPerformance2 = kFoldRun.execute(Experiment(LinearPerceptron(), LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100), self.iris))
     self.assertAlmostEqual(0.379, pairedt.compare(experimentPerformance1, experimentPerformance2).getPValue(), 3)
     experimentPerformance1 = kFoldRun.execute(Experiment(C45(), C45Parameter(1, True, 0.2), self.tictactoe))
     experimentPerformance2 = kFoldRun.execute(Experiment(Bagging(), BaggingParameter(1, 50), self.tictactoe))
     self.assertAlmostEqual(0.00000692, pairedt.compare(experimentPerformance1, experimentPerformance2).getPValue(), 7)
     experimentPerformance1 = kFoldRun.execute(Experiment(Lda(), Parameter(1), self.dermatology))
     experimentPerformance2 = kFoldRun.execute(Experiment(LinearPerceptron(), LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100), self.dermatology))
     self.assertAlmostEqual(0.7842, pairedt.compare(experimentPerformance1, experimentPerformance2).getPValue(), 4)
     experimentPerformance1 = kFoldRun.execute(Experiment(Dummy(), Parameter(1), self.nursery))
     experimentPerformance2 = kFoldRun.execute(Experiment(NaiveBayes(), Parameter(1), self.nursery))
     self.assertAlmostEqual(0.0, pairedt.compare(experimentPerformance1, experimentPerformance2).getPValue(), 4)
     experimentPerformance1 = kFoldRun.execute(Experiment(NaiveBayes(), Parameter(1), self.car))
     experimentPerformance2 = kFoldRun.execute(Experiment(Bagging(), BaggingParameter(1, 50), self.car))
     self.assertAlmostEqual(0.00000336, pairedt.compare(experimentPerformance1, experimentPerformance2).getPValue(), 7)
     experimentPerformance1 = kFoldRun.execute(Experiment(Knn(), KnnParameter(1, 3, EuclidianDistance()), self.bupa))
     experimentPerformance2 = kFoldRun.execute(Experiment(Lda(), Parameter(1), self.bupa))
     self.assertAlmostEqual(0.1640, pairedt.compare(experimentPerformance1, experimentPerformance2).getPValue(), 4)
 def test_LinearPerceptron(self):
     linearPerceptron = LinearPerceptron()
     linearPerceptronParameter = LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100)
     normalize = Normalize(self.iris)
     normalize.convert()
     linearPerceptron.train(self.iris.getInstanceList(), linearPerceptronParameter)
     self.assertAlmostEqual(2.00, 100 * linearPerceptron.test(self.iris.getInstanceList()).getErrorRate(), 2)
     normalize = Normalize(self.bupa)
     normalize.convert()
     linearPerceptron.train(self.bupa.getInstanceList(), linearPerceptronParameter)
     self.assertAlmostEqual(26.67, 100 * linearPerceptron.test(self.bupa.getInstanceList()).getErrorRate(), 2)
     normalize = Normalize(self.dermatology)
     normalize.convert()
     linearPerceptron.train(self.dermatology.getInstanceList(), linearPerceptronParameter)
     self.assertAlmostEqual(1.91, 100 * linearPerceptron.test(self.dermatology.getInstanceList()).getErrorRate(), 2)
 def test_LinearPerceptron(self):
     linearPerceptron = LinearPerceptron()
     linearPerceptronParameter = LinearPerceptronParameter(
         1, 0.1, 0.99, 0.2, 100)
     discreteToContinuous = DiscreteToContinuous(self.car)
     discreteToContinuous.convert()
     linearPerceptron.train(self.car.getInstanceList(),
                            linearPerceptronParameter)
     self.assertAlmostEqual(
         5.73, 100 *
         linearPerceptron.test(self.car.getInstanceList()).getErrorRate(),
         2)
     discreteToContinuous = DiscreteToContinuous(self.tictactoe)
     discreteToContinuous.convert()
     linearPerceptron.train(self.tictactoe.getInstanceList(),
                            linearPerceptronParameter)
     self.assertAlmostEqual(
         2.51, 100 * linearPerceptron.test(
             self.tictactoe.getInstanceList()).getErrorRate(), 2)
 def test_Train(self):
     linearPerceptron = LinearPerceptron()
     linearPerceptronParameter = LinearPerceptronParameter(
         1, 0.1, 0.99, 0.2, 100)
     linearPerceptron.train(self.iris.getInstanceList(),
                            linearPerceptronParameter)
     self.assertAlmostEqual(
         1.33, 100 *
         linearPerceptron.test(self.iris.getInstanceList()).getErrorRate(),
         2)
     linearPerceptron.train(self.bupa.getInstanceList(),
                            linearPerceptronParameter)
     self.assertAlmostEqual(
         28.99, 100 *
         linearPerceptron.test(self.bupa.getInstanceList()).getErrorRate(),
         2)
     linearPerceptron.train(self.dermatology.getInstanceList(),
                            linearPerceptronParameter)
     self.assertAlmostEqual(
         4.37, 100 * linearPerceptron.test(
             self.dermatology.getInstanceList()).getErrorRate(), 2)
 def test_Execute(self):
     mxKFoldRun = MxKFoldRun(5, 2)
     experimentPerformance = mxKFoldRun.execute(
         Experiment(C45(), C45Parameter(1, True, 0.2), self.iris))
     self.assertAlmostEqual(
         6.13, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = mxKFoldRun.execute(
         Experiment(C45(), C45Parameter(1, True, 0.2), self.tictactoe))
     self.assertAlmostEqual(
         23.51,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = mxKFoldRun.execute(
         Experiment(Knn(), KnnParameter(1, 3, EuclidianDistance()),
                    self.bupa))
     self.assertAlmostEqual(
         37.05,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = mxKFoldRun.execute(
         Experiment(Knn(), KnnParameter(1, 3, EuclidianDistance()),
                    self.dermatology))
     self.assertAlmostEqual(
         15.41,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = mxKFoldRun.execute(
         Experiment(Lda(), Parameter(1), self.bupa))
     self.assertAlmostEqual(
         34.72,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = mxKFoldRun.execute(
         Experiment(Lda(), Parameter(1), self.dermatology))
     self.assertAlmostEqual(
         4.04, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = mxKFoldRun.execute(
         Experiment(LinearPerceptron(),
                    LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100),
                    self.iris))
     self.assertAlmostEqual(
         5.2, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = mxKFoldRun.execute(
         Experiment(LinearPerceptron(),
                    LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100),
                    self.dermatology))
     self.assertAlmostEqual(
         5.46, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = mxKFoldRun.execute(
         Experiment(NaiveBayes(), Parameter(1), self.car))
     self.assertAlmostEqual(
         16.52,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = mxKFoldRun.execute(
         Experiment(NaiveBayes(), Parameter(1), self.nursery))
     self.assertAlmostEqual(
         9.80, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = mxKFoldRun.execute(
         Experiment(Bagging(), BaggingParameter(1, 50), self.tictactoe))
     self.assertAlmostEqual(
         8.77, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = mxKFoldRun.execute(
         Experiment(Bagging(), BaggingParameter(1, 50), self.car))
     self.assertAlmostEqual(
         9.77, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = mxKFoldRun.execute(
         Experiment(Dummy(), Parameter(1), self.nursery))
     self.assertAlmostEqual(
         67.09,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = mxKFoldRun.execute(
         Experiment(Dummy(), Parameter(1), self.iris))
     self.assertAlmostEqual(
         70.53,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
 def test_Execute(self):
     kFoldRun = KFoldRun(10)
     experimentPerformance = kFoldRun.execute(
         Experiment(C45(), C45Parameter(1, True, 0.2), self.iris))
     self.assertAlmostEqual(
         6.00, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = kFoldRun.execute(
         Experiment(C45(), C45Parameter(1, True, 0.2), self.tictactoe))
     self.assertAlmostEqual(
         18.78,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = kFoldRun.execute(
         Experiment(Knn(), KnnParameter(1, 3, EuclidianDistance()),
                    self.bupa))
     self.assertAlmostEqual(
         36.85,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = kFoldRun.execute(
         Experiment(Knn(), KnnParameter(1, 3, EuclidianDistance()),
                    self.dermatology))
     self.assertAlmostEqual(
         10.92,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = kFoldRun.execute(
         Experiment(Lda(), Parameter(1), self.bupa))
     self.assertAlmostEqual(
         31.61,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = kFoldRun.execute(
         Experiment(Lda(), Parameter(1), self.dermatology))
     self.assertAlmostEqual(
         3.30, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = kFoldRun.execute(
         Experiment(LinearPerceptron(),
                    LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100),
                    self.iris))
     self.assertAlmostEqual(
         5.33, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = kFoldRun.execute(
         Experiment(LinearPerceptron(),
                    LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100),
                    self.dermatology))
     self.assertAlmostEqual(
         3.81, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = kFoldRun.execute(
         Experiment(NaiveBayes(), Parameter(1), self.car))
     self.assertAlmostEqual(
         14.88,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = kFoldRun.execute(
         Experiment(NaiveBayes(), Parameter(1), self.nursery))
     self.assertAlmostEqual(
         9.71, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = kFoldRun.execute(
         Experiment(Bagging(), BaggingParameter(1, 50), self.tictactoe))
     self.assertAlmostEqual(
         3.55, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = kFoldRun.execute(
         Experiment(Bagging(), BaggingParameter(1, 50), self.car))
     self.assertAlmostEqual(
         6.77, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = kFoldRun.execute(
         Experiment(Dummy(), Parameter(1), self.nursery))
     self.assertAlmostEqual(
         67.12,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = kFoldRun.execute(
         Experiment(Dummy(), Parameter(1), self.iris))
     self.assertAlmostEqual(
         79.33,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
 def test_Execute(self):
     stratifiedMxKRun = StratifiedMxKFoldRun(5, 2)
     experimentPerformance = stratifiedMxKRun.execute(
         Experiment(C45(), C45Parameter(1, True, 0.2), self.iris))
     self.assertAlmostEqual(
         8.00, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = stratifiedMxKRun.execute(
         Experiment(C45(), C45Parameter(1, True, 0.2), self.tictactoe))
     self.assertAlmostEqual(
         22.03,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = stratifiedMxKRun.execute(
         Experiment(Knn(), KnnParameter(1, 3, EuclidianDistance()),
                    self.bupa))
     self.assertAlmostEqual(
         33.33,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = stratifiedMxKRun.execute(
         Experiment(Knn(), KnnParameter(1, 3, EuclidianDistance()),
                    self.dermatology))
     self.assertAlmostEqual(
         13.66,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = stratifiedMxKRun.execute(
         Experiment(Lda(), Parameter(1), self.bupa))
     self.assertAlmostEqual(
         33.05,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = stratifiedMxKRun.execute(
         Experiment(Lda(), Parameter(1), self.dermatology))
     self.assertAlmostEqual(
         3.55, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = stratifiedMxKRun.execute(
         Experiment(LinearPerceptron(),
                    LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100),
                    self.iris))
     self.assertAlmostEqual(
         6.00, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = stratifiedMxKRun.execute(
         Experiment(LinearPerceptron(),
                    LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100),
                    self.dermatology))
     self.assertAlmostEqual(
         4.66, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = stratifiedMxKRun.execute(
         Experiment(NaiveBayes(), Parameter(1), self.car))
     self.assertAlmostEqual(
         15.11,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = stratifiedMxKRun.execute(
         Experiment(NaiveBayes(), Parameter(1), self.nursery))
     self.assertAlmostEqual(
         9.68, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = stratifiedMxKRun.execute(
         Experiment(Bagging(), BaggingParameter(1, 50), self.tictactoe))
     self.assertAlmostEqual(
         9.29, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = stratifiedMxKRun.execute(
         Experiment(Bagging(), BaggingParameter(1, 50), self.car))
     self.assertAlmostEqual(
         9.20, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = stratifiedMxKRun.execute(
         Experiment(Dummy(), Parameter(1), self.nursery))
     self.assertAlmostEqual(
         66.67,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = stratifiedMxKRun.execute(
         Experiment(Dummy(), Parameter(1), self.iris))
     self.assertAlmostEqual(
         66.67,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
 def test_Execute(self):
     bootstrapRun = BootstrapRun(10)
     experimentPerformance = bootstrapRun.execute(
         Experiment(C45(), C45Parameter(1, True, 0.2), self.iris))
     self.assertAlmostEqual(
         4.33, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = bootstrapRun.execute(
         Experiment(C45(), C45Parameter(1, True, 0.2), self.tictactoe))
     self.assertAlmostEqual(
         13.16,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = bootstrapRun.execute(
         Experiment(Knn(), KnnParameter(1, 3, EuclidianDistance()),
                    self.bupa))
     self.assertAlmostEqual(
         24.84,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = bootstrapRun.execute(
         Experiment(Knn(), KnnParameter(1, 3, EuclidianDistance()),
                    self.dermatology))
     self.assertAlmostEqual(
         8.01, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = bootstrapRun.execute(
         Experiment(Lda(), Parameter(1), self.bupa))
     self.assertAlmostEqual(
         32.03,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = bootstrapRun.execute(
         Experiment(Lda(), Parameter(1), self.dermatology))
     self.assertAlmostEqual(
         2.95, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = bootstrapRun.execute(
         Experiment(LinearPerceptron(),
                    LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100),
                    self.iris))
     self.assertAlmostEqual(
         3.27, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = bootstrapRun.execute(
         Experiment(LinearPerceptron(),
                    LinearPerceptronParameter(1, 0.1, 0.99, 0.2, 100),
                    self.dermatology))
     self.assertAlmostEqual(
         2.65, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = bootstrapRun.execute(
         Experiment(NaiveBayes(), Parameter(1), self.car))
     self.assertAlmostEqual(
         14.75,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = bootstrapRun.execute(
         Experiment(NaiveBayes(), Parameter(1), self.nursery))
     self.assertAlmostEqual(
         9.71, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = bootstrapRun.execute(
         Experiment(Bagging(), BaggingParameter(1, 50), self.tictactoe))
     self.assertAlmostEqual(
         3.00, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = bootstrapRun.execute(
         Experiment(Bagging(), BaggingParameter(1, 50), self.car))
     self.assertAlmostEqual(
         3.44, 100 * experimentPerformance.meanPerformance().getErrorRate(),
         2)
     experimentPerformance = bootstrapRun.execute(
         Experiment(Dummy(), Parameter(1), self.nursery))
     self.assertAlmostEqual(
         66.79,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)
     experimentPerformance = bootstrapRun.execute(
         Experiment(Dummy(), Parameter(1), self.iris))
     self.assertAlmostEqual(
         66.67,
         100 * experimentPerformance.meanPerformance().getErrorRate(), 2)