Esempio n. 1
0
 def test_PredictionWithDiffVarType(self):
     """Test prediction with diff. VarType
     Check that examples whose variable types differ from the training
     domain are predicted with the expected accuracy, and that the
     classifier logged the type fixes it applied.
     """
     expectedAcc = 0.666666666667
     # Build an SVM classifier from the clean training data
     model = AZorngCvSVM.CvSVMLearner(self.noBadDataTrain)
     # Skip the first 3 examples: indexes 0..2 are known to be incompatible
     Acc2 = evalUtilities.getClassificationAccuracy(self.noBadDataTest[3:], model)
     Acc1 = evalUtilities.getClassificationAccuracy(self.badVarTypeData[3:], model)
     for acc in (Acc1, Acc2):
         self.assertEqual(round(acc, 7), round(expectedAcc, 7),
                          "The Accuracy is not the expected. Got: " + str(acc))
     # The classifier must report both the count of fixed examples and
     # the specific variable whose type was converted
     fixLog = model.examplesFixedLog
     self.assert_('Fixed Types of variables' in fixLog
                  and fixLog['Fixed Types of variables'] == 27,
                  "No report of fixing in classifier class")
     self.assert_('Vars needing type fix' in fixLog
                  and fixLog['Vars needing type fix']['[Br]([C])'] == "EnumVariable to FloatVariable",
                  "No report of fixing in classifier class")
Esempio n. 2
0
    def test_MetaDataHandleForSavingModel(self):
        """Test the handling of SaveModel for Data with Meta Atributes

        A model trained on data with meta attributes must drop the metas
        when written to disk and predict identically once reloaded.
        """
        expectedAccWMeta = 1.0  # Ver 0.3
        expectedAccNoMeta = 0.63333333300000005  # Ver 0.3

        # The training set is required to carry meta attributes
        self.assert_(len(self.WMetaTest.domain.getmetas()) >= 1,
                     "The dataset WMetaTest should have Meta Attributes")
        learner = AZorngCvANN.CvANNLearner(randomWeights=False, nHidden=[3],
                                           nEpochs=100, stopUPs=0)
        model = learner(self.WMetaTest)
        AccNoMetaBefore = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, model)
        AccWMetaBefore = evalUtilities.getClassificationAccuracy(self.WMetaTest, model)

        # Persist the model in a scratch area
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdiriTest" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "CvANNModel.CvANN")
        model.write(modelPath)

        # Reload; the imputer domain must be free of meta attributes
        loaded = AZorngCvANN.CvANNread(modelPath)
        self.assert_(len(loaded.imputer.defaults.domain.getmetas()) == 0,
                     "There shouldn't be any Meta data now!")

        # Accuracy after the save/load round trip
        AccNoMetaAfter = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, loaded)
        AccWMetaAfter = evalUtilities.getClassificationAccuracy(self.WMetaTest, loaded)

        # Predictions must be identical before and after saving
        self.assertEqual(AccNoMetaBefore, AccNoMetaAfter,
                         "NoMeta: Predictions after loading saved model were different")
        self.assertEqual(AccWMetaBefore, AccWMetaAfter,
                         "WMeta: Predictions after loading saved model were different")
        self.assertEqual(round(AccWMetaAfter, 9), round(expectedAccWMeta, 9))
        self.assertEqual(round(AccNoMetaAfter, 9), round(expectedAccNoMeta, 9))

        # Clean up the scratch area
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 3
0
    def test_MetaDataHandleForSavingModel(self):
        """Test the handling of SaveModel for Data with Meta Atributes

        A CvBayes model trained on data carrying meta attributes must shed
        the metas when saved and predict identically once reloaded.
        """
        # Platform-dependent reference accuracies
        expectedAccWMeta = [0.733333333, 0.83333333300000001]
        expectedAccNoMeta = [0.55151515200000001]

        # Training data must contain meta attributes for this test to be valid
        self.assert_(len(self.WMetaTest.domain.getmetas()) >= 1,
                     "The dataset WMetaTest should have Meta Attributes")
        model = AZorngCvBayes.CvBayesLearner()(self.WMetaTest)
        AccNoMetaBefore = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, model)
        AccWMetaBefore = evalUtilities.getClassificationAccuracy(self.WMetaTest, model)

        # Write the model to a scratch location
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdiriTest" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "CvBayesModel.CvBayes")
        model.write(modelPath)

        # Reload and confirm no meta attributes survived the round trip
        loaded = AZorngCvBayes.CvBayesread(modelPath)
        self.assert_(len(loaded.imputer.defaults.domain.getmetas()) == 0,
                     "There shouldn't be any Meta data now!")

        # Accuracy after reloading
        AccNoMetaAfter = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, loaded)
        AccWMetaAfter = evalUtilities.getClassificationAccuracy(self.WMetaTest, loaded)

        # Save/load must not change predictions
        self.assertEqual(AccNoMetaBefore, AccNoMetaAfter,
                         "NoMeta: Predictions after loading saved model were different")
        self.assertEqual(AccWMetaBefore, AccWMetaAfter,
                         "WMeta: Predictions after loading saved model were different")
        # Accuracy must round to one of the expected platform values
        self.assert_(round(AccWMetaAfter, 5) in [round(x, 5) for x in expectedAccWMeta])
        self.assert_(round(AccNoMetaAfter, 5) in [round(x, 5) for x in expectedAccNoMeta])

        # Clean up the scratch area
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 4
0
    def test_PredictionWithDiffVarType(self):
        """Test prediction with diff. VarType
        Test the prediction of examples with different varType

        Also checks that the classifier reported the type fixes it applied.
        """
        expectedAccValues = [
            0.96296296296296291,  # Ver 0.3
            1.0
        ]

        # Create a rf model
        RFlearner = AZorngRF.RFLearner(NumThreads = 1, maxDepth = "20", minSample = "5", useSurrogates = "false", getVarVariance = "false", \
                                        nActVars = "0", nTrees = "100", forestAcc = "0.1", termCrit = "0")
        rf = RFlearner(self.noBadDataTrain)
        # using from index 3 to the end of data, because we know that from 0 to 2 the examples are not compatible
        Acc2 = evalUtilities.getClassificationAccuracy(self.noBadDataTest[3:],
                                                       rf)
        Acc1 = evalUtilities.getClassificationAccuracy(self.badVarTypeData[3:],
                                                       rf)
        self.assertRoundedToExpectedArray(Acc1, expectedAccValues, 9)
        self.assertRoundedToExpectedArray(Acc2, expectedAccValues, 9)
        self.assert_(
            ('Fixed Types of variables' in rf.examplesFixedLog)
            and (rf.examplesFixedLog['Fixed Types of variables'] == 27),
            "No report of fixing in classifier class")
        # BUG FIX: the closing parenthesis previously grouped the condition and
        # the failure message into a single 2-tuple, which is always truthy, so
        # this assertion could never fail. The message is now a proper argument.
        self.assert_(
            ('Vars needing type fix' in rf.examplesFixedLog)
            and (rf.examplesFixedLog['Vars needing type fix']['[Br]([C])']
                 == "EnumVariable to FloatVariable"),
            "No report of fixing in classifier class")
Esempio n. 5
0
 def test_PredictionWithDiffVarType(self):
     """Test prediction with diff. VarType
     Test the prediction of examples with different varType

     Also checks that the classifier reported the type fixes it applied.
     """
     expectedAcc = 0.96296296296296291  # Ver 0.3
     # Create a rf model
     RFlearner = AZorngRF.RFLearner(
         NumThreads=1,
         maxDepth="20",
         minSample="5",
         useSurrogates="false",
         getVarVariance="false",
         nActVars="0",
         nTrees="100",
         forestAcc="0.1",
         termCrit="0",
     )
     rf = RFlearner(self.noBadDataTrain)
     # using from index 3 to the end of data, because we know that from 0 to 2 the examples are not compatible
     Acc2 = evalUtilities.getClassificationAccuracy(self.noBadDataTest[3:], rf)
     Acc1 = evalUtilities.getClassificationAccuracy(self.badVarTypeData[3:], rf)
     self.assertEqual(Acc1, expectedAcc)
     self.assertEqual(Acc2, expectedAcc)
     self.assert_(
         ("Fixed Types of variables" in rf.examplesFixedLog)
         and (rf.examplesFixedLog["Fixed Types of variables"] == 27),
         "No report of fixing in classifier class",
     )
     # BUG FIX: the condition and the failure message were previously wrapped
     # in one tuple (always truthy), so this assertion could never fail. The
     # message is now a separate argument to assert_.
     self.assert_(
         ("Vars needing type fix" in rf.examplesFixedLog)
         and (rf.examplesFixedLog["Vars needing type fix"]["[Br]([C])"]
              == "EnumVariable to FloatVariable"),
         "No report of fixing in classifier class",
     )
Esempio n. 6
0
    def test_Priors(self):
        """Test to assure that priors are set correcly.

        Trains a CvANN with explicit class priors, verifies the model survives
        a save/load round trip unchanged, and checks that the priors actually
        influenced the model (accuracy differs from a no-priors model).
        """
        # Create a CvANN model with explicit class priors
        CvANNlearner = AZorngCvANN.CvANNLearner(stopUPs = 0, priors = {"Iris-versicolor":0.35, "Iris-virginica":0.13, "Iris-setosa":0.52})
        CvANNmodel = CvANNlearner(self.irisData)
        # Model with no priors, used as a control
        CvANNlearnerNoP = AZorngCvANN.CvANNLearner(stopUPs=0)
        CvANNmodelNoP = CvANNlearnerNoP(self.irisData)

        # Classification accuracy of the priors model before saving
        Acc = evalUtilities.getClassificationAccuracy(self.irisData, CvANNmodel)

        # Save the model
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdirTest"+str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir,"modelPriors.CvANN")
        CvANNmodel.write(modelPath)

        # Read in the model
        newCvANNmodel = AZorngCvANN.CvANNread(modelPath)

        # BUG FIX: savedAcc was previously computed with the in-memory model
        # (CvANNmodel), so the round-trip comparison below was vacuous and the
        # loaded model (newCvANNmodel) was never exercised. Evaluate the
        # *loaded* model instead.
        savedAcc = evalUtilities.getClassificationAccuracy(self.irisData, newCvANNmodel)
        NoPAcc = evalUtilities.getClassificationAccuracy(self.irisData, CvANNmodelNoP)

        # The saved/loaded model must predict exactly like the original
        self.assertEqual(Acc, savedAcc)
        # Priors must make a difference relative to the no-priors model
        self.assert_(Acc != NoPAcc)

        # Remove the scratch directory
        os.system("/bin/rm -rf "+scratchdir)
Esempio n. 7
0
    def test_Priors(self):
        """Test to assure that priors are set correcly.

        Trains a CvANN with explicit class priors, verifies the model survives
        a save/load round trip unchanged, and checks that the priors actually
        influenced the model (accuracy differs from a no-priors model).
        """
        # Create a CvANN model with explicit class priors
        CvANNlearner = AZorngCvANN.CvANNLearner(stopUPs = 0, priors = {"Iris-versicolor":0.35, "Iris-virginica":0.13, "Iris-setosa":0.52})
        CvANNmodel = CvANNlearner(self.irisData)
        # Model with no priors, used as a control
        CvANNlearnerNoP = AZorngCvANN.CvANNLearner(stopUPs=0)
        CvANNmodelNoP = CvANNlearnerNoP(self.irisData)

        # Classification accuracy of the priors model before saving
        Acc = evalUtilities.getClassificationAccuracy(self.irisData, CvANNmodel)

        # Save the model
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdirTest"+str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir,"modelPriors.CvANN")
        CvANNmodel.write(modelPath)

        # Read in the model
        newCvANNmodel = AZorngCvANN.CvANNread(modelPath)

        # BUG FIX: savedAcc was previously computed with the in-memory model
        # (CvANNmodel), so the round-trip comparison below was vacuous and the
        # loaded model (newCvANNmodel) was never exercised. Evaluate the
        # *loaded* model instead.
        savedAcc = evalUtilities.getClassificationAccuracy(self.irisData, newCvANNmodel)
        NoPAcc = evalUtilities.getClassificationAccuracy(self.irisData, CvANNmodelNoP)

        # The saved/loaded model must predict exactly like the original
        self.assertEqual(Acc, savedAcc)
        # Priors must make a difference relative to the no-priors model
        self.assert_(Acc != NoPAcc)

        # Remove the scratch directory
        os.system("/bin/rm -rf "+scratchdir)
Esempio n. 8
0
    def test_MetaDataHandleForSavingModel(self):
        """Test the handling of SaveModel for Data with Meta Atributes

        Saving a CvANN model trained on meta-carrying data must strip the
        metas; the reloaded model must predict exactly as before.
        """
        expectedAccWMeta = 1.0  # Ver 0.3
        expectedAccNoMeta = 0.63333333300000005  # Ver 0.3

        # Precondition: the training data carries meta attributes
        self.assert_(len(self.WMetaTest.domain.getmetas()) >= 1,
                     "The dataset WMetaTest should have Meta Attributes")
        annLearner = AZorngCvANN.CvANNLearner(randomWeights=False,
                                              nHidden=[3],
                                              nEpochs=100,
                                              stopUPs=0)
        annModel = annLearner(self.WMetaTest)
        accNoMetaBefore = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, annModel)
        accWMetaBefore = evalUtilities.getClassificationAccuracy(self.WMetaTest, annModel)

        # Write the model to disk
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdiriTest" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "CvANNModel.CvANN")
        annModel.write(modelPath)

        # Read it back; no meta attributes may remain in the imputer domain
        annLoaded = AZorngCvANN.CvANNread(modelPath)
        self.assert_(len(annLoaded.imputer.defaults.domain.getmetas()) == 0,
                     "There shouldn't be any Meta data now!")

        # Re-score with the reloaded model
        accNoMetaAfter = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, annLoaded)
        accWMetaAfter = evalUtilities.getClassificationAccuracy(self.WMetaTest, annLoaded)

        # The round trip must not alter predictions
        self.assertEqual(accNoMetaBefore, accNoMetaAfter,
                         "NoMeta: Predictions after loading saved model were different")
        self.assertEqual(accWMetaBefore, accWMetaAfter,
                         "WMeta: Predictions after loading saved model were different")
        self.assertEqual(round(accWMetaAfter, 9), round(expectedAccWMeta, 9))
        self.assertEqual(round(accNoMetaAfter, 9), round(expectedAccNoMeta, 9))

        # Remove the scratch directory
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 9
0
    def testMetaDataHandleForSavingModel(self):
        """Test the handling of SaveModel for Data with Meta Atributes

        Checks that a PLS model trained on meta-carrying data drops the metas
        on save and keeps its predictions after reload.
        """
        # Ver 0.3 - Artifact: the extra values can be expected on other
        # systems: [AZInHouse, Ubuntu10.04, Ubuntu10.10, Ubuntu10.10]
        expected_AccWMetaAfter = [0.433333333333, 0.766666666667, 0.366666666667, 0.6]
        expected_AccNoMetaAfter = [0.545454545455, 0.533333333333, 0.442424242424, 0.551515151515]

        # Precondition: training data must carry meta attributes
        self.assert_(len(self.WMetaTest.domain.getmetas()) >= 1,
                     "The dataset WMetaTest should have Meta Attributes")
        model = AZorngPLS.PLSLearner(self.WMetaTest)
        AccNoMetaBefore = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, model)
        AccWMetaBefore = evalUtilities.getClassificationAccuracy(self.WMetaTest, model)

        # Persist the model
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdir" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "PLSModel")
        model.write(modelPath)

        # Reload; the imputer domain must have lost all meta attributes
        loaded = AZorngPLS.PLSread(modelPath)
        self.assert_(len(loaded.imputer.defaults.domain.getmetas()) == 0,
                     "There shouldn't be any Meta data now!")

        # Accuracy after the round trip
        AccNoMetaAfter = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, loaded)
        AccWMetaAfter = evalUtilities.getClassificationAccuracy(self.WMetaTest, loaded)

        # Save/load must not alter predictions
        self.assertEqual(AccNoMetaBefore, AccNoMetaAfter,
                         "NoMeta: Predictions after loading saved model were different")
        self.assertEqual(AccWMetaBefore, AccWMetaAfter,
                         "WMeta: Predictions after loading saved model were different")
        # Accuracy must match one of the platform-dependent reference values
        self.assert_(round(AccWMetaAfter, 9) in [round(x, 9) for x in expected_AccWMetaAfter],
                     "Accuracy was not the expected value! Got: " + str(AccWMetaAfter) + " - " + str(AccNoMetaAfter))
        self.assert_(round(AccNoMetaAfter, 9) in [round(x, 9) for x in expected_AccNoMetaAfter],
                     "Accuracy was not the expected value!")

        # Remove the scratch directory
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 10
0
    def testSavedModel(self):
        """Test PLS model saving
        Test to assure that a saved pls model gives the same predictions as before saving."""
        # Build the PLS model from the training data
        model = AZorngPLS.PLSLearner(self.train_data)

        # Accuracy of the freshly trained model
        accBefore = evalUtilities.getClassificationAccuracy(self.test_data, model)

        # Write the model into a scratch directory
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdir" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "PLSModel")
        model.write(modelPath)

        # Load the model back from disk
        reloaded = AZorngPLS.PLSread(modelPath)

        # Accuracy of the reloaded model
        accAfter = evalUtilities.getClassificationAccuracy(self.test_data, reloaded)

        # Saving and loading must not change predictions
        self.assertEqual(accBefore, accAfter)

        # Remove the scratch directory
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 11
0
    def test_SavedModel(self):
        """Test to assure that a saved ann model gives the same predictions as before saving."""
        # Train the ANN in a single step
        trained = AZorngCvANN.CvANNLearner(self.train_data, stopUPs=0)

        # Baseline accuracy before saving
        accBefore = evalUtilities.getClassificationAccuracy(self.test_data, trained)

        # Persist the model to a scratch directory
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdir" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "ann.cvann")
        trained.write(modelPath)

        # Load the model back
        reloaded = AZorngCvANN.CvANNread(modelPath)

        # Accuracy after the round trip
        accAfter = evalUtilities.getClassificationAccuracy(self.test_data, reloaded)

        # The two accuracies must match exactly
        self.assertEqual(accBefore, accAfter)

        # Remove the scratch directory
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 12
0
    def test_SavedModel(self):
        """Test to assure that a saved ann model gives the same predictions as before saving."""
        # One-step training of the ANN classifier
        original = AZorngCvANN.CvANNLearner(self.train_data, stopUPs=0)

        # Accuracy before the save/load round trip
        accOriginal = evalUtilities.getClassificationAccuracy(self.test_data, original)

        # Write the model out under a unique scratch directory
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdir" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "ann.fann")
        original.write(modelPath)

        # Read the model back in
        restored = AZorngCvANN.CvANNread(modelPath)

        # Accuracy after reloading
        accRestored = evalUtilities.getClassificationAccuracy(self.test_data, restored)

        # Round-tripping must preserve every prediction
        self.assertEqual(accOriginal, accRestored)

        # Clean up the scratch directory
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 13
0
    def testSavedModel(self):
        """Test PLS model saving
        Test to assure that a saved pls model gives the same predictions as before saving."""
        # Train the PLS classifier
        plsModel = AZorngPLS.PLSLearner(self.train_data)

        # Score it before saving
        accTrained = evalUtilities.getClassificationAccuracy(self.test_data, plsModel)

        # Save the model under a timestamped scratch directory
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdir" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "PLSModel")
        plsModel.write(modelPath)

        # Load it back
        plsLoaded = AZorngPLS.PLSread(modelPath)

        # Score the loaded model
        accLoaded = evalUtilities.getClassificationAccuracy(self.test_data, plsLoaded)

        # Predictions must be unchanged by the round trip
        self.assertEqual(accTrained, accLoaded)

        # Remove the scratch directory
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 14
0
 def test_PredictionWithDiffVarType(self):
     """Test prediction with diff. VarType
     Test the prediction of examples with different varType

     Also checks that the classifier reported the type fixes it applied.
     """
     expectedAcc = 0.37036999999999998
     # Create a Bayes model
     CvBayeslearner = AZorngCvBayes.CvBayesLearner()
     Bayes = CvBayeslearner(self.noBadDataTrain)
     # using from index 3 to the end of data, because we know that from 0 to 2 the examples are not compatible
     Acc2 = evalUtilities.getClassificationAccuracy(self.noBadDataTest[3:], Bayes)
     Acc1 = evalUtilities.getClassificationAccuracy(self.badVarTypeData[3:], Bayes)
     self.assertEqual(round(Acc1, 6), round(expectedAcc, 6))
     self.assertEqual(round(Acc2, 6), round(expectedAcc, 6))
     self.assert_(
         ("Fixed Types of variables" in Bayes.examplesFixedLog)
         and (Bayes.examplesFixedLog["Fixed Types of variables"] == 27),
         "No report of fixing in classifier class",
     )
     # BUG FIX: the condition and the failure message were previously wrapped
     # in a single tuple, which is always truthy, so this assertion could
     # never fail. The message is now a separate argument to assert_.
     self.assert_(
         ("Vars needing type fix" in Bayes.examplesFixedLog)
         and (Bayes.examplesFixedLog["Vars needing type fix"]["[Br]([C])"]
              == "EnumVariable to FloatVariable"),
         "No report of fixing in classifier class",
     )
Esempio n. 15
0
    def testMetaDataHandleForSavingModel(self):
        """Test the handling of SaveModel for Data with Meta Atributes

        The saved model must lose the meta attributes and still reproduce
        the reference accuracies after reload.
        """
        # Reference accuracies after the save/load round trip
        expectedAccWMeta = 0.888888888889
        expectedAccNoMeta = 0.605769230769

        # Precondition: the training data carries meta attributes
        self.assert_(len(self.WMetaTest.domain.getmetas()) >= 1,
                     "The dataset WMetaTest should have Meta Attributes")
        model = AZorngPLS.PLSLearner(self.WMetaTest)
        AccNoMetaBefore = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, model)
        AccWMetaBefore = evalUtilities.getClassificationAccuracy(self.WMetaTest, model)

        # Persist the model
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdir" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "PLSModel")
        model.write(modelPath)

        # Reload; the imputer domain must carry no meta attributes
        loaded = AZorngPLS.PLSread(modelPath)
        self.assert_(len(loaded.imputer.defaults.domain.getmetas()) == 0,
                     "There shouldn't be any Meta data now!")

        # Accuracy after the round trip
        AccNoMetaAfter = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, loaded)
        AccWMetaAfter = evalUtilities.getClassificationAccuracy(self.WMetaTest, loaded)

        # Save/load must not change predictions
        self.assertEqual(AccNoMetaBefore, AccNoMetaAfter,
                         "NoMeta: Predictions after loading saved model were different")
        self.assertEqual(AccWMetaBefore, AccWMetaAfter,
                         "WMeta: Predictions after loading saved model were different")
        self.assertEqual(round(AccWMetaAfter, 9), round(expectedAccWMeta, 9),
                         "Accuracy was not the expected value!")
        self.assertEqual(round(AccNoMetaAfter, 9), round(expectedAccNoMeta, 9),
                         "Accuracy was not the expected value!")

        # Remove the scratch directory
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 16
0
    def test_TwoWays(self):
        """
        Test that an Bayes created in one or two steps give the same results
        """
        # Maximum accuracy deviation tolerated between the two classifiers
        allowedDev = 0.4  # Before:   0.02

        # One-step construction: the learner is trained at instantiation
        oneStepModel = AZorngCvBayes.CvBayesLearner(self.train_data)
        oneStepAcc = evalUtilities.getClassificationAccuracy(self.test_data, oneStepModel)

        # Two-step construction: instantiate the learner, then train it
        twoStepModel = AZorngCvBayes.CvBayesLearner()(self.train_data)
        twoStepAcc = evalUtilities.getClassificationAccuracy(self.test_data, twoStepModel)

        # Both construction paths must land within the allowed deviation
        self.assert_(abs(oneStepAcc - twoStepAcc) <= allowedDev,
                     "Dev=" + str(oneStepAcc - twoStepAcc))
Esempio n. 17
0
    def test_SavedModel(self):
        """Test to assure that a saved Bayes model gives the same predictions as before saving."""
        # Train a Bayes classifier in one step
        model = AZorngCvBayes.CvBayesLearner(self.train_data)

        # Baseline accuracy before saving
        accBefore = evalUtilities.getClassificationAccuracy(self.test_data, model)

        # Persist the model
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdir" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "Bayes.fBayes")
        model.write(modelPath)

        # Reload through the CvBayes-specific reader
        reloaded = AZorngCvBayes.CvBayesread(modelPath)
        accAfter = evalUtilities.getClassificationAccuracy(self.test_data, reloaded)

        # Type-specific load must reproduce the predictions exactly
        self.assertEqual(accBefore, accAfter)

        # Reload through the generic model reader as well
        generic = AZBaseClasses.modelRead(modelPath)
        accGeneric = evalUtilities.getClassificationAccuracy(self.test_data, generic)
        self.assertEqual(accBefore, accGeneric)

        # Remove the scratch directory
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 18
0
    def test_SVM_Priors_D(self):
        """Test SVM with priors

        Trains SVMs with explicit class priors on self.inDataD, checks
        hard-coded reference accuracies, verifies save/load round trips via
        self.modelPath, and cross-validates a manually configured learner
        with and without priors.
        """
        # Train a svm with explicit class priors passed at construction
        svm = AZorngCvSVM.CvSVMLearner(
            self.inDataD, priors={"Iris-setosa": 0.2, "Iris-versicolor": 0.3, "Iris-virginica": 0.5}
        )
        trainedAcc = evalUtilities.getClassificationAccuracy(self.inDataD, svm)

        # Hard-coded regression value for this configuration
        self.assertEqual(round(trainedAcc, 7), round(0.73333329999999997, 7))
        # Save model; write() is expected to return True on success
        rc = svm.write(self.modelPath)
        self.assertEqual(rc, True)
        # Load the saved model
        loadedsvm = AZorngCvSVM.CvSVMread(self.modelPath)
        loadedAcc = evalUtilities.getClassificationAccuracy(self.inDataD, loadedsvm)
        # Assure equal accuracy before and after the save/load round trip
        self.assertEqual(trainedAcc, loadedAcc)

        # Second learner: same priors, but every SVM parameter set by hand
        svmLearner = AZorngCvSVM.CvSVMLearner(
            scaleData=False, priors={"Iris-setosa": 0.2, "Iris-versicolor": 0.3, "Iris-virginica": 0.5}
        )

        svmLearner.name = "CvSVMLearner"
        svmLearner.shrinking = 1
        svmLearner.eps = 0.001
        svmLearner.p = 0.0
        svmLearner.nu = 0.6
        # NOTE(review): kernel_type=2 / svm_type=103 look like OpenCV CvSVM
        # constants (RBF kernel, EPS_SVR) -- confirm against AZorngCvSVM
        svmLearner.kernel_type = 2
        svmLearner.svm_type = 103
        svmLearner.gamma = 0.0033
        svmLearner.C = 47
        svmLearner.probability = 1
        # scaleData=False from the constructor is deliberately overridden here
        svmLearner.scaleData = True
        svmLearner.scaleClass = False
        # svmLearner.for_nomogram=1

        # 5-fold stratified cross-validation with priors in effect
        Res = orngTest.crossValidation(
            [svmLearner], self.inDataD, folds=5, strat=orange.MakeRandomIndices.StratifiedIfPossible
        )
        CA = evalUtilities.CA(Res)[0]
        self.assertEqual(round(CA, 2), round(0.940000000, 2))  # orange1.0: 0.93333333333333335])

        # Same cross-validation without priors: expected accuracy differs
        svmLearner.priors = None
        Res = orngTest.crossValidation(
            [svmLearner], self.inDataD, folds=5, strat=orange.MakeRandomIndices.StratifiedIfPossible
        )
        CA = evalUtilities.CA(Res)[0]
        self.assertEqual(round(CA, 2), round(0.94666666666666666, 2))

        # Train the hand-configured learner on the full data and round-trip it
        newSVM = svmLearner(self.inDataD)
        trainedAcc = evalUtilities.getClassificationAccuracy(self.inDataD, newSVM)
        # Save model
        rc = newSVM.write(self.modelPath)
        self.assertEqual(rc, True)
        # Load the saved model
        loadedsvm = AZorngCvSVM.CvSVMread(self.modelPath)
        loadedAcc = evalUtilities.getClassificationAccuracy(self.inDataD, loadedsvm)
        # Assure equal accuracy (loaded model compared at 1 decimal only)
        self.assertEqual(round(trainedAcc, 7), round(0.95999999999999996, 7))  # Before in AZSVM: 0.953333300000
        self.assertEqual(round(trainedAcc, 1), round(loadedAcc, 1))
Esempio n. 19
0
    def test_SavedModel(self):
        """Test to assure that a saved Bayes model gives the same predictions as before saving."""
        # One-step training of the Bayes classifier
        bayesModel = AZorngCvBayes.CvBayesLearner(self.train_data)

        # Accuracy of the freshly trained model
        accTrained = evalUtilities.getClassificationAccuracy(self.test_data, bayesModel)

        # Write the model out under a timestamped scratch directory
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdir" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "Bayes.fBayes")
        bayesModel.write(modelPath)

        # Load via the CvBayes-specific reader and re-score
        bayesLoaded = AZorngCvBayes.CvBayesread(modelPath)
        accLoaded = evalUtilities.getClassificationAccuracy(self.test_data, bayesLoaded)

        # Round trip through the specific reader must be lossless
        self.assertEqual(accTrained, accLoaded)

        # Load via the generic model reader and re-score
        bayesGeneric = AZBaseClasses.modelRead(modelPath)
        accGeneric = evalUtilities.getClassificationAccuracy(self.test_data, bayesGeneric)
        self.assertEqual(accTrained, accGeneric)

        # Clean up the scratch directory
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 20
0
    def test_TwoWays(self):
        """
        Test that an Bayes created in one or two steps give the same results
        """
        # Tolerated accuracy gap between the two construction paths
        tol = 0.4  # Before:   0.02

        # Path 1: train at construction time
        directModel = AZorngCvBayes.CvBayesLearner(self.train_data)
        oneStepAcc = evalUtilities.getClassificationAccuracy(self.test_data, directModel)

        # Path 2: construct the learner first, train afterwards
        bayesLearner = AZorngCvBayes.CvBayesLearner()
        deferredModel = bayesLearner(self.train_data)
        twoStepAcc = evalUtilities.getClassificationAccuracy(self.test_data, deferredModel)

        # Neither accuracy may fall outside the tolerance band of the other
        self.assert_(not (oneStepAcc < twoStepAcc - tol or oneStepAcc > twoStepAcc + tol),
                     "Dev=" + str(oneStepAcc - twoStepAcc))
Esempio n. 21
0
    def test_MetaDataHandleForSavingModel(self):
        """Test the handling of SaveModel for Data with Meta Atributes

        A Random Forest trained on meta-carrying data must drop the metas
        when saved and reproduce its predictions after reload.
        """
        expectedAccWMeta = 1.0  # VEr 0.3
        expectedAccNoMetaValues = [
            0.56666666700000001,  # Ver 0.3
            0.563636364
        ]

        # Precondition: the training data must carry meta attributes
        self.assert_(
            len(self.WMetaTest.domain.getmetas()) >= 1,
            "The dataset WMetaTest should have Meta Attributes")
        learner = AZorngRF.RFLearner(
            NumThreads=1, maxDepth="20", minSample="5", useSurrogates="false",
            getVarVariance="false", nActVars="0", nTrees="100",
            forestAcc="0.1", termCrit="0")
        model = learner(self.WMetaTest)
        AccNoMetaBefore = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, model)
        AccWMetaBefore = evalUtilities.getClassificationAccuracy(self.WMetaTest, model)

        # Persist the model
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdirTest" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "RFModel.RF")
        model.write(modelPath)

        # Reload; the stored domain must have no meta attributes left
        loaded = AZorngRF.RFread(modelPath)
        self.assert_(
            len(loaded.domain.getmetas()) == 0,
            "There shouldn't be any Meta data now!")

        # Accuracy after the round trip
        AccNoMetaAfter = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, loaded)
        AccWMetaAfter = evalUtilities.getClassificationAccuracy(self.WMetaTest, loaded)

        # Predictions before and after saving must match
        self.assertEqual(
            AccNoMetaBefore, AccNoMetaAfter,
            "NoMeta: Predictions after loading saved model were different")
        self.assertEqual(
            AccWMetaBefore, AccWMetaAfter,
            "WMeta: Predictions after loading saved model were different")
        self.assertEqual(round(AccWMetaAfter, 9), round(expectedAccWMeta, 9))
        self.assertRoundedToExpectedArray(AccNoMetaAfter, expectedAccNoMetaValues, 9)

        # Remove the scratch directory
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 22
0
 def testPredictionWithDiffVarOrder(self):
     """Check that a PLS model predicts with the expected accuracy when
     the variable order of the test examples differs from training.
     """
     expected = 0.851851851852
     # Build the PLS classifier on the reference training set.
     model = AZorngPLS.PLSLearner(self.noBadDataTrain)
     # badVarOrderData holds the same examples with shuffled variable order.
     accOrdered = evalUtilities.getClassificationAccuracy(self.noBadDataTest, model)
     accShuffled = evalUtilities.getClassificationAccuracy(self.badVarOrderData, model)
     for acc in (accOrdered, accShuffled):
         self.assertEqual(round(acc, 9), round(expected, 9),
                          "The Accuracy is not the expected")
Esempio n. 23
0
    def test_Priors(self):
        """Test to assure that priors are set correcly.

        Trains a CvSVM with explicit class priors, checks that a saved and
        re-loaded model predicts identically, and verifies the prior weights
        serialized in the model file.
        """
        # Create a CvSVM model
        CvSVMlearner = AZorngCvSVM.CvSVMLearner(C=3,
                                                priors={
                                                    "Iris-versicolor": 2,
                                                    "Iris-virginica": 4,
                                                    "Iris-setosa": 6
                                                })
        CvSVMmodel = CvSVMlearner(self.inDataD)

        # Calculate classification accuracy
        Acc = evalUtilities.getClassificationAccuracy(self.inDataD, CvSVMmodel)

        # Save the model
        scratchdir = os.path.join(AZOC.SCRATCHDIR,
                                  "scratchdirTest" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "modelPriors.CvSVM")
        CvSVMmodel.write(modelPath)

        # Read in the model
        newCvSVMmodel = AZorngCvSVM.CvSVMread(modelPath)

        # Calculate classification accuracy of the LOADED model.
        # BUG FIX: the original evaluated CvSVMmodel again, so the loaded
        # model was never actually exercised by the equality check below.
        savedAcc = evalUtilities.getClassificationAccuracy(
            self.inDataD, newCvSVMmodel)

        # Test that the accuracy of the two classifiers is the exact same
        self.assertEqual(Acc, savedAcc)

        # Check the priors saved in the model file.
        # FIX: use a context manager instead of shadowing the builtin 'file'.
        with open(os.path.join(modelPath, "model.svm"), "r") as modelFile:
            lines = modelFile.readlines()

        # NOTE: eval() is acceptable here only because the file was just
        # written by this test itself (trusted input).
        priors = [
            round(x, 2) for x in eval((lines[18].strip()).replace("data:", ""))
        ]
        self.assertEqual(len(priors), 3)
        # Expected values are 3x the priors passed to the learner
        # (presumably scaled by C=3 — TODO confirm against the learner code).
        self.assertEqual(
            priors[self.inDataD.domain.classVar.values.index("Iris-setosa")],
            18.0)
        self.assertEqual(
            priors[self.inDataD.domain.classVar.values.index(
                "Iris-versicolor")], 6.0)
        self.assertEqual(
            priors[self.inDataD.domain.classVar.values.index(
                "Iris-virginica")], 12.0)

        # Remove the scratch directory
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 24
0
    def test_MetaDataHandle(self):
        """Verify that meta attributes do not influence CvANN predictions."""
        expected = 0.69999999999999996  # Ver 0.3
        # Train the neural net on the data without meta attributes.
        learner = AZorngCvANN.CvANNLearner(randomWeights=False, nHidden=[3],
                                           nEpochs=100, stopUPs=0)
        classifier = learner(self.NoMetaTrain)

        # NoMetaTest and WMetaTest hold the same examples; only the latter
        # carries a meta attribute.
        accPlain = evalUtilities.getClassificationAccuracy(self.NoMetaTest, classifier)
        accMeta = evalUtilities.getClassificationAccuracy(self.WMetaTest, classifier)
        self.assertEqual(accPlain, accMeta,
                         "Predictions with and without meta data were different!")
        self.assertEqual(round(accPlain, 9), round(expected, 9))
Esempio n. 25
0
    def test_MetaDataHandle(self):
        """Verify that meta attributes do not influence CvBoost predictions."""
        expected = 0.73333333300000003
        # Train the booster on the meta-free training set.
        booster = AZorngCvBoost.CvBoostLearner()(self.NoMetaTrain)

        # The two test sets differ only in the meta attribute.
        accPlain = evalUtilities.getClassificationAccuracy(self.NoMetaTest, booster)
        accMeta = evalUtilities.getClassificationAccuracy(self.WMetaTest, booster)
        self.assertEqual(accPlain, accMeta,
                         "Predictions with and without meta data were different!")
        self.assertEqual(round(accPlain, 9), round(expected, 9))
Esempio n. 26
0
    def test_MetaDataHandle(self):
        """Verify that meta attributes do not influence CvBayes predictions."""
        # Second historical value for this data was 0.666666667.
        expected = 0.33333333300000001
        # Train the Bayes classifier on the meta-free training set.
        bayes = AZorngCvBayes.CvBayesLearner()(self.NoMetaTrain)

        # The two test sets differ only in the meta attribute.
        accPlain = evalUtilities.getClassificationAccuracy(self.NoMetaTest, bayes)
        accMeta = evalUtilities.getClassificationAccuracy(self.WMetaTest, bayes)
        self.assertEqual(accPlain, accMeta,
                         "Predictions with and without meta data were different!")
        self.assertEqual(round(accPlain, 9), round(expected, 9))
Esempio n. 27
0
    def test_MetaDataHandle(self):
        """CvANN predictions must be unaffected by meta attributes."""
        expected = 0.69999999999999996  # Ver 0.3
        ann = AZorngCvANN.CvANNLearner(randomWeights=False, nHidden=[3],
                                       nEpochs=100, stopUPs=0)(self.NoMetaTrain)
        # Both test sets hold the same examples; only WMetaTest carries meta data.
        results = [evalUtilities.getClassificationAccuracy(ds, ann)
                   for ds in (self.NoMetaTest, self.WMetaTest)]
        self.assertEqual(results[0], results[1],
                         "Predictions with and without meta data were different!")
        self.assertEqual(round(results[0], 9), round(expected, 9))
Esempio n. 28
0
    def test_MetaDataHandle(self):
        """Test the handling of Data with Meta Atributes

        NoMetaTest and WMetaTest hold the same examples apart from the meta
        attribute, so the SVM must predict them identically.
        """
        # Create an svm model

        svm = AZorngCvSVM.CvSVMLearner(self.NoMetaTrain)

        # Calculate classification accuracy (NoMetaTest and WMeta are the same appart from the meta atribute)
        AccNoMeta = evalUtilities.getClassificationAccuracy(self.NoMetaTest, svm)
        AccWMeta = evalUtilities.getClassificationAccuracy(self.WMetaTest, svm)

        self.assertEqual(AccNoMeta, AccWMeta, "Predictions with and without meta data were different!")
        # BUG FIX: the failure message ended with a dangling "Got: " and never
        # reported the offending value.
        self.assertEqual(round(AccNoMeta, 9), round(0.7, 9),
                         "Accuracy was not the expected value! Got: " + str(AccNoMeta))  # Ver 0.3
Esempio n. 29
0
    def testMetaDataHandle(self):
        """Meta attributes must not change PLS predictions."""
        # Train on the meta-free training set.
        model = AZorngPLS.PLSLearner(self.NoMetaTrain)

        # The two test sets are identical apart from the meta attribute.
        noMeta = evalUtilities.getClassificationAccuracy(self.NoMetaTest, model)
        withMeta = evalUtilities.getClassificationAccuracy(self.WMetaTest, model)

        self.assertEqual(noMeta, withMeta, "Predictions with and without meta data were different!")
        self.assertEqual(round(noMeta, 9), round(0.851851851852, 9), "Accuracy was not the expected value!")
Esempio n. 30
0
    def test_PredictionWithDiffVarOrder(self):
        """CvANN must reach the expected accuracy regardless of the
        variable order of the test examples.
        """
        expected = 0.69999999999999996  # Ver 0.3
        learner = AZorngCvANN.CvANNLearner(randomWeights=False, nHidden=[3],
                                           nEpochs=100, stopUPs=0)
        net = learner(self.noBadDataTrain)
        # badVarOrderData holds the same examples with a shuffled variable order.
        accOrdered = evalUtilities.getClassificationAccuracy(self.noBadDataTest, net)
        accShuffled = evalUtilities.getClassificationAccuracy(self.badVarOrderData, net)

        for acc in (accOrdered, accShuffled):
            self.assertEqual(round(acc, 9), round(expected, 9))
Esempio n. 31
0
    def test_PredictionWithDiffVarOrder(self):
        """Variable order in the test data must not change CvANN accuracy."""
        expected = 0.69999999999999996  # Ver 0.3
        ann = AZorngCvANN.CvANNLearner(randomWeights=False, nHidden=[3],
                                       nEpochs=100, stopUPs=0)(self.noBadDataTrain)
        # Same examples, different variable order in the second set.
        accOrdered = evalUtilities.getClassificationAccuracy(self.noBadDataTest, ann)
        accShuffled = evalUtilities.getClassificationAccuracy(self.badVarOrderData, ann)

        self.assertEqual(round(accOrdered, 9), round(expected, 9))
        self.assertEqual(round(accShuffled, 9), round(expected, 9))
Esempio n. 32
0
 def testPredictionWithDiffVarType(self):
     """PLS must predict examples whose variables have a fixable type
     mismatch, and the fix must be recorded in examplesFixedLog.
     """
     expected = 0.875
     model = AZorngPLS.PLSLearner(self.noBadDataTrain)
     # Examples 0-2 are known to be incompatible; skip them.
     accGood = evalUtilities.getClassificationAccuracy(self.noBadDataTest[3:], model)
     accBadType = evalUtilities.getClassificationAccuracy(self.badVarTypeData[3:], model)
     self.assertEqual(accBadType, expected, "The Accuracy is not the expected")
     self.assertEqual(accGood, expected, "The Accuracy is not the expected")
     fixLog = model.examplesFixedLog
     self.assert_(('Fixed Types of variables' in fixLog) and (fixLog['Fixed Types of variables'] == 24), "No report of fixing in classifier class")
     self.assert_(('Vars needing type fix' in fixLog) and (fixLog['Vars needing type fix']['SELMA_Max_pos_chrg_GH'] == "EnumVariable to FloatVariable"), "No report of fixing in classifier class")
Esempio n. 33
0
    def test_MetaDataHandleForSavingModel(self):
        """Saving and re-loading a CvBayes model trained on data with meta
        attributes must not change its predictions.
        """
        expectedAccWMeta = [0.733333333, 0.83333333300000001]
        expectedAccNoMeta = [0.55151515200000001]
        # The training data must actually carry meta attributes.
        self.assert_(
            len(self.WMetaTest.domain.getmetas()) >= 1,
            "The dataset WMetaTest should have Meta Attributes")
        modelBefore = AZorngCvBayes.CvBayesLearner()(self.WMetaTest)
        accNoMetaBefore = evalUtilities.getClassificationAccuracy(
            self.NoMetaTrain, modelBefore)
        accWMetaBefore = evalUtilities.getClassificationAccuracy(
            self.WMetaTest, modelBefore)

        # Persist the model to a scratch area.
        scratchdir = os.path.join(AZOC.SCRATCHDIR,
                                  "scratchdiriTest" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "CvBayesModel.CvBayes")
        modelBefore.write(modelPath)

        # Reload; the stored domain must have lost its meta attributes.
        modelAfter = AZorngCvBayes.CvBayesread(modelPath)
        self.assert_(
            len(modelAfter.imputer.defaults.domain.getmetas()) == 0,
            "There shouldn't be any Meta data now!")

        accNoMetaAfter = evalUtilities.getClassificationAccuracy(
            self.NoMetaTrain, modelAfter)
        accWMetaAfter = evalUtilities.getClassificationAccuracy(
            self.WMetaTest, modelAfter)

        # Accuracy must be unchanged by the save/load round trip.
        self.assertEqual(
            accNoMetaBefore, accNoMetaAfter,
            "NoMeta: Predictions after loading saved model were different")
        self.assertEqual(
            accWMetaBefore, accWMetaAfter,
            "WMeta: Predictions after loading saved model were different")
        self.assert_(
            round(accWMetaAfter, 5) in [round(x, 5) for x in expectedAccWMeta])
        self.assert_(
            round(accNoMetaAfter, 5) in
            [round(x, 5) for x in expectedAccNoMeta])

        # Clean up the scratch directory.
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 34
0
    def test_PredictionWithDiffVarOrder(self):
        """CvBayes predictions must not depend on the variable order."""
        expected = [0.33333333300000001, 0.666666667]
        model = AZorngCvBayes.CvBayesLearner()(self.noBadDataTrain)
        # badVarOrderData holds the same examples with shuffled variable order.
        accOrdered = evalUtilities.getClassificationAccuracy(self.noBadDataTest, model)
        accShuffled = evalUtilities.getClassificationAccuracy(self.badVarOrderData, model)

        self.assertEqual(accOrdered, accShuffled)
        self.assert_(round(accOrdered, 5) in [round(x, 5) for x in expected])
Esempio n. 35
0
 def test_PredictionWithDiffVarOrder(self):
     """RF accuracy must be identical for shuffled variable order."""
     expected = 0.81481481499999997  # opencv1.1: 0.77777777800000003
     learner = AZorngRF.RFLearner(NumThreads=1, maxDepth="20", minSample="5",
                                  useSurrogates="false", getVarVariance="false",
                                  nActVars="0", nTrees="100", forestAcc="0.1",
                                  termCrit="0")
     forest = learner(self.noBadDataTrain)
     # Same examples, different variable order in the second set.
     accOrdered = evalUtilities.getClassificationAccuracy(self.noBadDataTest, forest)
     accShuffled = evalUtilities.getClassificationAccuracy(self.badVarOrderData, forest)
     for acc in (accOrdered, accShuffled):
         self.assertEqual(round(acc, 9), round(expected, 9))
Esempio n. 36
0
    def test_MetaDataHandle(self):
        """RF predictions must not depend on meta attributes."""
        expected = 0.81481481499999997  # opencv1.1: 0.77777777800000003
        learner = AZorngRF.RFLearner(NumThreads=1, maxDepth="20", minSample="5",
                                     useSurrogates="false", getVarVariance="false",
                                     nActVars="0", nTrees="100", forestAcc="0.1",
                                     termCrit="0")
        forest = learner(self.NoMetaTrain)

        # NoMetaTest and WMetaTest differ only in the meta attribute.
        accPlain = evalUtilities.getClassificationAccuracy(self.NoMetaTest, forest)
        accMeta = evalUtilities.getClassificationAccuracy(self.WMetaTest, forest)
        self.assertEqual(accPlain, accMeta,
                         "Predictions with and without meta data were different!")
        self.assertEqual(round(accPlain, 9), round(expected, 9))
Esempio n. 37
0
    def test_PredictionWithDiffVarOrder(self):
        """CvBoost accuracy must be identical for shuffled variable order."""
        expected = 0.73333333300000003
        booster = AZorngCvBoost.CvBoostLearner()(self.noBadDataTrain)
        # Same examples, different variable order in the second set.
        accOrdered = evalUtilities.getClassificationAccuracy(self.noBadDataTest, booster)
        accShuffled = evalUtilities.getClassificationAccuracy(self.badVarOrderData, booster)

        for acc in (accOrdered, accShuffled):
            self.assertEqual(round(acc, 9), round(expected, 9))
Esempio n. 38
0
    def test_Priors(self):
        """Test to assure that priors are set correcly.

        Trains an RF with class priors, verifies that saving and loading the
        model keeps its accuracy, and checks the prior weights serialized in
        the model file.
        """
        # Create a RF model
        RFlearner = AZorngRF.RFLearner(NumThreads = 1, maxDepth = "20", minSample = "5", useSurrogates = "false", getVarVariance = "false", \
                                        nActVars = "0", nTrees = "100", forestAcc = "0.1", termCrit = "0", priors = {"Iris-versicolor":0.35, "Iris-virginica":0.13, "Iris-setosa":0.52})
        RFmodel = RFlearner(self.irisData)

        # Calculate classification accuracy
        Acc = evalUtilities.getClassificationAccuracy(self.irisData, RFmodel)

        # Save the model
        scratchdir = os.path.join(AZOC.SCRATCHDIR,
                                  "scratchdirTest" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "modelPriors.RF")
        RFmodel.write(modelPath)

        # Read in the model
        newRFmodel = AZorngRF.RFread(modelPath)

        # Calculate classification accuracy of the loaded model
        savedAcc = evalUtilities.getClassificationAccuracy(
            self.irisData, newRFmodel)

        # Test that the accuracy of the two classifiers is the exact same
        self.assertEqual(Acc, savedAcc)

        # Check the priors saved in the model.
        # FIX: use a context manager and stop shadowing the builtin 'file'.
        with open(os.path.join(modelPath, "model.rf"), "r") as modelFile:
            lines = modelFile.readlines()
        # NOTE: eval() is acceptable only because the file was just written by
        # this test itself (trusted input). The priors span lines 22-23 of the
        # serialized model.
        priors = [
            round(x, 2) for x in eval((lines[22].strip() +
                                       lines[23].strip()).replace("data:", ""))
        ]
        self.assertEqual(len(priors), 3)
        self.assertEqual(
            priors[self.irisData.domain.classVar.values.index("Iris-setosa")],
            0.52)
        self.assertEqual(
            priors[self.irisData.domain.classVar.values.index(
                "Iris-versicolor")], 0.35)
        self.assertEqual(
            priors[self.irisData.domain.classVar.values.index(
                "Iris-virginica")], 0.13)

        # Remove the scratch directory
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 39
0
    def test_PredictionWithDiffVarOrder(self):
        """CvSVM accuracy must be identical for shuffled variable order."""
        expected = 0.7  # 0.59999999999999998 #0.7 # Ver 0.3
        model = AZorngCvSVM.CvSVMLearner(self.noBadDataTrain)
        # Same examples, different variable order in the second set.
        accOrdered = evalUtilities.getClassificationAccuracy(self.noBadDataTest, model)
        accShuffled = evalUtilities.getClassificationAccuracy(self.badVarOrderData, model)

        self.assertEqual(round(accOrdered, 9), round(expected, 9),
                         "The Accuracy is not the expected. Got: " + str(accOrdered))  # Ver 0.3
        self.assertEqual(round(accShuffled, 9), round(expected, 9),
                         "The Accuracy is not the expected. Got: " + str(accShuffled))
Esempio n. 40
0
 def testPredictionWithDiffVarOrder(self):
     """Test Prediction with diff. VarOrder

     PLS must reach the expected accuracy regardless of the variable order
     of the test examples.
     """
     expectedAcc = 0.666666666667  # ver 0.3
     # Create a pls model
     pls = AZorngPLS.PLSLearner(self.noBadDataTrain)
     #using from index 3 o the end of data, because we know that from 0 to 2 the examples are not compatible
     Acc1 = evalUtilities.getClassificationAccuracy(self.noBadDataTest, pls)
     Acc2 = evalUtilities.getClassificationAccuracy(self.badVarOrderData,
                                                    pls)
     # BUG FIX: the first failure message reported Acc2 for a failure on Acc1.
     self.assertEqual(round(Acc1, 9), round(expectedAcc, 9),
                      "The Accuracy is not the expected. Got: " + str(Acc1))
     self.assertEqual(round(Acc2, 9), round(expectedAcc, 9),
                      "The Accuracy is not the expected. Got: " + str(Acc2))
Esempio n. 41
0
 def test_PredictionWithDiffVarType(self):
     """Test prediction with diff. VarType

     Examples whose variables have a fixable type mismatch must still be
     predicted with the expected accuracy, and the fix must be logged in
     examplesFixedLog.
     """
     expectedAcc = 0.703704
     # Create a Boost model
     CvBoostlearner = AZorngCvBoost.CvBoostLearner()
     Boost = CvBoostlearner(self.noBadDataTrain)
     #using from index 3 o the end of data, because we know that from 0 to 2 the examples are not compatible
     Acc2 = evalUtilities.getClassificationAccuracy(self.noBadDataTest[3:], Boost)
     Acc1 = evalUtilities.getClassificationAccuracy(self.badVarTypeData[3:], Boost)
     self.assertEqual(round(Acc1, 6), round(expectedAcc, 6))
     self.assertEqual(round(Acc2, 6), round(expectedAcc, 6))
     self.assert_(('Fixed Types of variables' in Boost.examplesFixedLog) and (Boost.examplesFixedLog['Fixed Types of variables'] == 27), "No report of fixing in classifier class")
     # BUG FIX: the message string was inside the asserted expression, turning
     # the second operand into an always-truthy tuple so the value check never ran.
     self.assert_(('Vars needing type fix' in Boost.examplesFixedLog) and (Boost.examplesFixedLog['Vars needing type fix']['[Br]([C])'] == "EnumVariable to FloatVariable"), "No report of fixing in classifier class")
Esempio n. 42
0
 def test_PredictionWithDiffVarType(self):
     """Test prediction with diff. VarType

     Examples whose variables have a fixable type mismatch must still be
     predicted with the expected accuracy, and the fix must be logged in
     examplesFixedLog.
     """
     expectedAcc = 0.66666700000000001  # Ver 0.3
     # Create a ann model
     CvANNlearner = AZorngCvANN.CvANNLearner(randomWeights = False, nHidden = [3], nEpochs = 100,stopUPs=0)
     ann = CvANNlearner(self.noBadDataTrain)
     #using from index 3 o the end of data, because we know that from 0 to 2 the examples are not compatible
     Acc2 = evalUtilities.getClassificationAccuracy(self.noBadDataTest[3:], ann)
     Acc1 = evalUtilities.getClassificationAccuracy(self.badVarTypeData[3:], ann)
     self.assertEqual(round(Acc1, 6), round(expectedAcc, 6))
     self.assertEqual(round(Acc2, 6), round(expectedAcc, 6))
     self.assert_(('Fixed Types of variables' in ann.examplesFixedLog) and (ann.examplesFixedLog['Fixed Types of variables'] == 27), "No report of fixing in classifier class")
     # BUG FIX: the message string was inside the asserted expression, turning
     # the second operand into an always-truthy tuple so the value check never ran.
     self.assert_(('Vars needing type fix' in ann.examplesFixedLog) and (ann.examplesFixedLog['Vars needing type fix']['[Br]([C])'] == "EnumVariable to FloatVariable"), "No report of fixing in classifier class")
Esempio n. 43
0
 def test_PredictionWithDiffVarType(self):
     """Test prediction with diff. VarType

     Examples whose variables have a fixable type mismatch must still be
     predicted with the expected accuracy, and the fix must be logged in
     examplesFixedLog.
     """
     expectedAcc = 0.66666700000000001  # Ver 0.3
     # Create a ann model
     CvANNlearner = AZorngCvANN.CvANNLearner(randomWeights = False, nHidden = [3], nEpochs = 100,stopUPs=0)
     ann = CvANNlearner(self.noBadDataTrain)
     #using from index 3 o the end of data, because we know that from 0 to 2 the examples are not compatible
     Acc2 = evalUtilities.getClassificationAccuracy(self.noBadDataTest[3:], ann)
     Acc1 = evalUtilities.getClassificationAccuracy(self.badVarTypeData[3:], ann)
     self.assertEqual(round(Acc1, 6), round(expectedAcc, 6))
     self.assertEqual(round(Acc2, 6), round(expectedAcc, 6))
     self.assert_(('Fixed Types of variables' in ann.examplesFixedLog) and (ann.examplesFixedLog['Fixed Types of variables'] == 27), "No report of fixing in classifier class")
     # BUG FIX: the message string was inside the asserted expression, turning
     # the second operand into an always-truthy tuple so the value check never ran.
     self.assert_(('Vars needing type fix' in ann.examplesFixedLog) and (ann.examplesFixedLog['Vars needing type fix']['[Br]([C])'] == "EnumVariable to FloatVariable"), "No report of fixing in classifier class")
Esempio n. 44
0
    def test_SVMD(self):
        """Round-trip save/load of CvSVM models plus a 5-fold CV sanity check."""
        # Train an SVM via the learner's call-with-data shortcut.
        model = AZorngCvSVM.CvSVMLearner(
            self.inDataD, scaleData=False, gamma=4, C=1, nu=0.5, p=0.1,
            eps=0.001, coef0=0, degree=3)
        accTrained = evalUtilities.getClassificationAccuracy(self.inDataD, model)
        self.assertEqual(round(accTrained, 7), round(0.986666666667, 7))

        # Persist and reload; accuracy must survive the round trip.
        self.assertEqual(model.write(self.modelPath), True)
        reloaded = AZorngCvSVM.CvSVMread(self.modelPath)
        accLoaded = evalUtilities.getClassificationAccuracy(self.inDataD, reloaded)
        self.assertEqual(accTrained, accLoaded)

        # Configure a second learner attribute-by-attribute.
        learner = AZorngCvSVM.CvSVMLearner(scaleData=False)
        learner.name = "CvSVMLearner"
        learner.eps = 0.001
        learner.p = 0.0
        learner.nu = 0.6
        learner.kernel_type = 2
        learner.svm_type = 101
        learner.gamma = 0.0033
        learner.C = 47
        learner.scaleData = True
        learner.scaleClass = False

        # 5-fold stratified cross validation.
        res = orngTest.crossValidation(
            [learner], self.inDataD, folds=5,
            strat=orange.MakeRandomIndices.StratifiedIfPossible)
        self.assertEqual(round(evalUtilities.CA(res)[0], 2),
                         round(0.96666666666666667, 2))  # Before in AZSVM: 0.95999999999999996

        # Train with the configured learner and round-trip it as well.
        secondModel = learner(self.inDataD)
        accTrained2 = evalUtilities.getClassificationAccuracy(self.inDataD, secondModel)
        self.assertEqual(secondModel.write(self.modelPath), True)
        reloaded2 = AZorngCvSVM.CvSVMread(self.modelPath)
        accLoaded2 = evalUtilities.getClassificationAccuracy(self.inDataD, reloaded2)
        self.assertEqual(round(accTrained2, 7), round(0.96666669999999999, 7))  # Before in AZSVM: 0.953333300000
        self.assertEqual(round(accTrained2, 1), round(accLoaded2, 1))
Esempio n. 45
0
    def test_Priors(self):
        """Test to assure that priors are set correcly.

        Trains an RF with class priors, verifies that saving and loading the
        model keeps its accuracy, and checks the prior weights serialized in
        the model file.
        """
        # Create a RF model
        RFlearner = AZorngRF.RFLearner(
            NumThreads=1,
            maxDepth="20",
            minSample="5",
            useSurrogates="false",
            getVarVariance="false",
            nActVars="0",
            nTrees="100",
            forestAcc="0.1",
            termCrit="0",
            priors={"Iris-versicolor": 0.35, "Iris-virginica": 0.13, "Iris-setosa": 0.52},
        )
        RFmodel = RFlearner(self.irisData)

        # Calculate classification accuracy
        Acc = evalUtilities.getClassificationAccuracy(self.irisData, RFmodel)

        # Save the model
        scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdirTest" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "modelPriors.RF")
        RFmodel.write(modelPath)

        # Read in the model
        newRFmodel = AZorngRF.RFread(modelPath)

        # Calculate classification accuracy of the loaded model
        savedAcc = evalUtilities.getClassificationAccuracy(self.irisData, newRFmodel)

        # Test that the accuracy of the two classifiers is the exact same
        self.assertEqual(Acc, savedAcc)

        # Check the priors saved in the model.
        # FIX: use a context manager and stop shadowing the builtin 'file'.
        with open(os.path.join(modelPath, "model.rf"), "r") as modelFile:
            lines = modelFile.readlines()
        # NOTE: eval() is acceptable only because the file was just written by
        # this test itself (trusted input). The priors span lines 22-23 of the
        # serialized model.
        priors = [round(x, 2) for x in eval((lines[22].strip() + lines[23].strip()).replace("data:", ""))]
        self.assertEqual(len(priors), 3)
        self.assertEqual(priors[self.irisData.domain.classVar.values.index("Iris-setosa")], 0.52)
        self.assertEqual(priors[self.irisData.domain.classVar.values.index("Iris-versicolor")], 0.35)
        self.assertEqual(priors[self.irisData.domain.classVar.values.index("Iris-virginica")], 0.13)

        # Remove the scratch directory
        os.system("/bin/rm -rf " + scratchdir)
Esempio n. 46
0
    def test_PredictionWithIncompatibleDomain(self):
        """Examples from an incompatible domain must not be predicted by the
        SVM, while fixable type mismatches still yield a prediction.
        """
        expectedAcc1 = 0.7  #Ver 0.3
        # Train an SVM on the clean training set.
        model = AZorngCvSVM.CvSVMLearner(self.noBadDataTrain)
        # Examples 0-2 of the "bad" sets are known to be incompatible.
        acc = evalUtilities.getClassificationAccuracy(self.noBadDataTest, model)

        self.assertEqual(round(acc, 9), round(expectedAcc1, 9),
                         "The Accuracy is not the expected. Got: " + str(acc))
        # A wrong variable *type* is repairable, so a prediction is returned.
        self.assertEqual(model(self.badVarTypeData[0]), 'NEG',
                         "This example could still be predicted. Got: " +
                         str(model(self.badVarTypeData[0])))  #Ver 0.3
        self.assertEqual(
            model(self.badVarTypeData[1]), 'NEG',
            "This example could still be predicted. Got: " +
            str(model(self.badVarTypeData[1])))
        # A wrong variable *name* or *count* is not repairable: no prediction.
        self.assertEqual(
            model(self.badVarNameData[0]), None,
            "This example should NOT be predicted. Got: " +
            str(model(self.badVarNameData[0])))
        self.assertEqual(
            model(self.badVarCountData[0]), None,
            "This example should NOT be predicted. Got: " +
            str(model(self.badVarCountData[0])))
Esempio n. 47
0
 def test_PredictionWithIncompatibleDomain(self):
     """Examples from an incompatible domain must not be predicted by the
     RF, while fixable type mismatches still yield a prediction.
     """
     expectedAcc1 = 0.96666666700000003  # Ver 0.3
     # Train a random forest on the clean training set.
     learner = AZorngRF.RFLearner(
         NumThreads=1, maxDepth="20", minSample="5", useSurrogates="false",
         getVarVariance="false", nActVars="0", nTrees="100",
         forestAcc="0.1", termCrit="0")
     forest = learner(self.noBadDataTrain)
     # Examples 0-2 of the "bad" sets are known to be incompatible.
     acc = evalUtilities.getClassificationAccuracy(self.noBadDataTest, forest)
     self.assertEqual(round(acc, 9), round(expectedAcc1, 9))
     # Wrong variable type is repairable -> prediction returned.
     self.assertEqual(forest(self.badVarTypeData[0]), "NEG", "This example could still be predicted")
     self.assertEqual(forest(self.badVarTypeData[1]), "NEG", "This example could still be predicted")
     # Wrong variable name/count is not repairable -> no prediction.
     self.assertEqual(forest(self.badVarNameData[0]), None, "This example should NOT be predicted")
     self.assertEqual(forest(self.badVarCountData[0]), None, "This example should NOT be predicted")
Esempio n. 48
0
    def test_ImputeTrain(self):
        """
        RF training must succeed on data containing missing values; the
        correctness of the imputation itself is covered by testImpute.
        """
        expected = [
            0.95757999999999999,
            0.95455000000000001,
        ]  # Ver 0.3 - Artifact: The second value can be expected on other Systems
        learner = AZorngRF.RFLearner(
            NumThreads=1, maxDepth="20", minSample="5", useSurrogates="false",
            getVarVariance="false", nActVars="0", nTrees="100",
            forestAcc="0.001", termCrit="0")

        forest = learner(self.missingTrain)

        acc = evalUtilities.getClassificationAccuracy(self.missingTest, forest)

        self.assert_(round(acc, 5) in [round(x, 5) for x in expected])  # Ver 0.3
Esempio n. 49
0
    def test_PredictionWithDiffVarOrder(self):
        """CvBayes predictions must agree for shuffled variable order."""
        expected = [0.33333333300000001, 0.666666667]
        learner = AZorngCvBayes.CvBayesLearner()
        bayes = learner(self.noBadDataTrain)
        # Same examples, different variable order in the second set.
        accOrdered = evalUtilities.getClassificationAccuracy(self.noBadDataTest,
                                                             bayes)
        accShuffled = evalUtilities.getClassificationAccuracy(self.badVarOrderData,
                                                              bayes)

        self.assertEqual(accOrdered, accShuffled)
        self.assert_(round(accOrdered, 5) in [round(x, 5) for x in expected])
Esempio n. 50
0
    def test_MetaDataHandleForSavingModel(self):
        """Saving and re-loading a CvSVM model trained on data with meta
        attributes must not change its predictions.
        """

        # The training data must actually carry meta attributes.
        self.assert_(
            len(self.WMetaTest.domain.getmetas()) >= 1,
            "The dataset WMetaTest should have Meta Attributes")
        modelBefore = AZorngCvSVM.CvSVMLearner(self.WMetaTest)
        accNoMetaBefore = evalUtilities.getClassificationAccuracy(
            self.NoMetaTrain, modelBefore)
        accWMetaBefore = evalUtilities.getClassificationAccuracy(
            self.WMetaTest, modelBefore)

        # Persist the model to a scratch area.
        scratchdir = os.path.join(AZOC.SCRATCHDIR,
                                  "scratchdirSVMtest" + str(time.time()))
        os.mkdir(scratchdir)
        modelPath = os.path.join(scratchdir, "CvSVMModel")
        modelBefore.write(modelPath)

        # Reload; the stored domain must have lost its meta attributes.
        modelAfter = AZorngCvSVM.CvSVMread(modelPath)
        self.assert_(
            len(modelAfter.imputer.defaults.domain.getmetas()) == 0,
            "There shouldn't be any Meta data now!")

        accNoMetaAfter = evalUtilities.getClassificationAccuracy(
            self.NoMetaTrain, modelAfter)
        accWMetaAfter = evalUtilities.getClassificationAccuracy(
            self.WMetaTest, modelAfter)

        # Accuracy must be unchanged by the save/load round trip.
        self.assertEqual(
            accNoMetaBefore, accNoMetaAfter,
            "NoMeta: Predictions after loading saved model were different")
        self.assertEqual(
            accWMetaBefore, accWMetaAfter,
            "WMeta: Predictions after loading saved model were different")
        self.assertEqual(round(accWMetaAfter, 9), round(0.7, 9),
                         "Accuracy was not the expected value!")
        self.assertEqual(round(accNoMetaAfter, 9), round(0.6, 9),
                         "Accuracy was not the expected value!")

        # Clean up the scratch directory.
        os.system("/bin/rm -rf " + scratchdir)
# --- Example 51 (snippet separator; vote count: 0) ---
    def test_PredictionWithDiffVarOrder(self):
        """Test Prediction with diff. VarOrder
        Test the prediction  examples with different varOrder
        """
        expectedAcc = 0.7  # 0.59999999999999998 #0.7 # Ver 0.3
        # Train an SVM classifier on the well-formed training data.
        classifier = AZorngCvSVM.CvSVMLearner(self.noBadDataTrain)

        # Evaluate first on the regular test set, then on the set whose
        # attribute order was shuffled; both must reach the expected value.
        accuracies = [
            evalUtilities.getClassificationAccuracy(self.noBadDataTest,
                                                    classifier),
            evalUtilities.getClassificationAccuracy(self.badVarOrderData,
                                                    classifier),
        ]
        for acc in accuracies:
            self.assertEqual(
                round(acc, 9), round(expectedAcc, 9),
                "The Accuracy is not the expected. Got: " + str(acc))  #Ver 0.3
# --- Example 52 (snippet separator; vote count: 0) ---
    def test_MetaDataHandle(self):
        """Test the handling of Data with Meta Atributes

        Predictions must be identical whether or not the evaluation data
        carries a meta attribute (NoMetaTest and WMetaTest hold the same
        examples apart from that attribute).
        """
        # Create an svm model
        svm = AZorngCvSVM.CvSVMLearner(self.NoMetaTrain)

        # Calculate classification accuracy (NoMetaTest and WMeta are the same appart from the meta atribute)
        AccNoMeta = evalUtilities.getClassificationAccuracy(
            self.NoMetaTest, svm)
        AccWMeta = evalUtilities.getClassificationAccuracy(self.WMetaTest, svm)

        self.assertEqual(
            AccNoMeta, AccWMeta,
            "Predictions with and without meta data were different!")
        # Bug fix: the failure message previously ended with a dangling
        # "Got: " and never reported the actual accuracy.
        self.assertEqual(
            round(AccNoMeta, 9), round(0.7, 9),
            "Accuracy was not the expected value! Got: " + str(AccNoMeta))  #Ver 0.3
# --- Example 53 (snippet separator; vote count: 0) ---
    def testMetaDataHandle(self):
        """Test the handling of Data with Meta Atributes
        """
        # Train a PLS model on the data without meta attributes.
        model = AZorngPLS.PLSLearner(self.NoMetaTrain)

        # NoMetaTest and WMetaTest hold the same examples; the only
        # difference is the meta attribute, which must not affect scoring.
        withoutMeta = evalUtilities.getClassificationAccuracy(
            self.NoMetaTest, model)
        withMeta = evalUtilities.getClassificationAccuracy(
            self.WMetaTest, model)

        self.assertEqual(
            withoutMeta, withMeta,
            "Predictions with and without meta data were different!")
        self.assertEqual(
            round(withoutMeta, 9), round(0.666666666667, 9),
            "Accuracy was not the expected value! Got: " + str(withoutMeta))
# --- Example 54 (snippet separator; vote count: 0) ---
 def test_PredictionWithDiffVarOrder(self):
     """Test Prediction with diff. VarOrder
     Test the prediction  examples with different varOrder
     """
     # Accuracies observed across supported platforms (Ver 0.3).
     acceptable = [
         0.96666666700000003,
         1.0,
     ]
     # Build an RF model with the canonical test parameters.
     learner = AZorngRF.RFLearner(NumThreads=1,
                                  maxDepth="20",
                                  minSample="5",
                                  useSurrogates="false",
                                  getVarVariance="false",
                                  nActVars="0",
                                  nTrees="100",
                                  forestAcc="0.1",
                                  termCrit="0")
     model = learner(self.noBadDataTrain)
     # Predictions must not depend on the attribute order of the data.
     accOrdered = evalUtilities.getClassificationAccuracy(
         self.noBadDataTest, model)
     accShuffled = evalUtilities.getClassificationAccuracy(
         self.badVarOrderData, model)
     self.assertRoundedToExpectedArray(accOrdered, acceptable, 9)
     self.assertRoundedToExpectedArray(accShuffled, acceptable, 9)
# --- Example 55 (snippet separator; vote count: 0) ---
 def test_PersistentClassAcc(self):
     """
     Assure that the accuracy is perserved for models trained in the same way. 
     """
     # Train a boosting classifier in a single step.
     classifier = AZorngCvBoost.CvBoostLearner(self.train_data)
     # Its accuracy on the fixed test set must match the recorded value.
     accuracy = evalUtilities.getClassificationAccuracy(self.test_data,
                                                        classifier)
     self.assertEqual(round(0.97143000000000002, 5), round(accuracy, 5))
# --- Example 56 (snippet separator; vote count: 0) ---
    def test_SavedModel(self):
        """Test to assure that a saved RF model gives the same predictions as before saving.

        Also verifies that the serialized model file stores equal (0.50/0.50)
        class priors for the POS and NEG classes.
        """
        import shutil  # local import: used only for scratch-dir cleanup

        # Create a RF model
        RFlearner = AZorngRF.RFLearner(maxDepth="20",
                                       minSample="5",
                                       useSurrogates="false",
                                       getVarVariance="false",
                                       nActVars="0",
                                       nTrees="100",
                                       forestAcc="0.1",
                                       termCrit="0")
        RFmodel = RFlearner(self.trainData)

        # Calculate classification accuracy
        Acc = evalUtilities.getClassificationAccuracy(self.testData, RFmodel)

        # Save the model in a unique scratch directory.
        scratchdir = os.path.join(AZOC.SCRATCHDIR,
                                  "scratchdirTest" + str(time.time()))
        os.mkdir(scratchdir)
        try:
            modelPath = os.path.join(scratchdir, "model.RF")
            RFmodel.write(modelPath)

            # Read in the model
            newRFmodel = AZorngRF.RFread(modelPath)

            # Calculate classification accuracy
            savedAcc = evalUtilities.getClassificationAccuracy(
                self.testData, newRFmodel)

            # Test that the accuracy of the two classifiers is the exact same
            self.assertEqual(Acc, savedAcc)

            # Check the priors saved in the model.  Renamed "file" ->
            # "modelFile" (the original shadowed the builtin) and use a
            # context manager so the handle closes even on error.
            with open(os.path.join(modelPath, "model.rf"), "r") as modelFile:
                lines = modelFile.readlines()
            # NOTE(review): eval() on file content is only tolerable here
            # because the file was produced by this test itself; never use
            # this pattern on untrusted input.
            priors = [
                round(x, 2)
                for x in eval((lines[22].strip()).replace("data:", ""))
            ]
            self.assertEqual(len(priors), 2)
            self.assertEqual(
                priors[self.testData.domain.classVar.values.index("POS")],
                0.50)
            self.assertEqual(
                priors[self.testData.domain.classVar.values.index("NEG")],
                0.50)
        finally:
            # Remove the scratch directory even when an assertion fails
            # (the original "rm -rf" via os.system leaked it on failure
            # and was non-portable).
            shutil.rmtree(scratchdir, ignore_errors=True)
# --- Example 57 (snippet separator; vote count: 0) ---
 def test_PersistentClassAcc(self):
     """
     Assure that the accuracy is perserved for models trained in the same way. 
     """
     # Build the ANN classifier in a single step.
     classifier = AZorngCvANN.CvANNLearner(self.train_data,
                                           nHidden=[3],
                                           stopUPs=0)
     # The accuracy on the fixed test set must match the recorded value.
     accuracy = evalUtilities.getClassificationAccuracy(self.test_data,
                                                        classifier)
     self.assertEqual(round(0.92381000000000002, 5),
                      round(accuracy, 5))  #orange1.0  0.95555999999999996,5
# --- Example 58 (snippet separator; vote count: 0) ---
    def test_ImputeTrain(self):
        """
        Assure that imputation works for the svm models. Test on data with missing values
        This test just assures the the model is trained. The correct imputation test is made on testImpute
        """
        # The training data contains missing values; the learner's built-in
        # imputation must cope with them without raising.
        classifier = AZorngCvSVM.CvSVMLearner(self.missingTrain)

        accuracy = evalUtilities.getClassificationAccuracy(self.missingTest,
                                                           classifier)
        # Reference accuracy recorded for Ver 0.3.
        self.assertEqual(round(0.59999999999999998, 5), round(accuracy, 5))