def test_Priors(self):
    """Test that class priors are set correctly and preserved across save/load."""
    # Model trained with explicit class priors.
    CvANNlearner = AZorngCvANN.CvANNLearner(stopUPs = 0, priors = {"Iris-versicolor":0.35, "Iris-virginica":0.13, "Iris-setosa":0.52})
    CvANNmodel = CvANNlearner(self.irisData)
    # Model with no priors, for contrast.
    CvANNlearnerNoP = AZorngCvANN.CvANNLearner(stopUPs=0)
    CvANNmodelNoP = CvANNlearnerNoP(self.irisData)
    # Accuracy of the priors model before saving.
    Acc = evalUtilities.getClassificationAccuracy(self.irisData, CvANNmodel)
    # Save the model
    scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdirTest"+str(time.time()))
    os.mkdir(scratchdir)
    modelPath = os.path.join(scratchdir,"modelPriors.CvANN")
    CvANNmodel.write(modelPath)
    # Read in the model
    newCvANNmodel = AZorngCvANN.CvANNread(modelPath)
    # BUG FIX: the post-load accuracy must be computed with the LOADED model
    # (newCvANNmodel), not the original CvANNmodel; the old code compared the
    # in-memory model against itself, so the round trip was never verified.
    savedAcc = evalUtilities.getClassificationAccuracy(self.irisData, newCvANNmodel)
    NoPAcc = evalUtilities.getClassificationAccuracy(self.irisData, CvANNmodelNoP)
    # The loaded model must predict exactly like the one in memory ...
    self.assertEqual(Acc, savedAcc)
    # ... and the priors must actually influence the predictions.
    self.assert_(Acc != NoPAcc)
    # Remove the scratch directory
    os.system("/bin/rm -rf "+scratchdir)
def test_CreateDefaultClassifierUsingTrainingData(self):
    """ Test the creation of default Classifier by calling learner with training data. """
    # Arrange
    baseLearners = [AZorngCvSVM.CvSVMLearner(),
                    AZorngCvANN.CvANNLearner(),
                    AZorngRF.RFLearner()]
    data = self.getRegressionTrainingData()
    consensus = AZorngConsensus.ConsensusLearner(learners = baseLearners)

    # Act
    model = consensus(data)

    # Assert: a classifier was produced with all default attributes in place.
    self.assertNotEqual(model, None)
    self.assertEqual(len(model.classifiers), len(baseLearners))
    self.assertEqual(model.expression, None)
    self.assertEqual(model.name, "Consensus classifier")
    self.assertEqual(model.verbose, 0)
    self.assertNotEqual(model.imputeData, None)
    self.assertEqual(model.NTrainEx, len(data))
    self.assertNotEqual(model.basicStat, None)
    self.assertEqual(model.weights, None)
def test_CreateModelWithLearnerDictionary(self):
    """ Test the creation of Consensus Model using dictionary of learners """
    # Arrange
    learnerMap = {'a': AZorngCvSVM.CvSVMLearner(),
                  'b': AZorngCvANN.CvANNLearner(),
                  'c': AZorngRF.RFLearner()}
    expr = "a + b + c"

    # Act
    consensus = AZorngConsensus.ConsensusLearner(learners = learnerMap, expression = expr)

    # Assert: every supplied learner is kept under its own key.
    for key in learnerMap:
        self.assertEqual(consensus.learners[key], learnerMap[key])
    self.assertEqual(consensus.expression, expr)
    self.assertEqual(consensus.name, "Consensus learner")
    self.assertEqual(consensus.verbose, 0)
    self.assertEqual(consensus.imputeData, None)
    self.assertEqual(consensus.NTrainEx, 0)
    self.assertEqual(consensus.basicStat, None)
    self.assertEqual(consensus.weights, None)
def test_SavedModel(self):
    """Test to assure that a saved ann model gives the same predictions as before saving."""
    # Build the classifier in a single step and score it.
    model = AZorngCvANN.CvANNLearner(self.train_data, stopUPs = 0)
    accBefore = evalUtilities.getClassificationAccuracy(self.test_data, model)
    # Persist the model to a temporary location.
    scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdir"+str(time.time()))
    os.mkdir(scratchdir)
    modelPath = os.path.join(scratchdir,"ann.cvann")
    model.write(modelPath)
    # Reload and re-score.
    model = AZorngCvANN.CvANNread(modelPath)
    accAfter = evalUtilities.getClassificationAccuracy(self.test_data, model)
    # Predictions must be unchanged by the save/load round trip.
    self.assertEqual(accBefore, accAfter)
    # Remove the scratch directory.
    os.system("/bin/rm -rf "+scratchdir)
def test_MetaDataHandleForSavingModel(self):
    """Test the handling of SaveModel for Data with Meta Atributes """
    expectedAccWMeta = 1.0                     # Ver 0.3
    expectedAccNoMeta = 0.63333333300000005    # Ver 0.3
    # The training set must actually carry meta attributes for this test.
    self.assert_(len(self.WMetaTest.domain.getmetas())>=1,"The dataset WMetaTest should have Meta Attributes")
    # Train on the data WITH meta attributes.
    learner = AZorngCvANN.CvANNLearner(randomWeights = False, nHidden = [3], nEpochs = 100,stopUPs=0)
    model = learner(self.WMetaTest)
    accNoMetaBefore = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, model)
    accWMetaBefore = evalUtilities.getClassificationAccuracy(self.WMetaTest, model)
    # Persist the model.
    scratchdir = os.path.join(AZOC.SCRATCHDIR, "scratchdiriTest"+str(time.time()))
    os.mkdir(scratchdir)
    modelPath = os.path.join(scratchdir,"CvANNModel.CvANN")
    model.write(modelPath)
    # Reload; the imputer stored with the model must have dropped the metas.
    loaded = AZorngCvANN.CvANNread(modelPath)
    self.assert_(len(loaded.imputer.defaults.domain.getmetas())==0,"There shouldn't be any Meta data now!")
    accNoMetaAfter = evalUtilities.getClassificationAccuracy(self.NoMetaTrain, loaded)
    accWMetaAfter = evalUtilities.getClassificationAccuracy(self.WMetaTest, loaded)
    # Accuracy must survive the save/load round trip ...
    self.assertEqual(accNoMetaBefore, accNoMetaAfter,"NoMeta: Predictions after loading saved model were different")
    self.assertEqual(accWMetaBefore, accWMetaAfter, "WMeta: Predictions after loading saved model were different")
    # ... and match the historical reference values.
    self.assertEqual(round(accWMetaAfter,9), round(expectedAccWMeta,9))
    self.assertEqual(round(accNoMetaAfter,9), round(expectedAccNoMeta,9))
    # Remove the scratch directory.
    os.system("/bin/rm -rf "+scratchdir)
def test_CreateDefaultClassifierUsingPreTrainedRegressionClassifiers(self):
    """ Test the creation of custom Consensus Classifier using pre-trained regression classifiers. """
    # Arrange: train one classifier per named learner.
    learnerMap = {'a': AZorngCvSVM.CvSVMLearner(),
                  'b': AZorngCvANN.CvANNLearner(),
                  'c': AZorngRF.RFLearner()}
    trained = {}
    for name in learnerMap:
        trained[name] = learnerMap[name](self.getRegressionTrainingData())
    expr = "a + b + c"

    # Act
    classifier = AZorngConsensus.ConsensusClassifier(classifiers = trained,
                                                     expression = expr)

    # Assert
    self.assertNotEqual(classifier, None)
    self.assertEqual(len(classifier.classifiers), len(learnerMap))
    self.assertNotEqual(classifier.basicStat, None)
    self.assertNotEqual(classifier.classVar, None)
    self.assertNotEqual(classifier.domain, None)
    self.assertEqual(classifier.expression, expr)
    self.assertNotEqual(classifier.imputeData, None)
    #self.assertEqual(classifier.NTrainEx, len(trainingData))
    self.assertEqual(classifier.name, "Consensus classifier")
    self.assertNotEqual(classifier.varNames, None)
    self.assertEqual(classifier.verbose, 0)
    self.assertEqual(classifier.weights, None)
def test_CreateDefaultClassifierUsingPreTrainedClassifiersList(self):
    """ Test the creation of default Consensus Classifier using a plain LIST of
    pre-trained classifiers and no expression.

    BUG FIX: this method previously had the same name as the dictionary-based
    test (test_CreateDefaultClassifierUsingPreTrainedRegressionClassifiers);
    the duplicate definition silently shadowed the earlier one so it never
    ran.  Renamed to describe what it actually tests.
    """
    # Arrange
    learners = [AZorngCvSVM.CvSVMLearner(),
                AZorngCvANN.CvANNLearner(),
                AZorngRF.RFLearner()]
    classifiers = [l(self.getRegressionTrainingData()) for l in learners]

    # Act
    classifier = AZorngConsensus.ConsensusClassifier(classifiers = classifiers)

    # Assert
    self.assertNotEqual(classifier, None)
    self.assertEqual(len(classifier.classifiers), len(learners))
    self.assertNotEqual(classifier.basicStat, None)
    self.assertNotEqual(classifier.classVar, None)
    self.assertNotEqual(classifier.domain, None)
    self.assertEqual(classifier.expression, None)
    self.assertNotEqual(classifier.imputeData, None)
    #self.assertEqual(classifier.NTrainEx, len(trainingData))
    self.assertEqual(classifier.name, "Consensus classifier")
    self.assertNotEqual(classifier.varNames, None)
    self.assertEqual(classifier.verbose, 0)
    self.assertEqual(classifier.weights, None)
def test_CanPersistClassificationModelProbabilities(self): """Test the save/load for a classification model - Using probabilities average""" # Arrange learners = [AZorngRF.RFLearner(), AZorngCvANN.CvANNLearner()] learner = AZorngConsensus.ConsensusLearner(learners=learners) classifier = learner(self.irisData) # Act predictions = [] for ex in self.irisData: predictions.append(classifier(ex)) scratchdir = miscUtilities.createScratchDir( desc="ConsensusSaveLoadTest") print scratchdir classifier.write(os.path.join(scratchdir, "./CM.model")) # Assert predictionsL = [] Loaded = AZorngConsensus.Consensusread( os.path.join(scratchdir, "./CM.model")) for ex in self.irisData: predictionsL.append(Loaded(ex)) self.assertEqual(predictions, predictionsL) self.assertEqual(len(Loaded.domain), len(self.irisData.domain)) self.assertEqual(len(Loaded.imputeData), len(Loaded.domain)) self.assertEqual(len(Loaded.basicStat), len(Loaded.domain)) self.assertEqual(Loaded.NTrainEx, len(self.irisData)) miscUtilities.removeDir(scratchdir)
def test_CreateLogicalExpressionConsensusLearner(self): """ Test creation of logical expression consensus learner """ # Arrange # Construct expression learner/classifier learners = { 'firstLearner': AZorngCvSVM.CvSVMLearner(), 'secondLearner': AZorngCvANN.CvANNLearner(), 'thirdLearner': AZorngRF.RFLearner() } discreteExpression = [ "firstLearner == Iris-setosa -> Iris-setosa", "-> Iris-virginica" ] discreteLearner = AZorngConsensus.ConsensusLearner( learners=learners, expression=discreteExpression) discreteClassifier = discreteLearner(self.irisData) verifiedLearner = AZorngCvSVM.CvSVMLearner() verifiedClassifier = verifiedLearner(self.irisData) # Act result = [] verifiedResult = [] for ex in self.irisData: result.append(discreteClassifier(ex)) verifiedResult.append(verifiedClassifier(ex)) # Assert for index, item in enumerate(result): if not result[index].value == verifiedResult[index].value: print "Not equal on index: ", index self.assertEqual(result[index].value, verifiedResult[index].value)
def test_DFV(self):
    """ Test the Decision Function Value Return"""
    CvANN = AZorngCvANN.CvANNLearner(self.LdataTrain,stopUPs=0)
    # Testing WITH return of the DFV (decision function value).
    RDFV = True
    for ex in self.LdataTest:
        predictedClass = CvANN(ex)
        a = CvANN(ex,returnDFV = RDFV)
        b = CvANN(ex,resultType = orange.GetProbabilities,returnDFV = RDFV)
        c = CvANN(ex,resultType = orange.GetBoth,returnDFV = RDFV)
        # All must return tuples
        self.assert_(type(a)==type(b)==type(c)==tuple)
        # Second element of the tuple must be the DFV
        self.assert_(type(a[1])==type(b[1])==type(c[1])==float)
        self.assert_(a[1]==b[1]==c[1])
        # Check that the class can always be predicted from the DFV alone:
        # positive values correspond to the first element of the class
        # variable, negative values to the second element.
        if a[1] > 0:
            guessedClass = ex.domain.classVar[0]
        else:
            guessedClass = ex.domain.classVar[1]
        self.assertEqual(predictedClass,guessedClass)
        # Asking for GetValue
        self.assert_(type(a[0])==orange.Value)
        # Asking for GetProbabilities
        self.assert_(type(b[0])==orange.DiscDistribution)
        # Asking for GetBoth...
        self.assert_(type(c[0])==tuple)
        # ... where first element is the orange value...
        self.assert_(type(c[0][0])==orange.Value)
        # ... and second element is the distribution (so called probabilities)
        self.assert_(type(c[0][1])==orange.DiscDistribution)
    # CvANN does always return real probabilities on binary classification
    self.assertEqual(CvANN.isRealProb(),True)
    expectedExtremes = {'max': 0.5, 'min':-0.5 }
    self.assertEqual([round(x,5) for x in CvANN.getDFVExtremes().values()],[round(x,5) for x in expectedExtremes.values()])
    # 4 predictions were made per test example in the loop above.
    self.assertEqual(CvANN.nPredictions,4*len(self.LdataTest))
    # Testing WITHOUT return of the DFV.
    RDFV = False
    for ex in self.LdataTest:
        a = CvANN(ex,returnDFV = RDFV)
        b = CvANN(ex,resultType = orange.GetProbabilities,returnDFV = RDFV)
        c = CvANN(ex,resultType = orange.GetBoth,returnDFV = RDFV)
        # Asking for GetValue
        self.assert_(type(a)==orange.Value)
        # Asking for GetProbabilities
        self.assert_(type(b)==orange.DiscDistribution)
        # Asking for GetBoth...
        self.assert_(type(c)==tuple)
        # ... where first element is the orange value...
        self.assert_(type(c[0])==orange.Value)
        # ... and second element is the distribution (so called probabilities)
        self.assert_(type(c[1])==orange.DiscDistribution)
    # CvANN does always return real probabilities on binary classification
    self.assertEqual(CvANN.isRealProb(),True)
    self.assertEqual([round(x,5) for x in CvANN.getDFVExtremes().values()],[round(x,5) for x in expectedExtremes.values()])
    # 3 more predictions per example were made in this loop, on top of the 4 above.
    self.assertEqual(CvANN.nPredictions,(3+4)*len(self.LdataTest))
def test_CreateCustomClassificationClassifierUsingTrainingData(self):
    """ Test the creation of custom classification Classifier by calling learner with training data. """
    # Arrange
    learnerMap = {'a': AZorngCvSVM.CvSVMLearner(),
                  'b': AZorngCvANN.CvANNLearner(),
                  'c': AZorngRF.RFLearner()}
    rules = ["firstLearner == Iris-setosa -> Iris-setosa",
             "-> Iris-virginica"]
    data = self.getClassificationTrainingData()
    learner = AZorngConsensus.ConsensusLearner(learners = learnerMap, expression = rules)

    # Act
    classifier = learner(data)

    # Assert
    self.assertNotEqual(classifier, None)
    self.assertEqual(len(classifier.classifiers), len(learnerMap))
    self.assertNotEqual(classifier.basicStat, None)
    self.assertNotEqual(classifier.classVar, None)
    self.assertNotEqual(classifier.domain, None)
    self.assertEqual(classifier.expression, rules)
    self.assertNotEqual(classifier.imputeData, None)
    self.assertEqual(classifier.NTrainEx, len(data))
    self.assertEqual(classifier.name, "Consensus classifier")
    self.assertNotEqual(classifier.varNames, None)
    self.assertEqual(classifier.verbose, 0)
    self.assertEqual(classifier.weights, None)
def test_CustomLogicalExpressionUsingOrAndStatement(self): """ Test logical expression using OR/AND statements """ # Arrange # Construct verification learners a = AZorngCvSVM.CvSVMLearner() a = a(self.irisData) b = AZorngCvANN.CvANNLearner() b = b(self.irisData) c = AZorngRF.RFLearner() c = c(self.irisData) # Construct expression learner/classifier learners = { 'a': AZorngCvSVM.CvSVMLearner(), 'b': AZorngCvANN.CvANNLearner(), 'c': AZorngRF.RFLearner() } discreteExpression = [ "a == Iris-setosa and c == Iris-virginica or b == Iris-setosa -> Iris-setosa", "-> Iris-virginica" ] discreteLearner = AZorngConsensus.ConsensusLearner( learners=learners, expression=discreteExpression) discreteClassifier = discreteLearner(self.irisData) # Act result = [] for ex in self.irisData: result.append(discreteClassifier(ex)) verifiedResult = [] for ex in self.irisData: if a(ex).value == "Iris-setosa" and c( ex).value == "Iris-virginica" or b( ex).value == "Iris-setosa": verifiedResult.append("Iris-setosa") else: verifiedResult.append("Iris-virginica") # Assert for index, item in enumerate(result): if not result[index].value == verifiedResult[index]: print "Not equal on index: ", index, " Predicted: ", result[ index].value, " Real: ", verifiedResult[index] self.assertEqual(result[index].value, verifiedResult[index])
def test_PersistentClassAcc(self):
    """ Assure that the accuracy is perserved for models trained in the same way. """
    # Train in a single step and score on the held-out test set.
    model = AZorngCvANN.CvANNLearner(self.train_data, nHidden = [3], stopUPs = 0)
    acc = evalUtilities.getClassificationAccuracy(self.test_data, model)
    # Accuracy must match the historical reference value.
    self.assertEqual(round(0.92381000000000002,5),round(acc,5)) #orange1.0 0.95555999999999996,5
def test_TwoWays(self):
    """ Test that an ann created in one or two steps give the same results """
    # One-step: pass the data straight to the learner constructor.
    oneStepModel = AZorngCvANN.CvANNLearner(self.train_data, stopUPs = 0)
    oneStepAcc = evalUtilities.getClassificationAccuracy(self.test_data, oneStepModel)
    # Two-step: build the learner first, then train it.
    learner = AZorngCvANN.CvANNLearner(randomWeights = False, stopUPs = 0)
    twoStepModel = learner(self.train_data)
    twoStepAcc = evalUtilities.getClassificationAccuracy(self.test_data, twoStepModel)
    # Both construction paths must yield identical accuracy.
    self.assertEqual(oneStepAcc, twoStepAcc)
def test_ImputeTrain(self):
    """ Assure that imputation works for the ann models when training on data
    with missing values.  This test only assures the model is trained; the
    correct imputation test is made on testImpute. """
    learner = AZorngCvANN.CvANNLearner(randomWeights = False, nHidden = [3], nEpochs = 100, stopUPs = 0)
    model = learner(self.missingTrain)
    acc = evalUtilities.getClassificationAccuracy(self.missingTest, model)
    self.assertEqual(round(0.75758000000000003,5),round(acc,5)) #opencv1.1: 0.95191999999999999
def test_AverageNRegressionExpressionUsingObjMap(self):
    """ Test regular expression using average N regression with object map """
    # Arrange: an explicit averaging expression over three named learners ...
    namedLearners = {'firstLearner': AZorngCvSVM.CvSVMLearner(),
                     'secondLearner': AZorngCvANN.CvANNLearner(),
                     'thirdLearner': AZorngRF.RFLearner()}
    regressionExpression = "(firstLearner + secondLearner + thirdLearner) / 3"
    expressionClassifier = AZorngConsensus.ConsensusLearner(
        learners = namedLearners, expression = regressionExpression)(self.DataReg)
    # ... and the default consensus over the same three learner types.
    defaultClassifier = AZorngConsensus.ConsensusLearner(
        learners = [AZorngRF.RFLearner(), AZorngCvANN.CvANNLearner(), AZorngCvSVM.CvSVMLearner()])(self.DataReg)

    # Act
    expressionPredictions = [expressionClassifier(ex) for ex in self.DataReg]
    defaultPredictions = [defaultClassifier(ex) for ex in self.DataReg]

    # Assert: the explicit average matches the default consensus behaviour.
    for index in range(len(expressionPredictions)):
        self.assertEqual(True,
                         float_compare(expressionPredictions[index],
                                       defaultPredictions[index]))
def test_MetaDataHandle(self):
    """Test the handling of Data with Meta Atributes """
    expectedAcc = 0.69999999999999996  # Ver 0.3
    # Train on the meta-free training set.
    learner = AZorngCvANN.CvANNLearner(randomWeights = False, nHidden = [3], nEpochs = 100, stopUPs = 0)
    model = learner(self.NoMetaTrain)
    # NoMetaTest and WMetaTest are the same apart from the meta attribute,
    # so the meta data must not change the predictions.
    accNoMeta = evalUtilities.getClassificationAccuracy(self.NoMetaTest, model)
    accWMeta = evalUtilities.getClassificationAccuracy(self.WMetaTest, model)
    self.assertEqual(accNoMeta, accWMeta, "Predictions with and without meta data were different!")
    self.assertEqual(round(accNoMeta,9), round(expectedAcc,9))
def test_CreateLearnerWithObjectMapping(self):
    """ Test the creation of learners with an object map """
    # Arrange
    learnerMap = {'firstLearner': AZorngCvSVM.CvSVMLearner(),
                  'secondLearner': AZorngCvANN.CvANNLearner(),
                  'thirdLearner': AZorngRF.RFLearner()}

    # Act
    consensus = AZorngConsensus.ConsensusLearner(learners = learnerMap)

    # Assert: all mapped learners were taken over.
    self.assertEqual(len(consensus.learners), len(learnerMap))
def test_PredictionWithDiffVarOrder(self):
    """Test Prediction with diff. VarOrder: predictions for examples whose
    attributes come in a different order must be unchanged. """
    expectedAcc = 0.69999999999999996  # Ver 0.3
    learner = AZorngCvANN.CvANNLearner(randomWeights = False, nHidden = [3], nEpochs = 100, stopUPs = 0)
    model = learner(self.noBadDataTrain)
    # Same examples, attributes reordered — accuracy must be identical.
    accOriginal = evalUtilities.getClassificationAccuracy(self.noBadDataTest, model)
    accReordered = evalUtilities.getClassificationAccuracy(self.badVarOrderData, model)
    self.assertEqual(round(accOriginal,9), round(expectedAcc,9),)
    self.assertEqual(round(accReordered,9), round(expectedAcc,9),)
def test_CustomRegressionExpressionUsingWeights(self): """ Test regression expression using weights """ # Arrange learners = { 'a': AZorngCvSVM.CvSVMLearner(), 'b': AZorngCvANN.CvANNLearner(), 'c': AZorngRF.RFLearner() } weights = {'a': lambda x: 1, 'b': lambda x: 2, 'c': lambda x: 3} regressionExpression = "(a + b + c) / 3" expressionLearner = AZorngConsensus.ConsensusLearner( learners=learners, expression=regressionExpression, weights=weights) classifier = expressionLearner(self.DataReg) # Act result = [] for ex in self.DataReg: result.append(classifier(ex)) verifiedResult = [] for ex in self.DataReg: a_value = classifier.classifiers['a'](ex) a_weight_value = weights['a'](a_value) b_value = classifier.classifiers['b'](ex) b_weight_value = weights['b'](b_value) c_value = classifier.classifiers['c'](ex) c_weight_value = weights['c'](c_value) prediction = (a_value * a_weight_value + b_value * b_weight_value + c_value * c_weight_value) / 3 verifiedResult.append(prediction) # Assert for index, item in enumerate(result): if float_compare(result[index].value, verifiedResult[index]) == False: print "Not equal on index: ", index print "Result: ", result[ index].value, " Verified: ", verifiedResult[index] print "Delta: ", abs(result[index].value - verifiedResult[index]) self.assertEqual( float_compare(result[index].value, verifiedResult[index]), True)
def test_PredictionWithDiffVarType(self):
    """Test prediction with diff. VarType: examples whose attributes have a
    different (but fixable) variable type must still be predicted. """
    expectedAcc = 0.66666700000000001  # Ver 0.3
    # Create a ann model
    CvANNlearner = AZorngCvANN.CvANNLearner(randomWeights = False, nHidden = [3], nEpochs = 100,stopUPs=0)
    ann = CvANNlearner(self.noBadDataTrain)
    # Use data from index 3 to the end: examples 0..2 are known to be incompatible.
    Acc2 = evalUtilities.getClassificationAccuracy(self.noBadDataTest[3:],ann)
    Acc1 = evalUtilities.getClassificationAccuracy(self.badVarTypeData[3:],ann)
    self.assertEqual(round(Acc1,6),round(expectedAcc,6))
    self.assertEqual(round(Acc2,6),round(expectedAcc,6))
    self.assert_(('Fixed Types of variables' in ann.examplesFixedLog) and (ann.examplesFixedLog['Fixed Types of variables']==27), "No report of fixing in classifier class")
    # BUG FIX: in the original code the failure message was placed INSIDE the
    # boolean expression, making the second operand of `and` a non-empty tuple
    # (comparison, "msg") which is always truthy — so the assertion could never
    # fail.  The message is now passed as the second argument of assert_().
    self.assert_(('Vars needing type fix' in ann.examplesFixedLog) and (ann.examplesFixedLog['Vars needing type fix']['[Br]([C])']=="EnumVariable to FloatVariable"), "No report of fixing in classifier class")
def test_PredictionWithIncompatibleDomain(self):
    """Test prediction with uncompatible domain: examples whose domain cannot
    be matched to the training domain must NOT be predicted. """
    expectedAcc1 = 0.69999999999999996  # Ver 0.3
    learner = AZorngCvANN.CvANNLearner(randomWeights = False, nHidden = [3], nEpochs = 100, stopUPs = 0)
    model = learner(self.noBadDataTrain)
    acc = evalUtilities.getClassificationAccuracy(self.noBadDataTest, model)
    self.assertEqual(round(acc,9), round(expectedAcc1,9))
    # Fixable type mismatches still produce a prediction ...
    self.assertEqual(model(self.badVarTypeData[0]),"NEG","This example could still be predicted")
    self.assertEqual(model(self.badVarTypeData[1]),"NEG","This example could still be predicted")
    # ... but unknown variable names or a wrong attribute count must not.
    self.assertEqual(model(self.badVarNameData[0]),None,"This example should NOT be predicted")
    self.assertEqual(model(self.badVarCountData[0]),None,"This example should NOT be predicted")
def test_CreateLearnerWithObjectMappingWithoutExpression(self):
    """ Test with name variable mapping defined but not expression given """
    # Arrange: named learners but no expression to combine them with.
    learnerMap = {'firstLearner': AZorngCvSVM.CvSVMLearner(),
                  'secondLearner': AZorngCvANN.CvANNLearner(),
                  'thirdLearner': AZorngRF.RFLearner()}
    learner = AZorngConsensus.ConsensusLearner(learners = learnerMap)

    # Act
    classifier = learner(self.DataReg)

    # Assert: training must fail — named learners need an expression.
    self.assertEqual(classifier, None)
def test_SaveLoadCustomRegressionExpression(self): """ Test save/load custom expression using average N regression with object map """ # Arrange learners = { 'firstLearner': AZorngCvSVM.CvSVMLearner(), 'secondLearner': AZorngCvANN.CvANNLearner(), 'thirdLearner': AZorngRF.RFLearner() } # Construct expression learner/classifier regressionExpression = "(firstLearner + secondLearner + thirdLearner) / 3" expressionLearner = AZorngConsensus.ConsensusLearner( learners=learners, expression=regressionExpression) expressionClassifier = expressionLearner(self.DataReg) # Construct default learner/classifier result = [] for ex in self.DataReg: result.append(expressionClassifier(ex)) # Act scratchdir = miscUtilities.createScratchDir( desc="ConsensusSaveLoadTest") expressionClassifier.write(os.path.join(scratchdir, "./CM.model")) resultLoaded = [] loaded = AZorngConsensus.Consensusread( os.path.join(scratchdir, "./CM.model")) self.assertNotEqual(loaded, None) for ex in self.DataReg: resultLoaded.append(loaded(ex)) # Assert for index, item in enumerate(result): if not float_compare(result[index].value, resultLoaded[index].value): print "Not equal on index: ", index self.assertEqual( float_compare(result[index].value, resultLoaded[index].value), True) self.assertEqual(len(loaded.domain), len(self.DataReg.domain)) self.assertEqual(len(loaded.imputeData), len(loaded.domain)) self.assertEqual(len(loaded.basicStat), len(loaded.domain)) self.assertEqual(loaded.NTrainEx, len(self.DataReg)) miscUtilities.removeDir(scratchdir)
def test_SaveLoadCustomLogicalExpression(self): """ Test save/load functionality with a custom logical expression """ # Arrange # Construct expression learner/classifier learners = { 'firstLearner': AZorngCvSVM.CvSVMLearner(), 'secondLearner': AZorngCvANN.CvANNLearner(), 'thirdLearner': AZorngRF.RFLearner() } discreteExpression = [ "firstLearner == Iris-setosa -> Iris-setosa", "-> Iris-virginica" ] discreteLearner = AZorngConsensus.ConsensusLearner( learners=learners, expression=discreteExpression) discreteClassifier = discreteLearner(self.irisData) result = [] for ex in self.irisData: result.append(discreteClassifier(ex)) # Act scratchdir = miscUtilities.createScratchDir( desc="ConsensusSaveLoadTest") discreteClassifier.write(os.path.join(scratchdir, "./CM.model")) resultLoaded = [] loaded = AZorngConsensus.Consensusread( os.path.join(scratchdir, "./CM.model")) self.assertNotEqual(loaded, None) for ex in self.irisData: resultLoaded.append(loaded(ex)) # Assert for index, item in enumerate(result): if not result[index].value == resultLoaded[index].value: print "Not equal on index: ", index self.assertEqual(result[index].value, resultLoaded[index].value) self.assertEqual(len(loaded.domain), len(self.irisData.domain)) self.assertEqual(len(loaded.imputeData), len(loaded.domain)) self.assertEqual(len(loaded.basicStat), len(loaded.domain)) self.assertEqual(loaded.NTrainEx, len(self.irisData)) miscUtilities.removeDir(scratchdir)
def test_InvalidCustomRegressionExpression(self):
    """ Test invalid custom expression """
    # Arrange: "3cd45" is not a valid token in the expression.
    learnerMap = {'a': AZorngCvSVM.CvSVMLearner(),
                  'b': AZorngCvANN.CvANNLearner(),
                  'c': AZorngRF.RFLearner()}
    badExpression = "(a + b + 3cd45 + c) / 3"
    learner = AZorngConsensus.ConsensusLearner(learners = learnerMap,
                                               expression = badExpression)

    # Act
    classifier = learner(self.DataReg)

    # Assert: prediction with a broken expression yields None.
    self.assertEqual(classifier(self.DataReg[0]), None)
def test_PersistentRegAcc(self):
    """ Assure that the accuracy is perserved for models trained in the same way. """
    # The data is loaded here rather than in setUp to keep the rest of the
    # test suite fast — these files are big.
    contTrain = dataUtilities.DataTable(os.path.join(AZOC.AZORANGEHOME,"tests/source/data/linearTrain.tab"))
    contTest = dataUtilities.DataTable(os.path.join(AZOC.AZORANGEHOME,"tests/source/data/linearTest.tab"))
    # Train the regression model.
    learner = AZorngCvANN.CvANNLearner(randomWeights = False, nHidden = [3], nEpochs = 100, stopUPs = 0)
    model = learner(contTrain)
    # RMSE must match the historical reference value.
    rmse = evalUtilities.getRMSE(contTest, model)
    self.assertEqual(round(0.109667,6), round(rmse,6)) #opencv1.1: 0.168131
def test_Probabilities(self):
    """Test if the returned probabilities are not fake"""
    CvANN = AZorngCvANN.CvANNLearner(self.LdataTrain, stopUPs = 0)
    res = []
    for idx, ex in enumerate(self.LdataTest):
        dist = CvANN(ex, resultType = orange.GetProbabilities)
        res.append(dist)
        # Each class probability must lie in [0, 1] ...
        self.assert_(dist[0]>=0 and dist[0]<=1,"Example "+str(idx)+" have impossible probability:"+str(dist))
        self.assert_(dist[1]>=0 and dist[1]<=1,"Example "+str(idx)+" have impossible probability:"+str(dist))
        self.assertEqual(CvANN.isRealProb(),True,"Example "+str(idx)+" did not return real probability")
        # ... and the two probabilities must sum to 1.
        self.assertEqual(round(sum(dist),5),1,"Probabilities of Example "+str(idx)+" did not sum 1:"+str(dist))
    # Fake probabilities would be exactly 0/1 everywhere; real ones leave
    # fractional parts in the accumulated sums.
    sum0 = sum([x[0] for x in res])
    sum1 = sum([x[1] for x in res])
    self.assertEqual(len(self.LdataTest), round(sum0+sum1,5))
    self.assert_(sum0-int(sum0) > 0)
    self.assert_(sum1-int(sum1) > 0)
def TopVarImportanceTest(data, expectNone = False):
    """Check that getTopImportantVars gives consistent results before and
    after a save/load round trip of the model."""
    CvANN = AZorngCvANN.CvANNLearner(data, stopUPs=33)
    resA = [CvANN.getTopImportantVars(ex, 1) for ex in data]
    # Round-trip the model through disk.
    scratchdir = miscUtilities.createScratchDir(desc="TopVarImportanceTest")
    modelPath = os.path.join(scratchdir, "CvANNModel")
    CvANN.write(modelPath)
    LoadedCvANN = AZorngCvANN.CvANNread(modelPath)
    miscUtilities.removeDir(scratchdir)
    resB = [LoadedCvANN.getTopImportantVars(ex, 1) for ex in data]
    if expectNone:
        # Both models must consistently report no importance information.
        return resA == resB == [None]*len(data)
    # Otherwise: identical results, all defined, and not the same single
    # variable for every example.
    return resA == resB and None not in resA and resA.count(resA[0]) != len(resA)
def applySettings(self):
    """Create the learner with selected settings and send to the output channel. """
    # Clear any previous error/warning state on the widget.
    self.error(0)
    self.warning(0)
    # Transform widget settings to those appropriate for AZorngCvANN.
    self.setLearnerVars()
    # A learner is always emitted, even when no input data is connected.
    self.learner = AZorngCvANN.CvANNLearner(nHidden = self.nHidden,
                                            stopCrit = self.stopCrit,
                                            maxIter = self.maxIter,
                                            eps = self.eps,
                                            optAlg = self.optAlg,
                                            priors = self.priors,
                                            scaleClass = self.scaleClass,
                                            scaleData = self.scaleData)
    self.learner.name = str(self.name)
    self.send("Learner", self.learner)
    self.createClassifier()