import numpy as np

# Assumed imports: `it` aliases the shared test utilities and `nn` provides
# calcRate; adjust the module names to match the rest of this repo.
import imageqa_test as it
import nn


def testAvgAll(modelOutputs, mixRatio, data, outputFolder, resultsFolder):
    """Average the outputs of several models with equal weights and run
    the full metrics suite on the blend. `mixRatio` is only used by the
    commented-out two-model blend below."""
    # finalOutput = mixRatio * modelOutputs[0] + \
    #     (1 - mixRatio) * modelOutputs[1]
    finalOutput = np.zeros(modelOutputs[0].shape)
    for output in modelOutputs:
        # Model outputs can differ slightly in shape; accumulate only over
        # the region they share, weighting every model equally.
        shape0 = min(finalOutput.shape[0], output.shape[0])
        shape1 = min(finalOutput.shape[1], output.shape[1])
        finalOutput[:shape0, :shape1] += \
            output[:shape0, :shape1] / float(len(modelOutputs))
    testAnswerFile = it.getAnswerFilename(outputFolder, resultsFolder)
    testTruthFile = it.getTruthFilename(outputFolder, resultsFolder)
    resultsRank, \
        resultsCategory, \
        resultsWups = it.runAllMetrics(
            data['testData'][0],
            finalOutput,
            data['testData'][1],
            data['ansIdict'],
            data['questionTypeArray'],
            testAnswerFile,
            testTruthFile)
    it.writeMetricsToFile(
        outputFolder,
        resultsRank,
        resultsCategory,
        resultsWups,
        resultsFolder)
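# A minimal, self-contained sketch of the shape-tolerant averaging used in
# testAvgAll: every model gets equal weight and the sum runs only over the
# region all output matrices share. The array sizes are made up for
# illustration and are not tied to any dataset in this repo.
def _demoAvgOutputs():
    outputs = [np.random.rand(100, 431),
               np.random.rand(100, 431),
               np.random.rand(98, 430)]
    blend = np.zeros(outputs[0].shape)
    for o in outputs:
        rows = min(blend.shape[0], o.shape[0])
        cols = min(blend.shape[1], o.shape[1])
        # Rows/columns outside the shared region keep their zero
        # initialization, mirroring testAvgAll.
        blend[:rows, :cols] += o[:rows, :cols] / float(len(outputs))
    return blend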
def testEnsemble(
        ensembleId,
        models,
        dataFolder,
        classDataFolders,
        resultsFolder):
    """Run an ensemble of class-specific models on the test set of the
    original dataset and report accuracy plus the full metrics suite."""
    data = it.loadDataset(dataFolder)
    inputTest = data['testData'][0]
    targetTest = data['testData'][1]
    ensembleOutputTest = runEnsemble(
        inputTest,
        models,
        dataFolder,
        classDataFolders,
        data['questionTypeArray'])
    ensembleAnswerFile = it.getAnswerFilename(ensembleId, resultsFolder)
    ensembleTruthFile = it.getTruthFilename(ensembleId, resultsFolder)
    # The rate is computed against the first ensemble member; calcRate is
    # assumed to need only one model's interface to score the predictions
    # against the targets.
    rate, correct, total = nn.calcRate(
        models[0], ensembleOutputTest, targetTest)
    print 'rate: %.4f' % rate
    resultsRank, \
        resultsCategory, \
        resultsWups = it.runAllMetrics(
            inputTest,
            ensembleOutputTest,
            targetTest,
            data['ansIdict'],
            data['questionTypeArray'],
            ensembleAnswerFile,
            ensembleTruthFile)
    it.writeMetricsToFile(
        ensembleId,
        rate,
        resultsRank,
        resultsCategory,
        resultsWups,
        resultsFolder)
    return ensembleOutputTest
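# Hypothetical driver sketch: every path, ID, and the loadEnsemble helper
# below are assumptions for illustration, not names taken from this file.
#
#     models = loadEnsemble(['model-1', 'model-2'], '../results')
#     testEnsemble(
#         'ensemble-1',
#         models,
#         '../data/cocoqa',
#         ['../data/cocoqa-object', '../data/cocoqa-color'],
#         '../results')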