import sys

import numpy as np

# Project-local modules; the module names below are inferred from how the
# objects are used in this file (it.loadModel / it.loadDataset, nn.test, ...).
import nn
import imageqa_test as it


def runAllModels(
                inputTest, 
                questionTypeArray, 
                modelSpecs,
                resultsFolder,
                dataset,
                dataFolder):
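    """Run the test data through every model spec and collect their outputs.

    Each modelSpec is a dict with at least 'name', 'id', 'isClassEnsemble',
    'isAverageEnsemble' and 'runPrior' keys; 'id' may hold a comma-separated
    list of model IDs when the spec describes an ensemble.
    """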
    allOutputs = []
    for modelSpec in modelSpecs:
        if modelSpec['isClassEnsemble']:
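            # Class ensemble: one sub-model per question class, each with its
            # own data folder; questionTypeArray is used to dispatch each test
            # question to the matching sub-model.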
            print 'Running test data on ensemble model %s...' \
                    % modelSpec['name']
            models = loadEnsemble(modelSpec['id'].split(','), resultsFolder)
            classDataFolders = getClassDataFolders(dataset, dataFolder)
            if modelSpec['runPrior']:
                outputTest = runEnsemblePrior(
                                    inputTest, 
                                    models,
                                    dataFolder,
                                    classDataFolders,
                                    questionTypeArray)
            else:
                outputTest = runEnsemble(
                                    inputTest, 
                                    models,
                                    dataFolder,
                                    classDataFolders,
                                    questionTypeArray)
        elif modelSpec['isAverageEnsemble']:
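            # Average ensemble: run every listed model on the test input and
            # average their output distributions element-wise, clipping to the
            # smallest shared output shape.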
            modelOutputs = []
            for modelId in modelSpec['id'].split(','):
                model = it.loadModel(modelId, resultsFolder)
                modelOutputs.append(nn.test(model, inputTest))
            outputTest = np.zeros(modelOutputs[0].shape)
            for output in modelOutputs:
                shape0 = min(outputTest.shape[0], output.shape[0])
                shape1 = min(outputTest.shape[1], output.shape[1])
                outputTest[:shape0, :shape1] += output[:shape0, :shape1] / \
                    float(len(modelOutputs))
        else:
            print 'Running test data on model %s...' \
                    % modelSpec['name']
            model = it.loadModel(modelSpec['id'], resultsFolder)
            outputTest = nn.test(model, inputTest)
        allOutputs.append(outputTest)
    return allOutputs
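
# Example usage (illustrative only: the IDs, folders and variable values below
# are hypothetical; the dict keys mirror the fields accessed above):
#
# modelSpecs = [
#     {'name': 'single', 'id': 'modelA',
#      'isClassEnsemble': False, 'isAverageEnsemble': False, 'runPrior': False},
#     {'name': 'avg-ens', 'id': 'modelB,modelC',
#      'isClassEnsemble': False, 'isAverageEnsemble': True, 'runPrior': False},
# ]
# allOutputs = runAllModels(inputTest, questionTypeArray, modelSpecs,
#                           resultsFolder, 'cocoqa', dataFolder)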

    modelIds = []
    validModelIds = []
    for i, flag in enumerate(sys.argv):
        # The '-m'/'-model' branch is inferred from the '-vm'/'-vmodel'
        # handling below.
        if flag == '-m' or flag == '-model':
            modelIds.append(sys.argv[i + 1])
        elif flag == '-vm' or flag == '-vmodel':
            validModelIds.append(sys.argv[i + 1])
        elif flag == '-r' or flag == '-results':
            resultsFolder = sys.argv[i + 1]
        elif flag == '-d' or flag == '-data':
            dataFolder = sys.argv[i + 1]
        elif flag == '-o' or flag == '-output':
            outputFolder = sys.argv[i + 1]
    data = it.loadDataset(dataFolder)
    
    models = []
    validModels = []
    for modelId in modelIds:
        print 'Loading model %s' % modelId
        models.append(it.loadModel(modelId, resultsFolder))
    for modelId in validModelIds:
        print 'Loading model %s' % modelId
        validModels.append(it.loadModel(modelId, resultsFolder))

    modelOutputs = []
    validModelOutputs = []
    # for modelId, model in zip(validModelIds, validModels):
    #     print 'Running model %s' % modelId
    #     modelOutput = nn.test(model, data['validData'][0])
    #     validModelOutputs.append(modelOutput)
    # 
    # mixRatios = np.arange(0, 11) * 0.1
    # bestMixRatio = validAvg(validModelOutputs, mixRatios, data['validData'][1])
    # print 'Best ratio found: %.4f' % bestMixRatio
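    # The validation search above is commented out, so fall back to an equal
    # 50/50 blend. For reference, validAvg is assumed to sweep the candidate
    # ratios r and keep the one whose blend r * A + (1 - r) * B of two model
    # outputs scores highest on the validation labels (illustration only, not
    # the project's actual implementation):
    #
    # def validAvg(outputs, mixRatios, validLabels):
    #     bestRatio, bestRate = mixRatios[0], -1.0
    #     for r in mixRatios:
    #         blended = r * outputs[0] + (1 - r) * outputs[1]
    #         rate = np.mean(np.argmax(blended, axis=1) == validLabels)
    #         if rate > bestRate:
    #             bestRatio, bestRate = r, rate
    #     return bestRatio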
    bestMixRatio = 0.5
    dataset = 'cocoqa'
    needReindex = False  # default; only the -reindex flag switches this on
    for i, flag in enumerate(sys.argv):
        if flag == '-m' or flag == '-model':
            modelId = sys.argv[i + 1]
        elif flag == '-d' or flag == '-data':
            dataFolder = sys.argv[i + 1]
        elif flag == '-td' or flag == '-tdata':
            testDataFolder = sys.argv[i + 1]
        elif flag == '-reindex':
            needReindex = True
        elif flag == '-r' or flag == '-results':
            resultsFolder = sys.argv[i + 1]
        elif flag == '-dataset':
            dataset = sys.argv[i + 1]

    model = it.loadModel(modelId, resultsFolder)
    data = it.loadDataset(dataFolder)
    testdata = it.loadDataset(testDataFolder)
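    # reindexDataset (defined elsewhere in this project) is assumed to remap
    # the test set's question-word and answer indices onto the training data's
    # dictionaries, roughly token -> testdata['questionIdict'][token] ->
    # data['questionDict'][word] (and likewise for answers), so the loaded
    # model can be scored on an independently packaged test set.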
    if needReindex:
        testQuestions, testAnswers = reindexDataset(testdata['testData'][0],
                                                    testdata['testData'][1],
                                                    testdata['questionIdict'],
                                                    data['questionDict'],
                                                    testdata['ansIdict'],
                                                    data['ansDict'])
    else:
        testQuestions = testdata['testData'][0]
        testAnswers = testdata['testData'][1]
    outputTest = nn.test(model, testQuestions)
    rate, correct, total = nn.calcRate(model, outputTest, testAnswers)
    print 'rate: %.4f' % rate

    for i, flag in enumerate(sys.argv):
        # The '-vd'/'-vdata' flag name is assumed by analogy with '-md'/'-mdata'
        # and '-td'/'-tdata'; the flags that set preVisModelId and visModelId
        # are not shown in this excerpt.
        if flag == "-vd" or flag == "-vdata":
            visDataFolder = sys.argv[i + 1]
        elif flag == "-md" or flag == "-mdata":
            mainDataFolder = sys.argv[i + 1]
        elif flag == "-r" or flag == "-results":
            resultsFolder = sys.argv[i + 1]
        elif flag == "-qtype":
            questionType = sys.argv[i + 1]
        elif flag == "-o" or flag == "-outweights":
            outputWeightsFolder = sys.argv[i + 1]

    data = it.loadDataset(visDataFolder)
    testInput = data["testData"][0]
    testTarget = data["testData"][1]
    deltas = [0.000001, 0.000005, 0.00001, 0.00005, 0.0001, 0.0005,
              0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0]

    preVisModel = it.loadModel(preVisModelId, resultsFolder)

    print "Num answer", len(data["ansIdict"])
    bestDelta = validDelta(
        data["trainData"],
        data["validData"],
        preVisModel,
        data["questionDict"],
        data["questionIdict"],
        len(data["ansIdict"]),
        deltas,
        questionType,
    )

    trainDataAll = combineTrainValid(data["trainData"], data["validData"])
    visModel = it.loadModel(visModelId, resultsFolder)
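    # runVisPrior (defined elsewhere in this project) presumably combines the
    # vision model's test predictions with a per-question-type answer prior
    # estimated from trainDataAll and smoothed by bestDelta.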
    visTestOutput = runVisPrior(trainDataAll,