#modelBuilder = ModelBuilder(batch_size, question_dim, obj_dim, dictSize)
modelBuilder = ModelBuilder(batch_size, macro_batch_size, question_dim, obj_dim, dictSize, args.questionAwareContext, args.f_layers, args.f_inner_layers, args.g_layers, args.h_layers, args.appendPosVec)
(inputContext, inputContextLengths, inputContextSentenceLengths, inputQuestion, inputQuestionLengths, objects, question) = modelBuilder.buildWordProcessorLSTMs()
if modelToUse == 1:
    print("Using model I")
    rnOutput = modelBuilder.buildRN_I(objects, question)
elif modelToUse == 2:
    print("Using model II")
    rnOutput = modelBuilder.buildRN_II(objects, question)
elif modelToUse == 3:
    print("Using model III")
    rnOutput = modelBuilder.buildRN_III(objects, question)
elif modelToUse == 4:
    print("Using model IV")
    rnOutput = modelBuilder.buildRN_IV(objects, question)
elif modelToUse == 5:
    print("Using model V")
    rnOutput = modelBuilder.buildRN_V(objects, question)
elif modelToUse == 6:
    print("Using model VI")
    rnOutput = modelBuilder.buildRN_VI(objects, question)
elif modelToUse == 7:
    print("Using model VII")
    rnOutput = modelBuilder.buildRN_VII_jl(objects, question)
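# A minimal sketch of an equivalent lookup-table dispatch for the if/elif
# chain above, assuming only the modelBuilder.buildRN_* methods already used
# there; the table keeps the number-to-builder mapping in one place.
rnBuilders = {
    1: ("I", modelBuilder.buildRN_I),
    2: ("II", modelBuilder.buildRN_II),
    3: ("III", modelBuilder.buildRN_III),
    4: ("IV", modelBuilder.buildRN_IV),
    5: ("V", modelBuilder.buildRN_V),
    6: ("VI", modelBuilder.buildRN_VI),
    7: ("VII", modelBuilder.buildRN_VII_jl),
}
romanNumeral, buildRN = rnBuilders[modelToUse]
print("Using model " + romanNumeral)
rnOutput = buildRN(objects, question)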
    #contextSentenceLengths = [sentenceLengths + [1]*(maxContextLen - len(sentenceLengths)) for sentenceLengths in contextSentenceLengths] # apply padding for the TensorFlow tensor - pad with 1 instead of 0 so the sequence-end selectors don't fail with a buffer underrun
    questionInput = [question + [0]*(maxQuestionLen - len(question)) for context, question, answer in samples]
    answerInput = [answer for context, question, answer in samples]
    yield contextInput, contextLengths, contextSentenceLengths, questionInput, questionLengths, answerInput

# build the whole model and run it
modelBuilder = ModelBuilder(batch_size, macro_batch_size, question_dim, obj_dim, dictSize, args.questionAwareContext, args.f_layers, args.f_inner_layers, args.g_layers, args.h_layers, args.appendPosVec, args.batchNorm, args.layerNorm, args.weightPenalty)
# word-processing LSTMs encode the context sentences into object vectors and the question into a single question vector
(inputContext, inputContextLengths, inputContextSentenceLengths, inputQuestion, inputQuestionLengths, objects, question) = modelBuilder.buildWordProcessorLSTMs()
# select which relation network variant to build
if modelToUse == 1:
    print("Using model I")
    (rnOutput, isTraining) = modelBuilder.buildRN_I(objects, question)
elif modelToUse == 2:
    print("Using model II")
    (rnOutput, isTraining) = modelBuilder.buildRN_II(objects, question)
elif modelToUse == 3:
    print("Using model III")
    (rnOutput, isTraining) = modelBuilder.buildRN_III(objects, question)
elif modelToUse == 4:
    print("Using model IV")
    (rnOutput, isTraining) = modelBuilder.buildRN_IV(objects, question)
elif modelToUse == 5:
    print("Using model V")
    (rnOutput, isTraining) = modelBuilder.buildRN_V(objects, question)
elif modelToUse == 6:
    print("Using model VI")
    (rnOutput, isTraining) = modelBuilder.buildRN_VI(objects, question)
elif modelToUse == 7:
    print("Using model VII")
    (rnOutput, isTraining) = modelBuilder.buildRN_VII_jl(objects, question)
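# A minimal, self-contained sketch of the padding scheme used in the batch
# generator above; the concrete lengths and token ids are made up for
# illustration and the names mirror those in the generator. Question token
# lists are padded with 0 up to maxQuestionLen, while per-sentence lengths
# are padded with 1 instead of 0 so the sequence-end selectors never address
# a zero-length (underrun) position.
maxQuestionLen = 6
maxContextLen = 4
questionIds = [12, 7, 3]        # token ids of one question
sentenceLengths = [5, 8]        # word counts of the sentences in one context

paddedQuestion = questionIds + [0] * (maxQuestionLen - len(questionIds))
paddedSentenceLengths = sentenceLengths + [1] * (maxContextLen - len(sentenceLengths))

assert paddedQuestion == [12, 7, 3, 0, 0, 0]
assert paddedSentenceLengths == [5, 8, 1, 1]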