    rnOutput = modelBuilder.buildRN_III(objects, question)
elif modelToUse == 4:
    print("Using model IV")
    rnOutput = modelBuilder.buildRN_IV(objects, question)
elif modelToUse == 5:
    print("Using model V")
    rnOutput = modelBuilder.buildRN_V(objects, question)
elif modelToUse == 6:
    print("Using model VI")
    rnOutput = modelBuilder.buildRN_VI(objects, question)
elif modelToUse == 7:
    print("Using model VII")
    rnOutput = modelBuilder.buildRN_VII_jl(objects, question)
elif modelToUse == 8 and layerCount >= 0:
    print("Using model VIII with " + str(layerCount) + " layers")
    rnOutput = modelBuilder.buildRN_VIII_jl(objects, question, layerCount)
else:
    print("Invalid model number specified: " + str(modelToUse))
    sys.exit(0)

#(answer, answerGates, answerForCorrectness) = modelBuilder.buildAnswerModel(rnOutput)
(answer, answerForCorrectness) = modelBuilder.buildAnswerModel(rnOutput)

(inputAnswer, loss, accum_ops, zero_ops, train_step, global_step_tensor, gradientsNorm, learningRate) = modelBuilder.buildOptimizer(answer, args.optimizer)  #, answerGates)

with tf.name_scope('testing'):
    #correct = tf.reduce_min(tf.cast(tf.equal(inputAnswer, tf.round(answer)), dtype=tf.float32), axis=1)  # bad results: the largest entries often stay below 0.5, so rounding does not work
    #correct = tf.cast(tf.equal(tf.argmax(inputAnswer, axis=1), tf.argmax(answer, axis=1)), dtype=tf.float32)  # incorrect for multi-answer questions, but gives better results than rounding on single-answer questions -> TODO: find a good solution for multi-answer questions
    (rnOutput, isTraining) = modelBuilder.buildRN_III(objects, question)
elif modelToUse == 4:
    print("Using model IV")
    (rnOutput, isTraining) = modelBuilder.buildRN_IV(objects, question)
elif modelToUse == 5:
    print("Using model V")
    (rnOutput, isTraining) = modelBuilder.buildRN_V(objects, question)
elif modelToUse == 6:
    print("Using model VI")
    (rnOutput, isTraining) = modelBuilder.buildRN_VI(objects, question)
elif modelToUse == 7:
    print("Using model VII")
    (rnOutput, isTraining) = modelBuilder.buildRN_VII_jl(objects, question)
elif modelToUse == 8 and layerCount >= 0:
    print("Using model VIII with " + str(layerCount) + " layers")
    (rnOutput, isTraining) = modelBuilder.buildRN_VIII_jl(objects, inputContextLengths, question, layerCount)
else:
    print("Invalid model number specified: " + str(modelToUse))
    sys.exit(0)

#(answer, answerGates, answerForCorrectness) = modelBuilder.buildAnswerModel(rnOutput)
(answer, answerForCorrectness) = modelBuilder.buildAnswerModel(rnOutput)

#(inputAnswer, loss, optimizer_op, global_step_tensor, gradientsNorm, learningRate) = modelBuilder.buildOptimizer(answer, args.optimizer)  #, answerGates)
(inputAnswer, loss, accum_ops, zero_ops, train_step, global_step_tensor, gradientsNorm, learningRate) = modelBuilder.buildOptimizer(answer, args.optimizer)  #, answerGates)

with tf.name_scope('validation'):
    #correct = tf.reduce_min(tf.cast(tf.equal(inputAnswer, tf.round(answer)), dtype=tf.float32), axis=1)  # bad results: the largest entries often stay below 0.5, so rounding does not work
    #correct = tf.cast(tf.equal(tf.argmax(inputAnswer, axis=1), tf.argmax(answer, axis=1)), dtype=tf.float32)  # incorrect for multi-answer questions, but gives better results than rounding on single-answer questions -> TODO: find a good solution for multi-answer questions
    #idea for a better implementation of the "correct" variable: take the argmax of answer1, answer2 and answer3 individually, also round answerGates, then assemble "answer" as in "buildModel()" and finally compare with tf.equal
    # exact match: a sample only counts as correct if every answer position matches
    correct = tf.cast(tf.reduce_all(tf.equal(answerForCorrectness, inputAnswer), axis=1), dtype=tf.float32)
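# --- Illustrative sketch (assumption, not part of the original script) -------
# buildOptimizer returning zero_ops / accum_ops / train_step suggests gradient
# accumulation: gradients from several sub-batches are summed before a single
# parameter update. The helper below only sketches the expected call order from
# a TF1-style session; `sess`, `sub_batches` and `make_feed_dict` are
# hypothetical stand-ins for whatever the surrounding script actually uses.
def _sketch_accumulated_update(sess, sub_batches, make_feed_dict):
    sess.run(zero_ops)                                         # 1) reset the gradient accumulators
    for batch in sub_batches:                                  # 2) accumulate gradients for each sub-batch
        sess.run(accum_ops, feed_dict=make_feed_dict(batch))
    sess.run(train_step)                                       # 3) apply the accumulated gradients once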