    # Evaluation-only mode: run each eval set once and log the results.
    if only_forward:
        for index, eval_set in enumerate(eval_iterators):
            log_entry.Clear()
            acc = evaluate(
                FLAGS, model, eval_set, log_entry, logger, trainer,
                vocabulary, show_sample=True, eval_index=index)
            print(log_entry)
            logger.LogEntry(log_entry)
    else:
        # Standard mode: enter the training loop.
        train_loop(
            FLAGS, model, trainer, training_data_iter, eval_iterators,
            logger)


if __name__ == '__main__':
    get_flags()

    # Parse command line flags.
    FLAGS(sys.argv)
    flag_defaults(FLAGS)

    if FLAGS.model_type != "RLSPINN":
        raise Exception("Reinforce is only implemented for RLSPINN.")

    run(only_forward=FLAGS.expanded_eval_only_mode)
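# --- Illustrative sketch (not part of the original script) ---------------
# FLAGS(sys.argv) above follows the gflags-style pattern: calling the
# global FLAGS object on argv parses the command line and exposes each
# flag as an attribute. Below is a minimal, self-contained sketch of that
# pattern, written here against absl.flags rather than whatever flags
# library this repository actually uses; the flag name "model_type"
# mirrors the check above, and its default value is illustrative only.
def _demo_flag_parsing(argv):
    from absl import flags

    flags.DEFINE_string("model_type", "RLSPINN", "Which model variant to run.")
    demo_flags = flags.FLAGS

    # Calling the FLAGS object parses argv in place and returns the
    # remaining positional arguments; flag values become attributes.
    remaining = demo_flags(argv)
    return demo_flags.model_type, remaining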
# --- evaluate.py ---

    def set_debug(self):
        self.debug = FLAGS.debug
    # Propagate the debug flag to the model and all of its submodules.
    model.apply(set_debug)

    # Do an evaluation-only run.
    eval_str = eval_format(model)
    logger.Log("Eval-Format: {}".format(eval_str))
    eval_extra_str = eval_extra_format(model)
    logger.Log("Eval-Extra-Format: {}".format(eval_extra_str))

    # This script evaluates against exactly one eval set.
    index = 0
    eval_set = eval_iterators[index]
    acc = evaluate(
        FLAGS, model, data_manager, eval_set, index, logger, step,
        vocabulary)


if __name__ == '__main__':
    get_flags()

    # Parse command line flags.
    FLAGS(sys.argv)
    flag_defaults(FLAGS, load_log_flags=True)

    if len(FLAGS.eval_data_path.split(":")) > 1:
        raise Exception(
            "The evaluate.py script only runs against one eval set. "
            "Please refrain from using the ':' token in --eval_data_path.")

    run()
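# --- Illustrative sketch (not part of the original script) ---------------
# model.apply(set_debug) above relies on torch.nn.Module.apply, which
# calls the given function on the module and on every submodule
# recursively. Below is a minimal, self-contained demonstration of that
# mechanism; the toy model is a stand-in, not the SPINN model this
# script loads, and the hard-coded True replaces FLAGS.debug.
def _demo_apply_debug():
    import torch.nn as nn

    def set_debug(module):
        # Attach a debug attribute to each submodule, mirroring the
        # pattern at the top of this file.
        module.debug = True

    toy_model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
    toy_model.apply(set_debug)

    # Every module in the tree (including the container) now carries it.
    assert all(hasattr(m, 'debug') for m in toy_model.modules())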