# Training-schedule and regularization flags for the SNLI classifier,
# followed by the script entry point.
parser.add_argument("--lr", type=float, default=2e-3,
                    help="Initial learning rate.")
parser.add_argument("--lr_decay_by", type=float, default=0.75,
                    help="The ratio to multiply the learning rate by every "
                         "time the learning rate is decayed.")
parser.add_argument("--lr_decay_every", type=float, default=1,
                    help="Decay the learning rate every _ epoch(s).")
parser.add_argument("--dev_every", type=int, default=1000,
                    help="Run evaluation on the dev split every _ training "
                         "batches.")
parser.add_argument("--save_every", type=int, default=1000,
                    help="Save checkpoint every _ training batches.")
parser.add_argument("--embed_dropout", type=float, default=0.08,
                    help="Word embedding dropout rate.")
parser.add_argument("--mlp_dropout", type=float, default=0.07,
                    help="SNLIClassifier multi-layer perceptron dropout "
                         "rate.")
# "store_false" flags: passing --no-projection sets FLAGS.projection=False;
# omitting it leaves projection enabled.
parser.add_argument("--no-projection", action="store_false",
                    dest="projection",
                    help="Whether word embedding vectors are projected to "
                         "another set of vectors (see d_proj).")
parser.add_argument("--predict_transitions", action="store_true",
                    dest="predict",
                    help="Whether the Tracker will perform prediction.")
parser.add_argument("--force_cpu", action="store_true", dest="force_cpu",
                    help="Force use CPU-only regardless of whether a GPU is "
                         "available.")

# Known flags land in FLAGS; anything unrecognized is forwarded verbatim
# to tfe.run so absl can parse its own flags.
FLAGS, unparsed = parser.parse_known_args()
tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
# NOTE(review): the opening of this add_argument call is truncated in the
# source; flag name and default reconstructed from the help text and the
# sibling SNLI script -- verify against the original file.
parser.add_argument("--embed_dropout", type=float, default=0.5,
                    help="Word embedding dropout rate.")
parser.add_argument("--mlp_dropout", type=float, default=0.5,
                    help="ChemprotClassifier multi-layer perceptron dropout "
                         "rate.")
# "store_false": passing --no-projection disables embedding projection.
parser.add_argument("--no-projection", action="store_false",
                    dest="projection",
                    help="Whether word embedding vectors are projected to "
                         "another set of vectors (see d_proj).")
parser.add_argument("--predict_transitions", action="store_true",
                    dest="predict",
                    help="Whether the Tracker will perform prediction.")
parser.add_argument("--force_cpu", action="store_true", dest="force_cpu",
                    help="Force use CPU-only regardless of whether a GPU is "
                         "available.")
parser.add_argument("--test_bool", action="store_true", dest="test_bool",
                    help="For test")

FLAGS, unparsed = parser.parse_known_args()
# BUG FIX: argv[0] must be the program name (absl/tf.app skip it when
# parsing), and each flag and its value must be a separate argv element.
# The original passed one space-joined string as argv[0], so the
# --data_root/--logdir overrides were silently discarded. Unrecognized
# CLI flags are forwarded too, matching the sibling script.
tfe.run(main=main,
        argv=[sys.argv[0],
              "--data_root", "chemprot-data",
              "--logdir", "tmpLog"] + unparsed)
# NOTE(review): this fragment is the tail of a benchmark routine whose
# enclosing definition is outside this view; indentation reconstructed.

# Apply the accumulated gradients, timing the optimizer step.
with Measure('apply', times):
    optimizer.apply_gradients(
        zip(grad, cvae.variables),
        global_step=tf.train.get_or_create_global_step())

# Report per-phase timings and tree-shape statistics for the run.
print("#ALL")
tot_avg, tot_sum = Measure.print_times(times)
print("\nNodes: {0:.1f} ({1:.1f})".format(np.mean(node_count),
                                          np.std(node_count)))
print("Depths: {0:.1f} ({1:.1f})".format(np.mean(node_depth),
                                         np.std(node_depth)))
print("Arities: {0:.1f} ({1:.1f})".format(np.mean(node_arity),
                                          np.std(node_arity)))
# Throughput over total wall time: nodes/sec, then sentences/sec.
print((np.sum(node_count) / tot_sum),
      FLAGS.batch_size * FLAGS.benchmark_runs / tot_sum)

# print("\n#CVAE")
# Measure.print_times(cvae.loss_times)
#
# print("\n#DEC")
# Measure.print_times(cvae._det_decoder.__class__.times)

if __name__ == "__main__":
    import warnings
    # Deprecated TF APIs spam the console; silence all warnings.
    warnings.filterwarnings("ignore")
    define_flags()
    tfe.run()