def _get_arguments(argv):
    """Build the full command-line parser and return validated arguments.

    Each subsystem registers its own flags on one shared parser; after
    parsing, ``config`` validates the flag combination and fills defaults.
    ``argv[0]`` (the program name) is skipped, mirroring ``sys.argv`` usage.
    """
    arg_parser = argparse.ArgumentParser()
    # Let every subsystem contribute its own flags to the shared parser.
    for module in (config, models, solver, loss_metrics,
                   input_pipeline, custom_evaluator):
        module.add_arguments(arg_parser)
    parsed = arg_parser.parse_args(argv[1:])
    config.check_args(parsed, arg_parser)
    config.fill_default_args(parsed)
    return parsed
def main():
    """Entry point: solve a TSP instance with an ant colony and report the best tour."""
    cli = argparse.ArgumentParser()
    config.add_arguments(cli)
    opts = cli.parse_args()
    config.fill_default(opts)

    # Load the TSP instance (and optional known-solution file).
    spec = data_kits.load(opts.tsp_file, opts.sln_file)
    # data_kits.show_path(spec, spec.solutions)
    # return
    # Resolve the ant count against the instance size when left unspecified.
    opts.ants = config.maybe_fill(opts.ants, spec.dimension)
    print(opts)

    tsp_graph = graph.TSP(spec, args=opts)
    best_tour = None
    best_tour_cost = sys.maxsize
    cost_histories = []

    # Animation window showing live optimization progress.
    show = data_kits.DynamicShow(spec)
    show.launch(opts)

    # Run the whole optimization `repeat` times, keeping the overall best tour.
    for _ in tqdm.tqdm(range(opts.repeat), ascii=True):
        swarm = colony.AntColony(tsp_graph, opts.ants, opts.iters, opts)
        swarm.begin(show)
        if swarm.best_path_cost < best_tour_cost:
            best_tour = swarm.best_path
            best_tour_cost = swarm.best_path_cost
        # NOTE(review): reconstructed from a collapsed line — assumes the
        # per-iteration cost history is recorded every repetition, not only
        # on improvement; confirm against the original layout.
        cost_histories.append(swarm.iter_costs)

    best_tour = data_kits.adj_path(best_tour)
    print("Best path:", best_tour)
    print("Best cost:", best_tour_cost)
    # plt.subplot(121)
    # data_kits.show_path(spec, [best_tour])
    # plt.subplot(122)
    # data_kits.show_iters(cost_histories)
    # plt.ioff()
    plt.show()
hparams = config.create_hparams(FLAGS) # Source vocab src_vocab_file = "%s.%s" % (hparams.vocab_prefix, hparams.src) tgt_vocab_file = "%s.%s" % (hparams.vocab_prefix, hparams.tgt) print (src_vocab_file, tgt_vocab_file) src_vocab_size, src_vocab_file = vocab_table_util.check_vocab( src_vocab_file, hparams.out_dir, sos=hparams.sos, eos=hparams.eos, unk=vocab_table_util.UNK) # Target vocab tgt_vocab_size, tgt_vocab_file = vocab_table_util.check_vocab( tgt_vocab_file, hparams.out_dir, sos=hparams.sos, eos=hparams.eos, unk=vocab_table_util.UNK) hparams.add_hparam("src_vocab_size", src_vocab_size) hparams.add_hparam("tgt_vocab_size", tgt_vocab_size) hparams.add_hparam("src_vocab_file", src_vocab_file) hparams.add_hparam("tgt_vocab_file", tgt_vocab_file) infer(hparams) pass if __name__ == "__main__": nmt_parser = argparse.ArgumentParser() config.add_arguments(nmt_parser) FLAGS, unparsed = nmt_parser.parse_known_args() tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
def get_args():
    """Create the CLI parser, register all flags, and return parsed arguments."""
    configured_parser = add_arguments(argparse.ArgumentParser())
    return configured_parser.parse_args()
decoder_reference_list, decoder_prediction_list, cls_logits, cls_orig_logits,
cls_labels, vocab, sent_embs, adv_sent_embs, is_test=True,
orig_alphas=orig_alphas, trans_alphas=trans_alphas,
cls_logits_def=cls_logits_def, cls_origs_def=cls_origs_def)
# NOTE(review): the lines above are the tail of a call whose opening (and the
# enclosing function) lie outside this chunk; left byte-identical.


def main(args):
    """Dispatch to the train/test/conditional-test phases selected by flags.

    The flags are not mutually exclusive: any combination of do_train,
    do_test and do_cond_test runs, in that order.
    """
    if args.do_train:
        train(args)
    if args.do_test:
        test(args)
    if args.do_cond_test:
        test_adv_pos_neg(args)


if __name__ == '__main__':
    # NOTE(review): config.add_arguments() here returns the parsed args
    # directly (no parser is passed in), unlike the usual add_arguments(parser)
    # pattern elsewhere — confirm against the config module.
    args = config.add_arguments()
    main(args)