# --- EBM experiment flags ---------------------------------------------------
# Boolean switches and tunables for the energy-based-model training script.
# NOTE(review): this span was whitespace-mangled (all statements fused onto one
# physical line); formatting and block indentation below are reconstructed.
ap.add_argument("--opt_diracq", action="store_true")
ap.add_argument("--opt_sigmoidvar", action="store_true")
ap.add_argument("--opt_pvarbound", type=float, default=0.)
ap.add_argument("--opt_interpretability", action="store_true")
ap.add_argument("--opt_zeroprior", action="store_true")
ap.add_argument("--opt_disentangle", action="store_true")

# Paths
ap.add_argument(
    "--model_path",
    default="/misc/vlgscratch4/ChoGroup/jason/lanmt-ebm/checkpoints/ebm.pt")
ap.add_argument(
    "--result_path",
    default="/misc/vlgscratch4/ChoGroup/jason/lanmt-ebm/checkpoints/ebm.result"
)

OPTS.parse(ap)

# Rebase the hard-coded default paths onto the user-supplied data root.
OPTS.model_path = OPTS.model_path.replace(DATA_ROOT, OPTS.root)
OPTS.result_path = OPTS.result_path.replace(DATA_ROOT, OPTS.root)

# Per-user environment overrides (envswitch.who() identifies the current user).
if envswitch.who() == "shu":
    # Keep only the basename and place checkpoints directly under DATA_ROOT.
    OPTS.model_path = os.path.join(DATA_ROOT, os.path.basename(OPTS.model_path))
    OPTS.result_path = os.path.join(
        DATA_ROOT, os.path.basename(OPTS.result_path))
    # NOTE(review): fixbug1/fixbug2 are grouped inside this branch per the
    # reconstructed indentation — confirm they are not meant to be global.
    OPTS.fixbug1 = True
    OPTS.fixbug2 = True
if envswitch.who() == "jason_prince":
    # Store checkpoints under the user's home tree, bucketed by tokenization.
    OPTS.model_path = os.path.join(
        HOME_DIR, "checkpoints", "ebm", OPTS.dtok,
        os.path.basename(OPTS.model_path))
# --- LANMT experiment flags -------------------------------------------------
# Boolean switches and tunables for the latent-variable NMT training script.
# NOTE(review): this span was whitespace-mangled (all statements fused onto one
# physical line); formatting and indentation below are reconstructed. The final
# `else` branch is truncated in the visible source after `part_index = 0` —
# the remaining single-process defaults (e.g. part_num / gpu_num) presumably
# follow; do not treat this branch as complete.
ap.add_argument("--opt_fp16", action="store_true")
ap.add_argument("--opt_nokl", action="store_true")
ap.add_argument("--opt_klbudget", type=float, default=1.0)
ap.add_argument("--opt_beginanneal", type=int, default=-1)
ap.add_argument("--opt_fastanneal", action="store_true")
ap.add_argument("--opt_diracq", action="store_true")
ap.add_argument("--opt_sigmoidvar", action="store_true")
ap.add_argument("--opt_pvarbound", type=float, default=0.)
ap.add_argument("--opt_interpretability", action="store_true")
ap.add_argument("--opt_zeroprior", action="store_true")
ap.add_argument("--opt_disentangle", action="store_true")

# Paths
ap.add_argument("--model_path", default="{}/lanmt.pt".format(DATA_ROOT))
ap.add_argument("--result_path", default="{}/lanmt.result".format(DATA_ROOT))

OPTS.parse(ap)

# Rebase the default paths onto the user-supplied data root.
OPTS.model_path = OPTS.model_path.replace(DATA_ROOT, OPTS.root)
OPTS.result_path = OPTS.result_path.replace(DATA_ROOT, OPTS.root)

# Determine the number of GPUs to use
horovod_installed = importlib.util.find_spec("horovod") is not None
if torch.cuda.is_available() and horovod_installed:
    # Multi-GPU: each horovod rank handles one data shard on its own GPU.
    import horovod.torch as hvd
    hvd.init()
    torch.cuda.set_device(hvd.local_rank())
    part_index = hvd.rank()
    part_num = hvd.size()
    gpu_num = hvd.size()
else:
    # Single-process fallback (branch truncated in visible source — see note).
    part_index = 0