def train(self):
    run_train(model=self.enc_dec,
              optimizer=self.optimizer,
              train_queries=self.train_queries,
              val_queries=self.val_queries,
              test_queries=self.test_queries,
              logger=self.logger,
              max_burn_in=self.args.max_burn_in,
              batch_size=self.args.batch_size,
              log_every=100,
              val_every=self.args.val_every,
              tol=self.args.tol,
              max_iter=self.args.max_iter,
              inter_weight=self.args.inter_weight,
              path_weight=self.args.path_weight,
              model_file=self.model_file,
              edge_conv=self.args.edge_conv,
              geo_train=self.args.geo_train,
              val_queries_geo=self.val_queries_geo,
              test_queries_geo=self.test_queries_geo)
    torch.save(self.enc_dec.state_dict(), self.model_file)
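# --- Illustration (not part of the training pipeline) ------------------------
# The save/restore pattern above (torch.save on state_dict(), later
# load_state_dict to restore) is standard PyTorch. A minimal self-contained
# sketch of that round trip; the toy model and file name below are purely
# hypothetical, not names from this codebase.
def _demo_checkpoint_round_trip(path="enc_dec_demo.pt"):
    import torch
    import torch.nn as nn
    model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 2))
    # Persist only the parameter tensors, not the module object itself.
    torch.save(model.state_dict(), path)
    # map_location="cpu" keeps the restore device-independent.
    model.load_state_dict(torch.load(path, map_location="cpu"))
    return model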
# Restore pretrained weights; `dict` is the state dict loaded earlier in the
# script (note: the name shadows the Python builtin).
enc_dec.load_state_dict(dict)

# Only parameters with requires_grad are optimized; frozen ones stay fixed.
if args.opt == "sgd":
    optimizer = optim.SGD([p for p in enc_dec.parameters() if p.requires_grad],
                          lr=args.lr, momentum=0)
elif args.opt == "adam":
    optimizer = optim.Adam([p for p in enc_dec.parameters() if p.requires_grad],
                           lr=args.lr)

# The log and model files share one hyperparameter-derived name; only the
# extension differs.
log_file = args.log_dir + "/{data:s}-{beta:d}-{depth:d}-{embed_dim:d}-{lr:f}-{decoder:s}-{inter_decoder:s}.log".format(
    data=args.data_dir.strip().split("/")[-1],
    beta=args.beta,
    depth=args.depth,
    embed_dim=args.embed_dim,
    lr=args.lr,
    decoder=args.decoder,
    inter_decoder=args.inter_decoder)
model_file = args.model_dir + "/{data:s}-{beta:d}-{depth:d}-{embed_dim:d}-{lr:f}-{decoder:s}-{inter_decoder:s}".format(
    data=args.data_dir.strip().split("/")[-1],
    beta=args.beta,
    depth=args.depth,
    embed_dim=args.embed_dim,
    lr=args.lr,
    decoder=args.decoder,
    inter_decoder=args.inter_decoder)
logger = setup_logging(log_file)

run_train(enc_dec, optimizer, train_queries, val_queries, test_queries, logger,
          max_burn_in=args.max_burn_in, val_every=args.val_every,
          model_file=model_file, pretrain=args.pretrain)
# Skip the final checkpoint in pretraining mode.
if not args.pretrain:
    torch.save(enc_dec.state_dict(), model_file)
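# --- Illustration (hypothetical) ----------------------------------------------
# setup_logging is called above but not defined in this file. A minimal sketch
# of what such a helper typically looks like, assuming it should append to the
# given log file and mirror records to the console; the real implementation
# may differ.
def _demo_setup_logging(log_file, level=None):
    import logging
    level = logging.INFO if level is None else level
    logger = logging.getLogger("train")
    logger.setLevel(level)
    fmt = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    fh = logging.FileHandler(log_file)   # persist this run's records
    fh.setFormatter(fmt)
    logger.addHandler(fh)
    ch = logging.StreamHandler()         # echo the same records to the console
    ch.setFormatter(fmt)
    logger.addHandler(ch)
    return logger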
    lr=args.lr,
    decoder=args.decoder,
    inter_decoder=args.inter_decoder)
model_file = args.model_dir + "/{data:s}-{depth:d}-{embed_dim:d}-{lr:f}-{decoder:s}-{inter_decoder:s}".format(
    data=args.data_dir.strip().split("/")[-1],
    depth=args.depth,
    embed_dim=args.embed_dim,
    lr=args.lr,
    decoder=args.decoder,
    inter_decoder=args.inter_decoder)
logger = setup_logging(log_file)

# Warm-start from the previously trained checkpoint.
enc_dec.load_state_dict(torch.load(model_file))

print("Loading edge data..")
train_queries = load_queries_by_formula(args.data_dir + "/train_edges.pkl")
val_queries = load_test_queries_by_formula(args.data_dir + "/val_edges-split.pkl")
test_queries = load_test_queries_by_formula(args.data_dir + "/test_edges-split.pkl")

print("Loading query data..")
# Fold the depth-2 and depth-3 query files into the edge-level query dicts.
for i in range(2, 4):
    train_queries.update(load_queries_by_formula(args.data_dir + "/train_queries_{:d}.pkl".format(i)))
    i_val_queries = load_test_queries_by_formula(args.data_dir + "/val_queries_{:d}-newclean.pkl".format(i))
    val_queries["one_neg"].update(i_val_queries["one_neg"])
    val_queries["full_neg"].update(i_val_queries["full_neg"])
    i_test_queries = load_test_queries_by_formula(args.data_dir + "/test_queries_{:d}-newclean.pkl".format(i))
    test_queries["one_neg"].update(i_test_queries["one_neg"])
    test_queries["full_neg"].update(i_test_queries["full_neg"])

optimizer = optim.Adam([p for p in enc_dec.parameters() if p.requires_grad], lr=args.lr)
run_train(enc_dec, optimizer, train_queries, val_queries, test_queries, logger, max_iter=1)
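# --- Illustration (toy data) ---------------------------------------------------
# The loop above assumes the test-query loaders return dicts keyed by
# "one_neg" and "full_neg", each mapping a query formula to its queries, so
# per-depth files can be folded together with .update(). The formula keys
# below are made up for demonstration.
def _demo_merge_test_queries():
    val_queries = {"one_neg": {"f1": ["q1"]}, "full_neg": {"f1": ["q2"]}}
    i_val_queries = {"one_neg": {"f2": ["q3"]}, "full_neg": {"f2": ["q4"]}}
    val_queries["one_neg"].update(i_val_queries["one_neg"])
    val_queries["full_neg"].update(i_val_queries["full_neg"])
    assert set(val_queries["one_neg"]) == {"f1", "f2"}
    return val_queries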
data=args.data_dir.strip().split("/")[-1], depth=args.depth, embed_dim=args.embed_dim, lr=args.lr, decoder=args.decoder, inter_decoder=args.inter_decoder) print log_file model_file = args.model_dir + "/{data:s}-{depth:d}-{embed_dim:d}-{lr:f}-{decoder:s}-{inter_decoder:s}.log".format( data=args.data_dir.strip().split("/")[-1], depth=args.depth, embed_dim=args.embed_dim, lr=args.lr, decoder=args.decoder, inter_decoder=args.inter_decoder) logger = setup_logging(log_file) run_train(enc_dec, optimizer, train_paths, val_paths, test_paths, train_inters, val_inters, test_inters, logger, max_iter=args.max_iter, max_path_len=args.max_path_len, max_inter_size=args.max_inter_size, val_every=args.val_every) torch.save(enc_dec.state_dict(), model_file)