def main(args):
    """Entry point: load the dataset named by ``args.data_name``, build the
    attribute network (optionally warm-started from a pretrained BPR
    checkpoint), then train / evaluate / run inference per the args flags.

    Args:
        args: parsed command-line namespace; reads data_name, train, eval,
            test, model_path, model_name, pretrain_model_file, model_file.
    """
    set_seed(1111)  # fixed seed for reproducibility

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # FIX: torch.cuda.current_device() raises on CPU-only machines, so only
    # query it when CUDA is actually available.
    if torch.cuda.is_available():
        print('device', device, torch.cuda.current_device())
    else:
        print('device', device)

    # Dataset dispatch by substring match on the dataset name.
    data_obj = _DATA()
    if "yelp" in args.data_name:
        train_data, valid_data, vocab_obj = data_obj.f_load_data_yelp_restaurant(args)
    if "movie" in args.data_name:
        train_data, valid_data, vocab_obj = data_obj.f_load_data_movie(args)
    if "beer" in args.data_name:
        train_data, valid_data, vocab_obj = data_obj.f_load_data_beer(args)
    if "wine" in args.data_name:
        train_data, valid_data, vocab_obj = data_obj.f_load_data_wine(args)
    if "lthing" in args.data_name:
        # NOTE(review): "lthing" reuses the movie loader — confirm intended.
        train_data, valid_data, vocab_obj = data_obj.f_load_data_movie(args)

    if args.train:
        # Timestamped checkpoint path: <model_path>/<data>_<model>/model_best_M_D_H_Min.pt
        now_time = datetime.now()
        time_name = str(now_time.month) + "_" + str(now_time.day) + "_" + str(
            now_time.hour) + "_" + str(now_time.minute)
        model_file = os.path.join(args.model_path,
                                  args.data_name + "_" + args.model_name)
        if not os.path.isdir(model_file):
            print("create a directory", model_file)
        # FIX: makedirs(exist_ok=True) avoids the isdir/mkdir race and also
        # creates missing parent directories.
        os.makedirs(model_file, exist_ok=True)
        args.model_file = model_file + "/model_best_" + time_name + ".pt"
        print("model_file", model_file)

    print("vocab_size", vocab_obj.vocab_size)
    print("user num", vocab_obj.user_num)
    print("item num", vocab_obj.item_num)

    # Optionally load a pretrained BPR network used to warm-start training.
    pretrain_model_file = args.pretrain_model_file
    pretrain_network = None
    if pretrain_model_file != "":
        pretrain_network = BPR(vocab_obj, args, device)
        pretrain_model_abs_file = os.path.join(args.model_path,
                                               pretrain_model_file)
        print("pretrain_model_abs_file", pretrain_model_abs_file)
        checkpoint = torch.load(pretrain_model_abs_file)
        pretrain_network.load_state_dict(checkpoint['model'])

    network = _ATTR_NETWORK(vocab_obj, args, device)

    # Report trainable-parameter counts per tensor and in total.
    total_param_num = 0
    for name, param in network.named_parameters():
        if param.requires_grad:
            param_num = param.numel()
            total_param_num += param_num
            print(name, "\t", param_num)
    print("total parameters num", total_param_num)

    if args.train:
        logger_obj = _LOGGER()
        logger_obj.f_add_writer(args)
        optimizer = _OPTIM(network.parameters(), args)
        trainer = _TRAINER(vocab_obj, args, device)
        trainer.f_train(pretrain_network, train_data, valid_data, network,
                        optimizer, logger_obj)
        logger_obj.f_close_writer()

    if args.eval:
        print("=" * 10, "eval", "=" * 10)
        eval_obj = _EVAL(vocab_obj, args, device)
        network = network.to(device)
        eval_obj.f_init_eval(network, args.model_file, reload_model=True)
        eval_obj.f_eval(train_data, valid_data)

    if args.test:
        # FIX: header previously said "eval" in the test/inference branch.
        print("=" * 10, "test", "=" * 10)
        infer_obj = _INFER(vocab_obj, args, device)
        network = network.to(device)
        infer_obj.f_init_infer(network, args.model_file, reload_model=True)
        infer_obj.f_infer(train_data, valid_data)
def main(args):
    """Entry point (DDP-capable variant): load data, build the attribute
    network, optionally wrap it in DistributedDataParallel, then train and/or
    evaluate per the args flags.

    Args:
        args: parsed command-line namespace; reads data_name, parallel,
            local_rank, train, eval, model_path, model_name, model_file.
    """
    set_seed(1111)  # fixed seed for reproducibility

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('device', device)

    # local_rank stays None in single-process mode; it is also passed to
    # the trainer below.
    local_rank = None
    if args.parallel:
        local_rank = args.local_rank
        torch.distributed.init_process_group(backend="nccl")
        device = torch.device('cuda:{}'.format(local_rank))

    # Dataset dispatch by substring match on the dataset name.
    data_obj = _DATA()
    if "beer" in args.data_name:
        # NOTE(review): "beer" uses the movie loader here — confirm intended.
        train_data, valid_data, vocab_obj = data_obj.f_load_data_movie(args)
    if "wine" in args.data_name:
        # NOTE(review): "wine" uses the movie loader here — confirm intended.
        train_data, valid_data, vocab_obj = data_obj.f_load_data_movie(args)
    if "yelp" in args.data_name:
        train_data, valid_data, vocab_obj = data_obj.f_load_data_yelp(args)
    if "movie" in args.data_name:
        train_data, valid_data, vocab_obj = data_obj.f_load_data_movie(args)
    if "lthing" in args.data_name:
        train_data, valid_data, vocab_obj = data_obj.f_load_data_movie(args)

    if args.train:
        # Timestamped checkpoint path: <model_path>/<data>_<model>/model_best_M_D_H_Min.pt
        now_time = datetime.now()
        time_name = str(now_time.month) + "_" + str(now_time.day) + "_" + str(
            now_time.hour) + "_" + str(now_time.minute)
        model_file = os.path.join(args.model_path,
                                  args.data_name + "_" + args.model_name)
        if not os.path.isdir(model_file):
            print("create a directory", model_file)
        # FIX: makedirs(exist_ok=True) avoids the isdir/mkdir race and also
        # creates missing parent directories.
        os.makedirs(model_file, exist_ok=True)
        args.model_file = model_file + "/model_best_" + time_name + ".pt"
        print("model_file", model_file)

    print("vocab_size", vocab_obj.vocab_size)
    print("user num", vocab_obj.user_num)
    print("item num", vocab_obj.item_num)

    network = _ATTR_NETWORK(vocab_obj, args, device)

    # Report trainable-parameter counts per tensor and in total.
    total_param_num = 0
    for name, param in network.named_parameters():
        if param.requires_grad:
            param_num = param.numel()
            total_param_num += param_num
            print(name, "\t", param_num)
    print("total parameters num", total_param_num)

    if args.train:
        logger_obj = _LOGGER()
        logger_obj.f_add_writer(args)
        if args.parallel:
            network = torch.nn.parallel.DistributedDataParallel(
                network, device_ids=[local_rank], output_device=local_rank,
                find_unused_parameters=True)
        optimizer = _OPTIM(network.parameters(), args)
        trainer = _TRAINER(args, device)
        trainer.f_train(train_data, valid_data, network, optimizer,
                        logger_obj, local_rank)
        logger_obj.f_close_writer()

    if args.eval:
        print("=" * 10, "eval", "=" * 10)
        eval_obj = _EVAL(vocab_obj, args, device)
        network = network.to(device)
        eval_obj.f_init_eval(network, args.model_file, reload_model=True)
        eval_obj.f_eval(train_data, valid_data)
def main(args):
    """Entry point (encoder/decoder variant): load the yelp dataset, build the
    network, and train with separate encoder and decoder optimizers, or run
    inference / evaluation per the args flags.

    Args:
        args: parsed command-line namespace; reads train, test, eval,
            model_path, data_name, model_name, model_file.
    """
    set_seed(1111)  # fixed seed for reproducibility

    #### get data
    data_obj = _DATA()
    train_data, valid_data, vocab_obj = data_obj.f_load_data_yelp(args)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device", device)

    if args.train:
        # Timestamped checkpoint path: <model_path>/<data>_<model>/model_best_M_D_H_Min.pt
        now_time = datetime.datetime.now()
        time_name = str(now_time.month) + "_" + str(now_time.day) + "_" + str(
            now_time.hour) + "_" + str(now_time.minute)
        model_file = os.path.join(args.model_path,
                                  args.data_name + "_" + args.model_name)
        if not os.path.isdir(model_file):
            print("create a directory ", model_file)
        # FIX: makedirs(exist_ok=True) avoids the isdir/mkdir race and also
        # creates missing parent directories.
        os.makedirs(model_file, exist_ok=True)
        args.model_file = model_file + "/model_best_" + time_name + ".pt"
        print("model_file", model_file)

    print("vocab_size", vocab_obj.vocab_size)
    print("user num", vocab_obj.user_size)

    ### get model
    network = _NETWORK(vocab_obj, args, device=device)

    ### count trainable parameters per tensor and in total
    total_param_num = 0
    for name, param in network.named_parameters():
        if param.requires_grad:
            param_num = param.numel()
            total_param_num += param_num
            print(name, "\t", param_num)
    print("total parameters num", total_param_num)

    if args.train:
        logger_obj = _LOGGER()
        logger_obj.f_add_writer(args)

        # Two optimizers: the "encoder" optimizer covers embedding +
        # user/item encoder + vocab projection; the "decoder" optimizer
        # covers the generator. (Dead DataParallel code removed.)
        en_parameters = (list(network.m_embedding.parameters())
                         + list(network.m_user_item_encoder.parameters())
                         + list(network.m_output2vocab.parameters()))
        en_optimizer = _OPTIM(en_parameters, args)
        de_parameters = network.m_generator.parameters()
        de_optimizer = _OPTIM(de_parameters, args)

        trainer = _TRAINER(vocab_obj, args, device)
        trainer.f_train(train_data, valid_data, network, en_optimizer,
                        de_optimizer, logger_obj)
        logger_obj.f_close_writer()

    if args.test:
        print("=" * 10, "test", "=" * 10)
        infer_obj = _INFER(vocab_obj, args, device)
        infer_obj.f_init_infer(network, args.model_file, reload_model=True)
        infer_obj.f_inference(train_data, valid_data)

    if args.eval:
        print("=" * 10, "eval", "=" * 10)
        eval_obj = _EVAL(vocab_obj, args, device)
        eval_obj.f_init_eval(network, args.model_file, reload_model=True)
        eval_obj.f_eval(train_data, valid_data)
def main(args):
    """Entry point (cloth variant): load the cloth dataset, build the
    network, then train / test (inference) / evaluate per the args flags.

    Args:
        args: parsed command-line namespace; reads train, test, eval,
            model_path, data_name, model_name, model_file.
    """
    #### get data
    set_seed(1111)  # fixed seed for reproducibility
    data_obj = _Data()
    train_data, valid_data, vocab_obj = data_obj.f_load_data_cloth(args)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    logger_obj = _LOGGER()
    logger_obj.f_add_writer(args)

    if not args.test:
        # NOTE(review): time_name here is day_month_hour_minute, unlike the
        # other entry points (month_day) — confirm the ordering is intended.
        now_time = datetime.datetime.now()
        time_name = str(now_time.day) + "_" + str(now_time.month) + "_" + str(
            now_time.hour) + "_" + str(now_time.minute)
        model_file = os.path.join(args.model_path,
                                  args.data_name + "_" + args.model_name)
        if not os.path.isdir(model_file):
            print("create a directory ", model_file)
        # FIX: makedirs(exist_ok=True) avoids the isdir/mkdir race and also
        # creates missing parent directories.
        os.makedirs(model_file, exist_ok=True)
        args.model_file = model_file + "/model_best_" + time_name + ".pt"
        print("model file ", args.model_file)

    print("vocab_size: ", len(vocab_obj.m_w2i))

    ### get model
    network = _NETWORK(vocab_obj, args, device=device)

    ### count trainable parameters per tensor and in total
    total_param_num = 0
    for name, param in network.named_parameters():
        if param.requires_grad:
            param_num = param.numel()
            total_param_num += param_num
            print(name, "\t", param_num)
    print("total parameters num", total_param_num)

    if args.train:
        optimizer = _OPTIM(network.parameters(), args)
        trainer = _TRAINER(vocab_obj, args, device)
        trainer.f_train(train_data, valid_data, network, optimizer, logger_obj)

    if args.test:
        # (Dead commented-out eval code removed; headers kept as-is.)
        print("=" * 10, "eval", "=" * 10)
        print("=" * 10, "inference", "=" * 10)
        infer = _INFER(vocab_obj, args, device)
        infer.f_init_infer(network, args.model_file, reload_model=True)
        infer.f_inference(valid_data)

    if args.eval:
        print("=" * 10, "eval", "=" * 10)
        eval_obj = _EVAL(vocab_obj, args, device)
        eval_obj.f_init_eval(network, args.model_file, reload_model=True)
        eval_obj.f_eval(valid_data)

    logger_obj.f_close_writer()
def main(args):
    """Entry point (generator variant, DDP-capable): load the yelp dataset,
    build the generator network, load a frozen pretrained encoder checkpoint,
    then train / test / evaluate per the args flags.

    Args:
        args: parsed command-line namespace; reads parallel, local_rank,
            train, test, eval, model_path, E_model_file, data_name,
            model_name, model_file.
    """
    set_seed(1111)  # fixed seed for reproducibility

    #### get data
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device", device)

    # FIX: local_rank must exist even in single-process mode, because it is
    # passed to trainer.f_train_M() below unconditionally; previously it was
    # only bound inside the args.parallel branch (NameError when training
    # without --parallel).
    local_rank = None
    if args.parallel:
        local_rank = args.local_rank
        torch.distributed.init_process_group(backend="nccl")
        device = torch.device('cuda:{}'.format(local_rank))

    data_obj = _DATA()
    train_data, valid_data, vocab_obj = data_obj.f_load_data_yelp(args)

    if args.train:
        # Timestamped checkpoint path: <model_path>/<data>_<model>/model_best_M_D_H_Min.pt
        now_time = datetime.datetime.now()
        time_name = str(now_time.month) + "_" + str(now_time.day) + "_" + str(
            now_time.hour) + "_" + str(now_time.minute)
        model_file = os.path.join(args.model_path,
                                  args.data_name + "_" + args.model_name)
        if not os.path.isdir(model_file):
            print("create a directory ", model_file)
        # FIX: makedirs(exist_ok=True) avoids the isdir/mkdir race and also
        # creates missing parent directories.
        os.makedirs(model_file, exist_ok=True)
        args.model_file = model_file + "/model_best_" + time_name + ".pt"
        print("model_file", model_file)

    print("vocab_size", vocab_obj.vocab_size)
    print("user num", vocab_obj.user_size)

    ### get model
    network = _GEN_NETWORK(vocab_obj, args)

    ### count trainable parameters per tensor and in total
    total_param_num = 0
    for name, param in network.named_parameters():
        if param.requires_grad:
            param_num = param.numel()
            total_param_num += param_num
            print(name, "\t", param_num)
    print("total parameters num", total_param_num)

    if args.train:
        logger_obj = _LOGGER()
        logger_obj.f_add_writer(args)

        # Load the pretrained encoder checkpoint used as a fixed teacher
        # during generator training. (Dead commented DDP/map_location code
        # removed.)
        E_network = _ENC_NETWORK(vocab_obj, args)
        E_network = E_network.to(device)
        model_path = args.model_path
        E_model_file = args.E_model_file
        E_model_abs_file = os.path.join(model_path, E_model_file)
        print("E_model_abs_file", E_model_abs_file)
        check_point = torch.load(E_model_abs_file)
        E_network.load_state_dict(check_point['model'])

        network = network.to(device)
        if args.parallel:
            network = torch.nn.parallel.DistributedDataParallel(
                network, device_ids=[local_rank], output_device=local_rank,
                find_unused_parameters=True)

        de_parameters = network.parameters()
        de_optimizer = _OPTIM(de_parameters, args)
        trainer = _TRAINER(vocab_obj, args, device)
        trainer.f_train_M(train_data, valid_data, E_network, network,
                          de_optimizer, logger_obj, local_rank)
        logger_obj.f_close_writer()

    if args.test:
        print("=" * 10, "test", "=" * 10)
        infer_obj = _INFER(vocab_obj, args, device)
        infer_obj.f_init_infer(network, args.model_file, reload_model=True)
        infer_obj.f_inference(train_data, valid_data)

    if args.eval:
        print("=" * 10, "eval", "=" * 10)
        eval_obj = _EVAL(vocab_obj, args, device)
        eval_obj.f_init_eval(network, args.model_file, reload_model=True)
        eval_obj.f_eval(train_data, valid_data)
# print("=="*20) # print("device", network.cuda()) # en_parameters = list(network.module.m_embedding.parameters()) + list(network.module.m_user_item_encoder.parameters()) + list(network.module.m_output2vocab.parameters()) # en_optimizer = _OPTIM(en_parameters, args) # de_parameters = network.module.m_generator.parameters() # de_optimizer = _OPTIM(de_parameters, args) pretrain_encoder = _USER_ITEM_ENCODER(vocab_obj, args, device) pretrain_parameters = list(pretrain_encoder.parameters()) <<<<<<< HEAD ======= >>>>>>> eea2344b44411e4245f2857f576a41d25d16ec97 pretrain_optimizer = _OPTIM(pretrain_parameters, args) en_parameters = list(network.m_embedding.parameters()) + list(network.m_user_item_encoder.parameters()) + list(network.m_output2vocab.parameters()) en_optimizer = _OPTIM(en_parameters, args) # print("=="*20) # print("user item encoder parameter") # for name, p in network.m_user_item_encoder.named_parameters(): # print(name) de_parameters = network.m_generator.parameters() de_optimizer = _OPTIM(de_parameters, args) # print("=="*20) # print("generator parameter") # for name, p in network.m_generator.named_parameters(): # print(name)