def main(args):
    """Entry point for the clothing-review model: train, test (inference), or eval.

    Side effects: seeds RNGs, loads the dataset, may create the checkpoint
    directory and set ``args.model_file``, and opens/closes a summary writer.

    Args:
        args: parsed command-line namespace (expects .test, .train, .eval,
            .model_path, .data_name, .model_name, .model_file).
    """
    # Fixed seed for reproducibility across runs.
    set_seed(1111)

    # Load dataset splits and vocabulary.
    data_obj = _Data()
    train_data, valid_data, vocab_obj = data_obj.f_load_data_cloth(args)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    logger_obj = _LOGGER()
    logger_obj.f_add_writer(args)

    if not args.test:
        # Build a timestamped checkpoint path:
        #   <model_path>/<data_name>_<model_name>/model_best_<d_m_H_M>.pt
        now_time = datetime.datetime.now()
        time_name = (str(now_time.day) + "_" + str(now_time.month) + "_"
                     + str(now_time.hour) + "_" + str(now_time.minute))

        model_file = os.path.join(args.model_path,
                                  args.data_name + "_" + args.model_name)
        if not os.path.isdir(model_file):
            print("create a directory ", model_file)
        # makedirs(exist_ok=True) is race-free, unlike the isdir()+mkdir()
        # pair, and also creates any missing parent directories.
        os.makedirs(model_file, exist_ok=True)

        args.model_file = model_file + "/model_best_" + time_name + ".pt"
        print("model file ", args.model_file)

    print("vocab_size: ", len(vocab_obj.m_w2i))

    # Build the network and report trainable-parameter counts.
    network = _NETWORK(vocab_obj, args, device=device)
    total_param_num = 0
    for name, param in network.named_parameters():
        if param.requires_grad:
            param_num = param.numel()
            total_param_num += param_num
            print(name, "\t", param_num)
    print("total parameters num", total_param_num)

    if args.train:
        optimizer = _OPTIM(network.parameters(), args)
        trainer = _TRAINER(vocab_obj, args, device)
        trainer.f_train(train_data, valid_data, network, optimizer, logger_obj)

    if args.test:
        print("=" * 10, "eval", "=" * 10)
        print("=" * 10, "inference", "=" * 10)
        # Reload the best checkpoint and run inference on the validation split.
        infer = _INFER(vocab_obj, args, device)
        infer.f_init_infer(network, args.model_file, reload_model=True)
        infer.f_inference(valid_data)

    if args.eval:
        print("=" * 10, "eval", "=" * 10)
        eval_obj = _EVAL(vocab_obj, args, device)
        eval_obj.f_init_eval(network, args.model_file, reload_model=True)
        eval_obj.f_eval(valid_data)

    logger_obj.f_close_writer()
def main(args):
    """Entry point for the attribute network: train (optionally DDP) or eval.

    Side effects: seeds RNGs, may initialize a NCCL process group, may create
    the checkpoint directory and set ``args.model_file``.

    Args:
        args: parsed command-line namespace (expects .parallel, .local_rank,
            .data_name, .train, .eval, .model_path, .model_name, .model_file).

    Raises:
        ValueError: if ``args.data_name`` matches no known dataset.
    """
    # Fixed seed for reproducibility across runs.
    set_seed(1111)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('device', device)

    local_rank = None
    if args.parallel:
        # One process per GPU; NCCL backend for distributed training.
        local_rank = args.local_rank
        torch.distributed.init_process_group(backend="nccl")
        device = torch.device('cuda:{}'.format(local_rank))

    # Pick the dataset loader from the data name.  The last matching key
    # wins, mirroring the original cascade of independent `if` tests.
    data_obj = _DATA()
    train_data = valid_data = vocab_obj = None
    for key, loader in (("beer", data_obj.f_load_data_movie),
                        ("wine", data_obj.f_load_data_movie),
                        ("yelp", data_obj.f_load_data_yelp),
                        ("movie", data_obj.f_load_data_movie),
                        ("lthing", data_obj.f_load_data_movie)):
        if key in args.data_name:
            train_data, valid_data, vocab_obj = loader(args)
    if vocab_obj is None:
        # Fail fast with a clear message instead of a NameError further down.
        raise ValueError("unsupported data_name: %s" % args.data_name)

    if args.train:
        # Timestamped checkpoint path:
        #   <model_path>/<data_name>_<model_name>/model_best_<m_d_H_M>.pt
        now_time = datetime.now()
        time_name = (str(now_time.month) + "_" + str(now_time.day) + "_"
                     + str(now_time.hour) + "_" + str(now_time.minute))
        model_file = os.path.join(args.model_path,
                                  args.data_name + "_" + args.model_name)
        if not os.path.isdir(model_file):
            print("create a directory", model_file)
        # makedirs(exist_ok=True) is race-free, unlike the isdir()+mkdir()
        # pair, and also creates any missing parent directories.
        os.makedirs(model_file, exist_ok=True)
        args.model_file = model_file + "/model_best_" + time_name + ".pt"
        print("model_file", model_file)

    print("vocab_size", vocab_obj.vocab_size)
    print("user num", vocab_obj.user_num)
    print("item num", vocab_obj.item_num)

    # Build the network and report trainable-parameter counts.
    network = _ATTR_NETWORK(vocab_obj, args, device)
    total_param_num = 0
    for name, param in network.named_parameters():
        if param.requires_grad:
            param_num = param.numel()
            total_param_num += param_num
            print(name, "\t", param_num)
    print("total parameters num", total_param_num)

    if args.train:
        logger_obj = _LOGGER()
        logger_obj.f_add_writer(args)

        if args.parallel:
            network = torch.nn.parallel.DistributedDataParallel(
                network,
                device_ids=[local_rank],
                output_device=local_rank,
                find_unused_parameters=True)

        optimizer = _OPTIM(network.parameters(), args)
        trainer = _TRAINER(args, device)
        trainer.f_train(train_data, valid_data, network, optimizer,
                        logger_obj, local_rank)
        logger_obj.f_close_writer()

    if args.eval:
        print("=" * 10, "eval", "=" * 10)
        eval_obj = _EVAL(vocab_obj, args, device)
        network = network.to(device)
        eval_obj.f_init_eval(network, args.model_file, reload_model=True)
        eval_obj.f_eval(train_data, valid_data)
def main(args):
    """Entry point for the REVIEWDI model: train, test (inference), or eval.

    Side effects: seeds RNGs, loads the dataset, may create the checkpoint
    directory and set ``args.model_file``.

    Args:
        args: parsed command-line namespace (expects .train, .test, .eval,
            .model_path, .model_name, .data_name, .model_file).
    """
    # Fixed seed for reproducibility across runs.
    set_seed(1111)

    # Load dataset splits and vocabulary.
    data_obj = _Data()
    train_data, valid_data, vocab_obj = data_obj.f_load_data(args)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device", device)

    if args.train:
        # Timestamped checkpoint path:
        #   <model_path>/<model_name>/model_best_<m_d_H_M>_<data_name>.pt
        now_time = datetime.datetime.now()
        time_name = (str(now_time.month) + "_" + str(now_time.day) + "_"
                     + str(now_time.hour) + "_" + str(now_time.minute))
        # Bug fix: build the path with os.path.join throughout instead of
        # embedding "/" in a component, and ensure the checkpoint directory
        # exists -- the sibling entry points create it, but this one did
        # not, so saving the model on a fresh run would fail.
        model_dir = os.path.join(args.model_path, args.model_name)
        os.makedirs(model_dir, exist_ok=True)
        args.model_file = os.path.join(
            model_dir,
            "model_best_" + time_name + "_" + args.data_name + ".pt")

    print("vocab_size", vocab_obj.vocab_size)
    print("user num", vocab_obj.user_size)

    # Build the network and report trainable-parameter counts.
    network = REVIEWDI(vocab_obj, args, device=device)
    total_param_num = 0
    for name, param in network.named_parameters():
        if param.requires_grad:
            param_num = param.numel()
            total_param_num += param_num
            print(name, "\t", param_num)
    print("total parameters num", total_param_num)

    if args.train:
        logger_obj = Logger()
        logger_obj.f_add_writer(args)

        optimizer = Optimizer(network.parameters(), args)
        trainer = TRAINER(vocab_obj, args, device)
        trainer.f_train(train_data, valid_data, network, optimizer, logger_obj)
        logger_obj.f_close_writer()

    if args.test or args.eval:
        print("=" * 10, "test", "=" * 10)
        # Reload the best checkpoint and run inference on the validation split.
        infer_obj = INFER(vocab_obj, args, device)
        infer_obj.f_init_infer(network, args.model_file, reload_model=True)
        infer_obj.f_inference(valid_data)

    if args.eval:
        print("=" * 10, "eval", "=" * 10)
        eval_obj = _EVAL(vocab_obj, args, device)
        eval_obj.f_init_eval(network, args.model_file, reload_model=True)
        eval_obj.f_eval(valid_data)