def test(**kwargs):
    """Evaluate a trained model on the test split of the chosen dataset.

    Keyword Args:
        dataset: optional dataset name; selects `<dataset>_Config` from
            `config` (defaults to `AmazonDigitalMusic_Config`). All other
            kwargs are forwarded to `opt.parse`.

    Raises:
        ValueError: if no checkpoint path (`--pth_path`) was supplied, or if
            the model's `num_fea` does not match `opt.num_fea`.
    """
    if 'dataset' not in kwargs:
        opt = getattr(config, 'AmazonDigitalMusic_Config')()
    else:
        opt = getattr(config, kwargs['dataset'] + '_Config')()
    opt.parse(kwargs)

    # `assert` is stripped under `python -O`; raise so the check always runs.
    if not opt.pth_path:
        raise ValueError("--pth_path is required for testing")

    # Seed every RNG for reproducible evaluation.
    random.seed(opt.seed)
    np.random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    if opt.use_gpu:
        torch.cuda.manual_seed_all(opt.seed)

    if len(opt.gpu_ids) == 0 and opt.use_gpu:
        torch.cuda.set_device(opt.gpu_id)

    model = Model(opt, getattr(models, opt.model))

    # Validate num_fea and load the checkpoint BEFORE any DataParallel wrap:
    # nn.DataParallel does not forward custom attributes, so `model.net` and
    # `model.load` would raise AttributeError on the wrapped model.
    if model.net.num_fea != opt.num_fea:
        raise ValueError(f"the num_fea of {opt.model} is error, please specific --num_fea={model.net.num_fea}")
    model.load(opt.pth_path)
    print(f"load model: {opt.pth_path}")

    if opt.use_gpu:
        model.cuda()
        if len(opt.gpu_ids) > 0:
            model = nn.DataParallel(model, device_ids=opt.gpu_ids)

    test_data = ReviewData(opt.data_root, mode="Test")
    test_data_loader = DataLoader(test_data, batch_size=opt.batch_size, shuffle=False, collate_fn=collate_fn)
    print(f"{now()}: test in the test dataset")
    predict_loss, test_mse, test_mae = predict(model, test_data_loader, opt)
def test(**kwargs):
    """Evaluate a trained model on the test split, logging results to a file.

    Keyword Args:
        dataset: optional dataset name; selects `<dataset>_Config` from
            `config` (defaults to `Gourmet_Food_data_Config`). All other
            kwargs are forwarded to `opt.parse`.
    """
    if 'dataset' not in kwargs:
        opt = getattr(config, 'Gourmet_Food_data_Config')()
    else:
        opt = getattr(config, kwargs['dataset'] + '_Config')()
    opt.parse(kwargs)

    logging.basicConfig(
        filename=f"logs/{opt}.log",
        filemode="w",
        format="%(asctime)s %(name)s:%(levelname)s:%(message)s",
        datefmt="%d-%m-%Y %H:%M:%S",
        level=logging.DEBUG)

    # Seed every RNG for reproducible evaluation.
    random.seed(opt.seed)
    np.random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    if opt.use_gpu:
        torch.cuda.manual_seed_all(opt.seed)

    if len(opt.gpu_ids) == 0 and opt.use_gpu:
        torch.cuda.set_device(opt.gpu_id)

    model = Model(opt, getattr(models, opt.model))
    # Only move to GPU when requested — the unconditional `.cuda()` crashed
    # on CPU-only machines even though the seeding code honors opt.use_gpu.
    if opt.use_gpu:
        model.cuda()

    print("load...")
    # Prefer an explicitly supplied checkpoint (consistent with the sibling
    # test() above); fall back to the legacy hard-coded path.
    pth_path = getattr(opt, 'pth_path', '') or \
        "./checkpoints/DPHP_Gourmet_Food_data_cfg-Gourmet_Food_data-poolatt-lr0.001-wd0.0005-drop0.1-id32-hidden100.pth"
    model.load(pth_path)

    test_data = ReviewData(opt.data_root, mode="Test")
    test_data_loader = DataLoader(test_data, batch_size=opt.batch_size, shuffle=False, collate_fn=collate_fn)
    auc, corr, predict_loss = predict(model, test_data_loader, opt, logging)
def generate_conditional_sentence(**kwargs):
    """Print the most important user and item review for one test sample.

    Runs the model in "Generate" mode on a single test example and prints the
    plain-text review that received the highest attention weight on the user
    side and on the item side.

    Keyword Args:
        dataset: optional dataset name; selects `<dataset>_Config` from
            `config` (defaults to `AmazonDigitalMusic_Config`). All other
            kwargs are forwarded to `opt.parse`.

    Raises:
        ValueError: if no checkpoint path (`--pth_path`) was supplied, or if
            the model's `num_fea` does not match `opt.num_fea`.
    """
    if 'dataset' not in kwargs:
        opt = getattr(config, 'AmazonDigitalMusic_Config')()
    else:
        opt = getattr(config, kwargs['dataset'] + '_Config')()
    opt.parse(kwargs)

    # `assert` is stripped under `python -O`; raise so the check always runs.
    if not opt.pth_path:
        raise ValueError("--pth_path is required for generation")

    # Seed every RNG for reproducible output.
    random.seed(opt.seed)
    np.random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    if opt.use_gpu:
        torch.cuda.manual_seed_all(opt.seed)

    if len(opt.gpu_ids) == 0 and opt.use_gpu:
        torch.cuda.set_device(opt.gpu_id)

    model = Model(opt, getattr(models, opt.model))

    # Validate num_fea and load the checkpoint BEFORE any DataParallel wrap:
    # nn.DataParallel does not forward custom attributes, so `model.net` and
    # `model.load` would raise AttributeError on the wrapped model.
    if model.net.num_fea != opt.num_fea:
        raise ValueError(f"the num_fea of {opt.model} is error, please specific --num_fea={model.net.num_fea}")
    model.load(opt.pth_path)
    print(f"load model: {opt.pth_path}")

    if opt.use_gpu:
        model.cuda()
        if len(opt.gpu_ids) > 0:
            model = nn.DataParallel(model, device_ids=opt.gpu_ids)

    test_data = ReviewData(opt.data_root, mode="Test")
    # batch_size=1 so each iteration yields exactly one (user, item) pair.
    test_data_loader = DataLoader(test_data, batch_size=1, shuffle=False, collate_fn=collate_fn)
    print(f"{now()}: generating conditional sentence...")

    model.eval()
    with torch.no_grad():
        # TODO(review): paths are hard-coded to AmazonDigitalMusic even when a
        # different dataset is selected — presumably they should be derived
        # from opt.data_root; confirm the on-disk layout before changing.
        user_review_dict = np.load("./dataset/AmazonDigitalMusic/train/plainUserReviews.npy",
                                   allow_pickle=True).item()
        item_review_dict = np.load("./dataset/AmazonDigitalMusic/train/plainItemReviews.npy",
                                   allow_pickle=True).item()

        # Hard-coded sample index: only the 11th test example is generated.
        cnt = 10
        for idx, (test_input, scores) in enumerate(test_data_loader):
            if idx == cnt:
                test_input = unpack_input(opt, test_input)
                output = model(test_input, mode="Generate")

                # assumes test_input[2]/[3] hold uid/iid tensors — TODO confirm
                uid = test_input[2].item()
                user_reviews = user_review_dict[uid]
                iid = test_input[3].item()
                item_reviews = item_review_dict[iid]

                # output[0]/output[1] are per-review importance scores; print
                # the review with the highest score on each side.
                imp_user_review_id = output[0].cpu().numpy().squeeze()
                imp_user_review_id = np.argmax(imp_user_review_id)
                print(user_reviews[imp_user_review_id])

                imp_item_review_id = output[1].cpu().numpy().squeeze()
                imp_item_review_id = np.argmax(imp_item_review_id)
                print(item_reviews[imp_item_review_id])
                break