def evaluate(split='valid'):
    """Rank items for every user and print recall / MAP@k for one split.

    The model is scored once on the training tensor; entries the user has
    already interacted with in training are zeroed out so only unseen items
    can be recommended.  Ground truth comes from the module-level variable
    named ``<split>_data`` (e.g. ``valid_data`` or ``test_data``).

    Args:
        split: which split's ground truth to evaluate against
            ('valid' by default).

    Side effects:
        Puts ``model`` into eval mode and prints the metric dict.
    """
    # Explicit module-level lookup instead of eval() on a constructed
    # string — same behavior, no arbitrary-code-execution surface.
    y_true = globals()[split + '_data']
    model.eval()
    # NOTE(review): scoring always uses train_tensor regardless of `split`;
    # this matches the original code (predict from training interactions,
    # evaluate against the split's held-out interactions).
    y_score, _, _ = model(train_tensor)
    y_score.detach_()
    y_score = y_score.squeeze(0)
    # Mask out items already seen in training so they are never recommended.
    # Assumes train_data is a scipy COO matrix (has .row/.col) — TODO confirm.
    y_score[train_data.row, train_data.col] = 0
    # Top-N recommended item indices per user (row).
    _, rec_items = torch.topk(y_score, args.N, dim=1)
    run = sort2query(rec_items[:, 0:args.N])
    test = csr2test(y_true.tocsr())
    evaluator = Evaluator({'recall', 'map_cut'})
    evaluator.evaluate(run, test)
    result = evaluator.show([
        'recall_5', 'recall_10', 'recall_15', 'recall_20',
        'map_cut_5', 'map_cut_10', 'map_cut_15', 'map_cut_20'
    ])
    print(result)
# Main training loop: one train() pass per epoch, then rank items with the
# current model, report recall@k on the held-out set T, and append a
# tab-separated result line to the 'result' file.
for epoch in range(1, args.maxiter + 1):
    train(epoch)
    model.eval()
    score, _, _ = model(Rtensor)
    score = score.squeeze(0)
    # Zero out training interactions so only unseen items are ranked.
    # Assumes R is a scipy COO matrix (has .row/.col) — TODO confirm.
    score[R.row, R.col] = 0
    # Descending sort per user; keep the top-N item indices.
    _, idx = torch.sort(score, 1, True)
    run = sort2query(idx[:, 0:args.N])
    test = csr2test(T.tocsr())
    evaluator = Evaluator({'recall'})
    evaluator.evaluate(run, test)
    result = evaluator.show(
        ['recall_5', 'recall_10', 'recall_15', 'recall_20'])
    print(result)
    # One log line per epoch: model tag, hyper-parameters, then each metric.
    line = 'cVAE\t{}\t{}\t{}\t{}\t0'.format(args.data, args.alpha, args.beta,
                                            len(args.layer))
    # Only the metric values are needed, so iterate .values() directly.
    for value in result.values():
        line += '\t{:.5f}'.format(value)
    line += '\r\n'
    # Context manager guarantees the handle is closed even if write() raises.
    with open('result', 'a') as result_file:
        result_file.write(line)

if args.save:
    # Checkpoint path encodes the model variant and the layer sizes,
    # e.g. <directory>/model/cvae_200_100.
    name = 'cvae' if args.rating else 'fvae'
    path = directory + '/model/' + name
    for layer_size in args.layer:
        path += '_' + str(layer_size)
    # NOTE(review): the visible chunk ends here after moving the model to
    # CPU; the actual save call (e.g. torch.save) presumably follows in the
    # part of the file not shown — confirm against the full source.
    model.cpu()