def eval_rank(model, test_loader, lossfn, parallel, top_k):
    """Evaluate a ranking model with leave-one-out HR@k / NDCG@k.

    Args:
        model: recommender taking (user_idxs, item_idxs) and returning scores.
        test_loader: yields batches of (user_idxs, item_idxs); by convention of
            this evaluation, the first item of each candidate list is the
            ground-truth positive (gt_item = i_idxs[0]) — TODO confirm against
            the dataset builder.
        lossfn: unused here; kept so the signature matches the training loop.
        parallel: if True and multiple GPUs are visible, `predictions` is
            assumed to be a per-device sequence of score tensors (DataParallel
            style) and candidates are re-split per device.
        top_k: size of the recommendation list.

    Returns:
        (mean HR, mean NDCG) over all evaluated lists, as numpy floats.
    """
    model.eval()
    HR, NDCG = [], []
    # No gradients needed during evaluation; avoids building autograd graphs.
    with torch.no_grad():
        for batch_id, batch in enumerate(test_loader):
            u_idxs = batch[0].long().cuda()
            i_idxs = batch[1].long().cuda()
            predictions = model(u_idxs, i_idxs)
            if parallel and torch.cuda.device_count() > 1:
                # Re-split the flat candidate list so row d holds the
                # candidates scored on device d.
                i_idxs = i_idxs.view(torch.cuda.device_count(), -1)
                for device_idx, prediction in enumerate(predictions):
                    device = torch.device('cuda:{}'.format(device_idx))
                    i_idx = i_idxs[device_idx, :].to(device)
                    _, indices = torch.topk(prediction, top_k)
                    recommends = torch.take(i_idx, indices).cpu().numpy().tolist()
                    gt_item = i_idx[0].item()  # positive item leads the list
                    HR.append(hit(gt_item, recommends))
                    NDCG.append(ndcg(gt_item, recommends))
            else:
                _, indices = torch.topk(predictions, top_k)
                recommends = torch.take(i_idxs, indices).cpu().numpy().tolist()
                gt_item = i_idxs[0].item()  # positive item leads the list
                HR.append(hit(gt_item, recommends))
                NDCG.append(ndcg(gt_item, recommends))
            if batch_id % 240 == 0:
                print("-----------The timeStamp of evaluating batch {:03d}/{}".format(batch_id, len(test_loader)) + " is: " + time.strftime("%H: %M: %S", time.gmtime(time.time())))
    return np.mean(HR), np.mean(NDCG)
def report_pos_neg(x):
    """Score one (ground-truth item, candidate items, top-k indices) triple.

    Args:
        x: tuple of (pos_itemIdx, itemIdxs, indices) where `itemIdxs` is a
            tensor of candidate item ids and `indices` are the top-k positions
            selected from the corresponding score tensor (e.g. by torch.topk).

    Returns:
        (hit, ndcg) for the ground-truth item against the recommended list.
    """
    pos_itemIdx, itemIdxs, indices = x
    pos_itemIdx = int(pos_itemIdx)
    # Gather the recommended item ids and convert to plain Python ints,
    # matching eval_rank's handling (a list of 0-dim tensors would make the
    # comparisons inside hit/ndcg type- and device-dependent).
    recommends = torch.take(itemIdxs, indices).cpu().numpy().tolist()
    return hit(pos_itemIdx, recommends), ndcg(pos_itemIdx, recommends)