def validate(val_loader, model, criterion):
    losses = AverageMeter()
    score_micro = np.zeros(3)  # running [precision, recall, F1], micro-averaged
    test_p1, test_p3, test_p5 = 0, 0, 0
    test_ndcg1, test_ndcg3, test_ndcg5 = 0, 0, 0

    model.eval()
    with torch.no_grad():
        for batch_idx, (input, target) in enumerate(val_loader):
            input = input.cuda()
            target = target.cuda()

            output = model(input)
            loss = criterion(output, target.float())

            target = target.data.cpu().float()
            output = output.data.cpu()

            # ranking metrics from the top-5 predicted label indices
            _p1, _p3, _p5 = precision_k(output.topk(k=5)[1].numpy(), target.numpy(), k=[1, 3, 5])
            test_p1 += _p1
            test_p3 += _p3
            test_p5 += _p5

            _ndcg1, _ndcg3, _ndcg5 = ndcg_k(output.topk(k=5)[1].numpy(), target.numpy(), k=[1, 3, 5])
            test_ndcg1 += _ndcg1
            test_ndcg3 += _ndcg3
            test_ndcg5 += _ndcg5

            # binarize predictions at 0.5 for the set-based micro metrics
            output[output > 0.5] = 1
            output[output <= 0.5] = 0
            score_micro += [precision_score(target, output, average='micro'),
                            recall_score(target, output, average='micro'),
                            f1_score(target, output, average='micro')]

            losses.update(loss.item(), input.size(0))

    # plot progress
    np.set_printoptions(formatter={'float': '{: 0.4}'.format})
    print('the result of micro: \n', score_micro / len(val_loader))

    test_p1 /= len(val_loader)
    test_p3 /= len(val_loader)
    test_p5 /= len(val_loader)
    test_ndcg1 /= len(val_loader)
    test_ndcg3 /= len(val_loader)
    test_ndcg5 /= len(val_loader)
    print("precision@1 : %.4f , precision@3 : %.4f , precision@5 : %.4f " % (test_p1, test_p3, test_p5))
    print("ndcg@1 : %.4f , ndcg@3 : %.4f , ndcg@5 : %.4f " % (test_ndcg1, test_ndcg3, test_ndcg5))

    return score_micro / len(val_loader)
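# AverageMeter is referenced throughout but not defined in this snippet. A
# minimal sketch of the usual running-average helper is given below; the
# repository's own implementation may differ in detail.
class AverageMeter(object):
    """Tracks the latest value, running sum, count, and average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        # val: metric for the current batch; n: number of samples it covers
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count if self.count > 0 else 0.0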
def validate(val_loader, model):
    data_time = AverageMeter()
    microF1 = AverageMeter()
    test_p1, test_p3, test_p5 = 0, 0, 0
    test_ndcg1, test_ndcg3, test_ndcg5 = 0, 0, 0

    model.eval()
    with torch.no_grad():
        end = time.time()
        for batch_idx, (input, target) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.cuda()
            target = target.cuda()
            output = model(input)

            target = target.data.cpu().float()
            output = output.data.cpu()

            _p1, _p3, _p5 = precision_k(output.topk(k=5)[1].numpy(), target.numpy(), k=[1, 3, 5])
            test_p1 += _p1
            test_p3 += _p3
            test_p5 += _p5

            _ndcg1, _ndcg3, _ndcg5 = ndcg_k(output.topk(k=5)[1].numpy(), target.numpy(), k=[1, 3, 5])
            test_ndcg1 += _ndcg1
            test_ndcg3 += _ndcg3
            test_ndcg5 += _ndcg5

            # binarize predictions at 0.5 before computing F1
            output[output > 0.5] = 1
            output[output <= 0.5] = 0
            micro, macro = calc_f1(target, output)
            microF1.update(micro.item(), input.size(0))

            end = time.time()  # reset the timer so data_time tracks per-batch loading time

    np.set_printoptions(formatter={'float': '{: 0.4}'.format})
    print('the result of micro: \n', microF1.avg)

    test_p1 /= len(val_loader)
    test_p3 /= len(val_loader)
    test_p5 /= len(val_loader)
    test_ndcg1 /= len(val_loader)
    test_ndcg3 /= len(val_loader)
    test_ndcg5 /= len(val_loader)
    print("precision@1 : %.4f , precision@3 : %.4f , precision@5 : %.4f " % (test_p1, test_p3, test_p5))
    print("ndcg@1 : %.4f , ndcg@3 : %.4f , ndcg@5 : %.4f " % (test_ndcg1, test_ndcg3, test_ndcg5))

    return microF1.avg
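# precision_k and ndcg_k are not defined in this snippet. The sketch below
# assumes the signature used above -- (top-k predicted label indices, binary
# target matrix, list of cutoffs) -- and returns one batch-averaged score per
# cutoff; the actual helpers in the repository may differ.
import numpy as np


def precision_k(topk_indices, true_matrix, k=(1, 3, 5)):
    """Precision@k averaged over the batch for multi-label targets."""
    scores = []
    for cutoff in k:
        # 1 where a predicted label in the top-cutoff is actually relevant
        hits = np.take_along_axis(true_matrix, topk_indices[:, :cutoff], axis=1)
        scores.append(hits.sum(axis=1).mean() / cutoff)
    return scores


def ndcg_k(topk_indices, true_matrix, k=(1, 3, 5)):
    """nDCG@k averaged over the batch for multi-label targets."""
    scores = []
    for cutoff in k:
        hits = np.take_along_axis(true_matrix, topk_indices[:, :cutoff], axis=1)
        discounts = 1.0 / np.log2(np.arange(2, cutoff + 2))
        dcg = (hits * discounts).sum(axis=1)
        # ideal DCG: all relevant labels (capped at the cutoff) ranked first
        n_rel = np.minimum(true_matrix.sum(axis=1), cutoff).astype(int)
        idcg = np.array([discounts[:n].sum() for n in n_rel])
        idcg[idcg == 0] = 1.0  # samples with no positive labels contribute 0
        scores.append((dcg / idcg).mean())
    return scores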
def get_full_sort_score(self, epoch, answers, pred_list):
    recall, ndcg = [], []
    for k in [5, 10, 15, 20]:
        recall.append(recall_at_k(answers, pred_list, k))
        ndcg.append(ndcg_k(answers, pred_list, k))
    post_fix = {
        "Epoch": epoch,
        "HIT@5": '{:.4f}'.format(recall[0]), "NDCG@5": '{:.4f}'.format(ndcg[0]),
        "HIT@10": '{:.4f}'.format(recall[1]), "NDCG@10": '{:.4f}'.format(ndcg[1]),
        "HIT@20": '{:.4f}'.format(recall[3]), "NDCG@20": '{:.4f}'.format(ndcg[3])
    }
    print(post_fix)
    with open(self.args.log_file, 'a') as f:
        f.write(str(post_fix) + '\n')
    return [recall[0], ndcg[0], recall[1], ndcg[1], recall[3], ndcg[3]], str(post_fix)
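# recall_at_k and the ndcg_k used by get_full_sort_score take per-user answer
# lists and ranked prediction lists with a single cutoff, i.e. a different
# signature from the multi-label ndcg_k above (they come from a different
# module). A minimal sketch under that assumption, with binary relevance:
import math


def recall_at_k(answers, pred_list, k):
    """Average per-user recall of the held-out items within the top-k predictions."""
    total, score = 0, 0.0
    for truth, preds in zip(answers, pred_list):
        truth_set = set(truth)
        if not truth_set:
            continue
        score += len(truth_set & set(preds[:k])) / float(len(truth_set))
        total += 1
    return score / total if total > 0 else 0.0


def ndcg_k(answers, pred_list, k):
    """NDCG@k averaged over users, with binary relevance."""
    total, score = 0, 0.0
    for truth, preds in zip(answers, pred_list):
        truth_set = set(truth)
        if not truth_set:
            continue
        dcg = sum(1.0 / math.log2(rank + 2)
                  for rank, item in enumerate(preds[:k]) if item in truth_set)
        idcg = sum(1.0 / math.log2(rank + 2)
                   for rank in range(min(len(truth_set), k)))
        score += dcg / idcg
        total += 1
    return score / total if total > 0 else 0.0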
def fine_tuning(train_loader, model, criterion, optimizer):
    F1 = np.zeros(54)  # per-label F1 for the 54 labels
    score_micro = np.zeros(3)
    score_macro = np.zeros(3)
    data_time = AverageMeter()
    losses = AverageMeter()
    microF1 = AverageMeter()
    macroF1 = AverageMeter()

    model.train()
    test_p1, test_p3, test_p5 = 0, 0, 0
    test_ndcg1, test_ndcg3, test_ndcg5 = 0, 0, 0
    end = time.time()
    # bar = Bar('Training', max=len(train_loader))
    for batch_idx, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input = input.cuda()
        target = target.cuda()
        output = model(input)
        loss = criterion(output, target.float())

        target = target.data.cpu().float()
        output = output.data.cpu()
        micro, macro = calc_f1(target, output)
        losses.update(loss.item(), input.size(0))
        microF1.update(micro.item(), input.size(0))
        macroF1.update(macro.item(), input.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        end = time.time()

        _p1, _p3, _p5 = precision_k(output.topk(k=5)[1].numpy(), target.numpy(), k=[1, 3, 5])
        test_p1 += _p1
        test_p3 += _p3
        test_p5 += _p5

        _ndcg1, _ndcg3, _ndcg5 = ndcg_k(output.topk(k=5)[1].numpy(), target.numpy(), k=[1, 3, 5])
        test_ndcg1 += _ndcg1
        test_ndcg3 += _ndcg3
        test_ndcg5 += _ndcg5

        # binarize predictions at 0.5 for the set-based metrics
        output[output > 0.5] = 1
        output[output <= 0.5] = 0
        for l in range(54):
            F1[l] += f1_score(target[:, l], output[:, l], average='binary')
            # precision[l] += precision_score(target[:, l], output[:, l], average='binary')
            # recall[l] += recall_score(target[:, l], output[:, l], average='binary')
        # micro, macro = calc_f1(target, output)
        # acc += accuracy_score(target, output)
        # print("acc", acc)
        score_micro += [precision_score(target, output, average='micro'),
                        recall_score(target, output, average='micro'),
                        f1_score(target, output, average='micro')]
        score_macro += [precision_score(target, output, average='macro'),
                        recall_score(target, output, average='macro'),
                        f1_score(target, output, average='macro')]
        # acc = calc_acc(target, output)

    np.set_printoptions(formatter={'float': '{: 0.4}'.format})
    print('the result of F1: \n', F1 / len(train_loader))
    print('the result of micro: \n', score_micro / len(train_loader))
    print('the result of macro: \n', score_macro / len(train_loader))

    test_p1 /= len(train_loader)
    test_p3 /= len(train_loader)
    test_p5 /= len(train_loader)
    test_ndcg1 /= len(train_loader)
    test_ndcg3 /= len(train_loader)
    test_ndcg5 /= len(train_loader)
    print("precision@1 : %.4f , precision@3 : %.4f , precision@5 : %.4f " % (test_p1, test_p3, test_p5))
    print("ndcg@1 : %.4f , ndcg@3 : %.4f , ndcg@5 : %.4f " % (test_ndcg1, test_ndcg3, test_ndcg5))
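# calc_f1 is assumed to binarize the model outputs at 0.5 (the same threshold
# used above) and return micro- and macro-averaged F1 as tensors so that
# .item() works in the callers; a minimal sklearn-based sketch:
import torch
from sklearn.metrics import f1_score


def calc_f1(target, output, threshold=0.5):
    """Returns (micro_f1, macro_f1) as 0-dim tensors for multi-label predictions."""
    pred = (output > threshold).int().numpy()
    true = target.int().numpy()
    micro = f1_score(true, pred, average='micro', zero_division=0)
    macro = f1_score(true, pred, average='macro', zero_division=0)
    return torch.tensor(micro), torch.tensor(macro)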
def validate(val_loader, model):
    F1 = np.zeros(54)
    score_micro = np.zeros(3)
    score_macro = np.zeros(3)
    test_p1, test_p3, test_p5 = 0, 0, 0
    test_ndcg1, test_ndcg3, test_ndcg5 = 0, 0, 0

    model.eval()
    with torch.no_grad():
        for batch_idx, (input, target) in enumerate(val_loader):
            # measure data loading time
            input = input.cuda()
            target = target.cuda()
            output = model(input)

            target = target.data.cpu().float()
            output = output.data.cpu()

            _p1, _p3, _p5 = precision_k(output.topk(k=5)[1].numpy(), target.numpy(), k=[1, 3, 5])
            test_p1 += _p1
            test_p3 += _p3
            test_p5 += _p5

            _ndcg1, _ndcg3, _ndcg5 = ndcg_k(output.topk(k=5)[1].numpy(), target.numpy(), k=[1, 3, 5])
            test_ndcg1 += _ndcg1
            test_ndcg3 += _ndcg3
            test_ndcg5 += _ndcg5

            output[output > 0.5] = 1
            output[output <= 0.5] = 0
            for l in range(54):
                F1[l] += f1_score(target[:, l], output[:, l], average='binary')
                # precision[l] += precision_score(target[:, l], output[:, l], average='binary')
                # recall[l] += recall_score(target[:, l], output[:, l], average='binary')
            # micro, macro = calc_f1(target, output)
            # acc += accuracy_score(target, output)
            # print("acc", acc)
            score_micro += [precision_score(target, output, average='micro'),
                            recall_score(target, output, average='micro'),
                            f1_score(target, output, average='micro')]
            score_macro += [precision_score(target, output, average='macro'),
                            recall_score(target, output, average='macro'),
                            f1_score(target, output, average='macro')]
            # acc = calc_acc(target, output)

    np.set_printoptions(formatter={'float': '{: 0.4}'.format})
    print('the result of F1: \n', F1 / len(val_loader))
    print('the result of micro: \n', score_micro / len(val_loader))
    print('the result of macro: \n', score_macro / len(val_loader))

    test_p1 /= len(val_loader)
    test_p3 /= len(val_loader)
    test_p5 /= len(val_loader)
    test_ndcg1 /= len(val_loader)
    test_ndcg3 /= len(val_loader)
    test_ndcg5 /= len(val_loader)
    print("precision@1 : %.4f , precision@3 : %.4f , precision@5 : %.4f " % (test_p1, test_p3, test_p5))
    print("ndcg@1 : %.4f , ndcg@3 : %.4f , ndcg@5 : %.4f " % (test_ndcg1, test_ndcg3, test_ndcg5))
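# A rough sketch of how these routines would typically be wired together per
# epoch; the actual driver script, data loaders, model, and loss are not part
# of this snippet, and nn.BCEWithLogitsLoss is only an assumption (use
# nn.BCELoss if the model already applies a sigmoid).
import torch
import torch.nn as nn


def run(train_loader, val_loader, model, num_epochs=10, lr=1e-3):
    criterion = nn.BCEWithLogitsLoss()  # assumed multi-label loss
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    for epoch in range(num_epochs):
        print('Epoch %d / %d' % (epoch + 1, num_epochs))
        fine_tuning(train_loader, model, criterion, optimizer)
        validate(val_loader, model)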