def validate(loader, model, criterion, netType, debug, flip):
    """Run one full validation pass over ``loader``.

    Accumulates the intermediate-supervision loss over every stack output,
    collects per-landmark normalized distances for all samples, and converts
    score maps back to image-space landmark predictions.

    Args:
        loader: validation DataLoader yielding ``(inputs, target, meta)``.
        model: network returning a list of heatmap outputs (one per stack).
        criterion: heatmap regression loss.
        netType: unused here; kept for a uniform train/validate signature.
        debug: if True, visualize GT and predicted heatmaps with matplotlib.
        flip: if True, average in the horizontally-flipped prediction.

    Returns:
        (avg_loss, avg_acc, predictions, auc) where ``predictions`` is a
        ``(N, 68, 2)`` tensor of landmark coordinates.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()
    end = time.time()

    # One (68, 2) landmark set per dataset sample, filled in by meta['index'].
    predictions = torch.Tensor(loader.dataset.__len__(), 68, 2)

    model.eval()
    gt_win, pred_win = None, None
    bar = Bar('Validating', max=len(loader))
    all_dists = torch.zeros((68, loader.dataset.__len__()))
    for i, (inputs, target, meta) in enumerate(loader):
        data_time.update(time.time() - end)

        input_var = torch.autograd.Variable(inputs.cuda())
        # FIX: `async` became a reserved keyword in Python 3.7 (SyntaxError);
        # PyTorch renamed the argument to `non_blocking` with identical semantics.
        target_var = torch.autograd.Variable(target.cuda(non_blocking=True))

        output = model(input_var)
        score_map = output[-1].data.cpu()

        if flip:
            # Test-time augmentation: run the horizontally flipped input,
            # un-flip its heatmaps, and sum into the score map.
            flip_input_var = torch.autograd.Variable(
                torch.from_numpy(shufflelr(
                    inputs.clone().numpy())).float().cuda())
            flip_output_var = model(flip_input_var)
            flip_output = flip_back(flip_output_var[-1].data.cpu())
            score_map += flip_output

        # Intermediate supervision: loss is summed over every stack's output.
        loss = 0
        for o in output:
            loss += criterion(o, target_var)
        acc, batch_dists = accuracy(score_map, target.cpu(), idx, thr=0.07)
        all_dists[:, i * args.val_batch:(i + 1) * args.val_batch] = batch_dists

        # Map heatmap-space maxima back to original image coordinates.
        preds = final_preds(score_map, meta['center'], meta['scale'], [64, 64])
        for n in range(score_map.size(0)):
            predictions[meta['index'][n], :, :] = preds[n, :, :]

        if debug:
            gt_batch_img = batch_with_heatmap(inputs, target)
            pred_batch_img = batch_with_heatmap(inputs, score_map)
            if not gt_win or not pred_win:
                plt.subplot(121)
                gt_win = plt.imshow(gt_batch_img)
                plt.subplot(122)
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # FIX: 0-dim tensor indexing `loss.data[0]` raises on PyTorch >= 0.5;
        # `Tensor.item()` is the supported scalar extraction.
        losses.update(loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))

        batch_time.update(time.time() - end)
        end = time.time()
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
            batch=i + 1, size=len(loader), data=data_time.val, bt=batch_time.val,
            total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, acc=acces.avg)
        bar.next()
    bar.finish()
    mean_error = torch.mean(all_dists)
    auc = calc_metrics(all_dists)  # AUC of predicted maps vs. target.
    # FIX: the message had been mangled ("[email protected]") by an email-obfuscation
    # pass; restored to the metric name matching thr=0.07 above.
    print("=> Mean Error: {:.2f}, AUC@0.07: {} based on maps".format(
        mean_error * 100., auc))
    return losses.avg, acces.avg, predictions, auc
def validate(loader, model, criterion, netType, debug, flip):
    """Validate ``model`` on ``loader`` for one epoch.

    Sums the criterion over all intermediate stack outputs, records
    per-landmark distances for every sample, and decodes score maps into
    image-space landmark predictions.

    Args:
        loader: validation DataLoader yielding ``(inputs, target, meta)``.
        model: network returning a list of heatmap outputs (one per stack).
        criterion: heatmap regression loss.
        netType: unused here; kept for a uniform train/validate signature.
        debug: if True, show GT and predicted heatmaps with matplotlib.
        flip: if True, average in the horizontally-flipped prediction.

    Returns:
        (avg_loss, avg_acc, predictions, auc) with ``predictions`` shaped
        ``(N, 68, 2)``.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()
    end = time.time()

    # One (68, 2) landmark set per dataset sample, indexed via meta['index'].
    predictions = torch.Tensor(loader.dataset.__len__(), 68, 2)

    model.eval()
    gt_win, pred_win = None, None
    bar = Bar('Validating', max=len(loader))
    all_dists = torch.zeros((68, loader.dataset.__len__()))
    for i, (inputs, target, meta) in enumerate(loader):
        data_time.update(time.time() - end)

        input_var = torch.autograd.Variable(inputs.cuda())
        # FIX: `async` is a reserved keyword since Python 3.7 (SyntaxError);
        # PyTorch's replacement kwarg is `non_blocking` (same behavior).
        target_var = torch.autograd.Variable(target.cuda(non_blocking=True))

        output = model(input_var)
        score_map = output[-1].data.cpu()

        if flip:
            # Test-time flip augmentation: predict on the mirrored input and
            # accumulate the un-mirrored heatmaps.
            flip_input_var = torch.autograd.Variable(
                torch.from_numpy(shufflelr(inputs.clone().numpy())).float().cuda())
            flip_output_var = model(flip_input_var)
            flip_output = flip_back(flip_output_var[-1].data.cpu())
            score_map += flip_output

        # Intermediate supervision: sum the loss over every stack's output.
        loss = 0
        for o in output:
            loss += criterion(o, target_var)
        acc, batch_dists = accuracy(score_map, target.cpu(), idx, thr=0.07)
        all_dists[:, i * args.val_batch:(i + 1) * args.val_batch] = batch_dists

        # Decode heatmap peaks back to original image coordinates.
        preds = final_preds(score_map, meta['center'], meta['scale'], [64, 64])
        for n in range(score_map.size(0)):
            predictions[meta['index'][n], :, :] = preds[n, :, :]

        if debug:
            gt_batch_img = batch_with_heatmap(inputs, target)
            pred_batch_img = batch_with_heatmap(inputs, score_map)
            if not gt_win or not pred_win:
                plt.subplot(121)
                gt_win = plt.imshow(gt_batch_img)
                plt.subplot(122)
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # FIX: `loss.data[0]` on a 0-dim tensor raises on PyTorch >= 0.5;
        # use the supported `Tensor.item()`.
        losses.update(loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))

        batch_time.update(time.time() - end)
        end = time.time()
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
            batch=i + 1, size=len(loader), data=data_time.val, bt=batch_time.val,
            total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, acc=acces.avg)
        bar.next()
    bar.finish()
    mean_error = torch.mean(all_dists)
    auc = calc_metrics(all_dists)  # AUC of predicted maps vs. target.
    # FIX: message was mangled to "[email protected]" by email obfuscation; restored
    # to the metric name matching thr=0.07 above.
    print("=> Mean Error: {:.2f}, AUC@0.07: {} based on maps".format(mean_error * 100., auc))
    return losses.avg, acces.avg, predictions, auc
def train(loader, model, criterion, optimizer, netType, debug=False, flip=False):
    """Train ``model`` on ``loader`` for one epoch.

    Loss is the criterion summed over all intermediate stack outputs
    (intermediate supervision); accuracy is computed on the final stack's
    score map.

    Args:
        loader: training DataLoader yielding ``(inputs, target)``.
        model: network returning a list of heatmap outputs (one per stack).
        criterion: heatmap regression loss.
        optimizer: optimizer stepped once per batch.
        netType: unused here; kept for a uniform train/validate signature.
        debug: if True, visualize GT heatmaps with matplotlib.
        flip: if True, add the un-flipped prediction of the mirrored input
            to the score map used for the accuracy metric.

    Returns:
        (avg_loss, avg_acc) for the epoch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    model.train()
    end = time.time()

    gt_win, pred_win = None, None
    bar = Bar('Training', max=len(loader))
    for i, (inputs, target) in enumerate(loader):
        data_time.update(time.time() - end)

        input_var = torch.autograd.Variable(inputs.cuda())
        # FIX: `async` became a reserved keyword in Python 3.7 (SyntaxError);
        # PyTorch renamed the argument to `non_blocking` (same semantics).
        target_var = torch.autograd.Variable(target.cuda(non_blocking=True))

        if debug:
            gt_batch_img = batch_with_heatmap(inputs, target)
            if not gt_win or not pred_win:
                plt.subplot(121)
                gt_win = plt.imshow(gt_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
            plt.pause(.05)
            plt.draw()

        output = model(input_var)
        score_map = output[-1].data.cpu()

        if flip:
            # Flip augmentation for the metric only; gradients flow solely
            # through the un-flipped forward pass below.
            flip_input_var = torch.autograd.Variable(
                torch.from_numpy(shufflelr(
                    inputs.clone().numpy())).float().cuda())
            flip_output_var = model(flip_input_var)
            flip_output = flip_back(flip_output_var[-1].data.cpu())
            score_map += flip_output

        # Intermediate supervision: sum the loss over every stack's output.
        loss = 0
        for o in output:
            loss += criterion(o, target_var)
        acc, _ = accuracy(score_map, target.cpu(), idx, thr=0.07)

        # FIX: 0-dim indexing `loss.data[0]` raises on PyTorch >= 0.5;
        # `Tensor.item()` is the supported scalar extraction.
        losses.update(loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
            batch=i + 1, size=len(loader), data=data_time.val, bt=batch_time.val,
            total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, acc=acces.avg)
        bar.next()
    bar.finish()
    return losses.avg, acces.avg
def train(loader, model, criterion, optimizer, netType, debug=False, flip=False):
    """Run one training epoch of ``model`` over ``loader``.

    The loss sums the criterion over all intermediate stack outputs
    (intermediate supervision); accuracy is measured on the last stack's
    score map.

    Args:
        loader: training DataLoader yielding ``(inputs, target)``.
        model: network returning a list of heatmap outputs (one per stack).
        criterion: heatmap regression loss.
        optimizer: optimizer stepped once per batch.
        netType: unused here; kept for a uniform train/validate signature.
        debug: if True, visualize GT heatmaps with matplotlib.
        flip: if True, add the un-flipped prediction of the mirrored input
            into the score map used for the accuracy metric.

    Returns:
        (avg_loss, avg_acc) for the epoch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    model.train()
    end = time.time()

    gt_win, pred_win = None, None
    bar = Bar('Training', max=len(loader))
    for i, (inputs, target) in enumerate(loader):
        data_time.update(time.time() - end)

        input_var = torch.autograd.Variable(inputs.cuda())
        # FIX: `async` is a reserved keyword since Python 3.7 (SyntaxError);
        # PyTorch's replacement kwarg is `non_blocking` (same behavior).
        target_var = torch.autograd.Variable(target.cuda(non_blocking=True))

        if debug:
            gt_batch_img = batch_with_heatmap(inputs, target)
            if not gt_win or not pred_win:
                plt.subplot(121)
                gt_win = plt.imshow(gt_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
            plt.pause(.05)
            plt.draw()

        output = model(input_var)
        score_map = output[-1].data.cpu()

        if flip:
            # Flip augmentation affects the metric only; gradients come from
            # the un-flipped forward pass alone.
            flip_input_var = torch.autograd.Variable(
                torch.from_numpy(shufflelr(inputs.clone().numpy())).float().cuda())
            flip_output_var = model(flip_input_var)
            flip_output = flip_back(flip_output_var[-1].data.cpu())
            score_map += flip_output

        # Intermediate supervision: sum the loss over every stack's output.
        loss = 0
        for o in output:
            loss += criterion(o, target_var)
        acc, _ = accuracy(score_map, target.cpu(), idx, thr=0.07)

        # FIX: `loss.data[0]` on a 0-dim tensor raises on PyTorch >= 0.5;
        # use the supported `Tensor.item()`.
        losses.update(loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
            batch=i + 1, size=len(loader), data=data_time.val, bt=batch_time.val,
            total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, acc=acces.avg)
        bar.next()
    bar.finish()
    return losses.avg, acces.avg