def validate(val_loader, model, criterion, num_classes, debug=False, flip=True):
    """Run one evaluation pass over ``val_loader``.

    Args:
        val_loader: DataLoader yielding ``(input, target, meta)`` batches.
        model: network returning a heatmap tensor, or a list of intermediate
            heatmaps (stacked-hourglass style).
        criterion: loss callable ``criterion(output, target, target_weight)``.
        num_classes: number of keypoints (rows of the predictions tensor).
        debug: if True, show ground-truth vs. predicted heatmaps with pyplot.
        flip: if True, average scores with the horizontally-flipped input
            (test-time augmentation).

    Returns:
        ``(mean_loss, mean_acc, predictions)`` where ``predictions`` has shape
        ``(len(dataset), num_classes, 2)`` in original-image coordinates.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # Per-sample keypoint coordinates accumulated over the whole dataset.
    predictions = torch.Tensor(len(val_loader.dataset), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output
            output = model(input)
            score_map = output[-1].cpu() if isinstance(output, list) else output.cpu()

            if flip:
                # BUG FIX: .numpy() raises on CUDA tensors, so move to CPU
                # first; behavior is unchanged when device is already CPU.
                flip_input = torch.from_numpy(
                    fliplr(input.clone().cpu().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if isinstance(
                    flip_output, list) else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if isinstance(output, list):  # multiple (intermediate) outputs
                loss = sum(criterion(o, target, target_weight) for o in output)
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight)
            # ``idx`` is a module-level list of evaluated joint indices.
            acc = accuracy(score_map, target.cpu(), idx)

            # generate predictions in original-image coordinates
            preds = final_preds(score_map, meta['center'], meta['scale'], [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1, size=len(val_loader), data=data_time.val,
                bt=batch_time.avg, total=bar.elapsed_td, eta=bar.eta_td,
                loss=losses.avg, acc=acces.avg)
            bar.next()
        bar.finish()
    return losses.avg, acces.avg, predictions
def validate(val_loader, model, criterion, num_classes, debug=False, flip=True,
             pred_dir='/home/shantam/Documents/Programs/pytorch-pose/example/predictions'):
    """Run one evaluation pass over ``val_loader`` and dump prediction images.

    Args:
        val_loader: DataLoader yielding ``(input, target, meta, img_path)``.
        model: network returning a heatmap tensor or list of heatmaps.
        criterion: loss callable ``criterion(output, target, target_weight)``.
        num_classes: number of keypoints (rows of the predictions tensor).
        debug: if True, show ground-truth vs. predicted heatmaps with pyplot.
        flip: if True, average scores with the horizontally-flipped input.
        pred_dir: directory where ``pred<i>.png`` images are written; default
            preserves the original hard-coded location.

    Returns:
        ``(mean_loss, mean_acc, predictions)``.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    predictions = torch.Tensor(len(val_loader.dataset), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    with torch.no_grad():
        for i, (input, target, meta, img_path) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            # NOTE(review): an empty index list is passed to accuracy();
            # presumably accuracy() then falls back to a default joint set —
            # confirm against its implementation.
            indexes = []
            input = input.to(device, non_blocking=True)

            # ``img_path`` arrives as a one-element batch; str() wraps it like
            # ("('...',)"), so strip the 3-char prefix and 2-char suffix to
            # recover the raw path. Fragile — assumes batch size 1 for paths.
            path = str(img_path)
            path = path[3:len(path) - 2]
            image = cv2.imread(path)

            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output
            output = model(input)
            score_map = output[-1].cpu() if isinstance(output, list) else output.cpu()

            if flip:
                # BUG FIX: .numpy() raises on CUDA tensors; move to CPU first.
                flip_input = torch.from_numpy(
                    fliplr(input.clone().cpu().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if isinstance(
                    flip_output, list) else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if isinstance(output, list):  # multiple (intermediate) outputs
                loss = sum(criterion(o, target, target_weight) for o in output)
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight)

            # generate predictions (coords + confidence values)
            preds, vals = final_preds(score_map, meta['center'], meta['scale'],
                                      [64, 64])
            acc = accuracy(score_map, target.cpu(), indexes)

            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # Dump the (possibly annotated) source image for this batch.
            cv2.imwrite(pred_dir + '/pred' + str(i) + '.png', image)

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1, size=len(val_loader), data=data_time.val,
                bt=batch_time.avg, total=bar.elapsed_td, eta=bar.eta_td,
                loss=losses.avg, acc=acces.avg)
            bar.next()
        bar.finish()
    return losses.avg, acces.avg, predictions
def train(train_loader, model, criterion, optimizer, debug=False, flip=True):
    """Run one training epoch.

    Args:
        train_loader: DataLoader yielding ``(input, target, meta)`` batches.
        model: network returning a heatmap tensor or list of heatmaps.
        criterion: loss callable ``criterion(output, target, target_weight)``.
        optimizer: optimizer stepped once per batch.
        debug: if True, visualize ground-truth and predicted heatmaps.
        flip: unused here; kept for signature parity with validate().

    Returns:
        ``(mean_loss, mean_acc)`` over the epoch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    gt_win, pred_win = None, None
    bar = Bar('Train', max=len(train_loader))
    for i, (input, target, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input, target = input.to(device), target.to(device, non_blocking=True)
        target_weight = meta['target_weight'].to(device, non_blocking=True)

        # compute output
        output = model(input)
        if isinstance(output, list):  # multiple (intermediate) outputs
            loss = sum(criterion(o, target, target_weight) for o in output)
            output = output[-1]
        else:  # single output
            loss = criterion(output, target, target_weight)
        # ``idx`` is a module-level list of evaluated joint indices.
        acc = accuracy(output, target, idx)

        if debug:  # visualize groundtruth and predictions
            gt_batch_img = batch_with_heatmap(input, target)
            pred_batch_img = batch_with_heatmap(input, output)
            if not gt_win or not pred_win:
                ax1 = plt.subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))
        acces.update(acc[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
            batch=i + 1, size=len(train_loader), data=data_time.val,
            bt=batch_time.val, total=bar.elapsed_td, eta=bar.eta_td,
            loss=losses.avg, acc=acces.avg)
        bar.next()
    bar.finish()
    return losses.avg, acces.avg
def validate(val_loader, model, criterion, debug=False, flip=True, test_batch=6,
             njoints=68):
    """Evaluate and additionally compute inter-ocular-normalized error metrics.

    Args:
        val_loader: DataLoader yielding ``(input, target, meta)`` batches.
        model: network returning a heatmap tensor or list of heatmaps.
        criterion: loss ``criterion(output, target, target_weight, num_idx)``.
        debug: if True, visualize heatmaps with pyplot.
        flip: if True, average scores with the horizontally-flipped input.
        test_batch: evaluation batch size; used to slice per-sample distance
            columns, so it must match the loader's batch size.
        njoints: number of joints (rows of the distance/prediction tensors).

    Returns:
        ``(mean_loss, mean_acc, predictions, auc, mean_error)``.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    predictions = torch.Tensor(len(val_loader.dataset), njoints, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    # One column of per-joint normalized distances per dataset sample.
    interocular_dists = torch.zeros((njoints, len(val_loader.dataset)))
    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output
            output = model(input)
            score_map = output[-1].cpu() if isinstance(output, list) else output.cpu()

            if flip:
                # BUG FIX: .numpy() raises on CUDA tensors; move to CPU first.
                flip_input = torch.from_numpy(
                    fliplr(input.clone().cpu().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if isinstance(
                    flip_output, list) else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if isinstance(output, list):  # multiple (intermediate) outputs
                loss = sum(criterion(o, target, target_weight, len(idx))
                           for o in output)
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight, len(idx))
            acc, batch_interocular_dists = accuracy(score_map, target.cpu(), idx)
            interocular_dists[:, i * test_batch:(i + 1) * test_batch] = \
                batch_interocular_dists

            # generate predictions
            preds = final_preds(score_map, meta['center'], meta['scale'], [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.8f} | Acc: {acc: .8f}'.format(
                batch=i + 1, size=len(val_loader), data=data_time.val,
                bt=batch_time.avg, total=bar.elapsed_td, eta=bar.eta_td,
                loss=losses.avg, acc=acces.avg)
            bar.next()
        bar.finish()

    # ``idx`` is 1-based; convert to 0-based rows, then average every valid
    # (!= -1) normalized distance over the evaluated joints.
    idx_array = np.array(idx) - 1
    interocular_dists_pickup = interocular_dists[idx_array, :]
    mean_error = torch.mean(
        interocular_dists_pickup[interocular_dists_pickup != -1])
    # AUC of predicted maps against the target.
    auc = calc_metrics(interocular_dists, idx)
    return losses.avg, acces.avg, predictions, auc, mean_error
def train(train_loader, model, criterion, optimizer, debug=False, flip=True,
          train_batch=6, epoch=0, njoints=68):
    """Run one training epoch, tracking inter-ocular-normalized distances.

    Args:
        train_loader: DataLoader yielding ``(input, target, meta)`` batches.
        model: network returning a heatmap tensor or list of heatmaps.
        criterion: loss ``criterion(output, target, target_weight, num_idx)``.
        optimizer: optimizer stepped once per batch.
        debug: if True, visualize ground-truth and predicted heatmaps.
        flip: unused here; kept for signature parity with validate().
        train_batch: training batch size; must match the loader's batch size
            (used to slice per-sample distance columns).
        epoch: unused; kept for caller compatibility.
        njoints: number of joints (rows of the distance tensor).

    Returns:
        ``(mean_loss, mean_acc)`` over the epoch. The mean error and AUC are
        computed (calc_metrics may log them) but are not returned.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    gt_win, pred_win = None, None
    bar = Bar('Train', max=len(train_loader))
    interocular_dists = torch.zeros((njoints, len(train_loader.dataset)))
    for i, (input, target, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input, target = input.to(device), target.to(device, non_blocking=True)
        target_weight = meta['target_weight'].to(device, non_blocking=True)

        # compute output
        output = model(input)
        if isinstance(output, list):  # multiple (intermediate) outputs
            loss = sum(criterion(o, target, target_weight, len(idx))
                       for o in output)
            output = output[-1]
        else:  # single output
            loss = criterion(output, target, target_weight, len(idx))
        acc, batch_interocular_dists = accuracy(output, target, idx)
        interocular_dists[:, i * train_batch:(i + 1) * train_batch] = \
            batch_interocular_dists

        if debug:  # visualize groundtruth and predictions
            gt_batch_img = batch_with_heatmap(input, target)
            pred_batch_img = batch_with_heatmap(input, output)
            if not gt_win or not pred_win:
                ax1 = plt.subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))
        acces.update(acc[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.8f} | Acc: {acc: .8f}'.format(
            batch=i + 1, size=len(train_loader), data=data_time.val,
            bt=batch_time.val, total=bar.elapsed_td, eta=bar.eta_td,
            loss=losses.avg, acc=acces.avg)
        bar.next()
    bar.finish()

    # ``idx`` is 1-based; average valid (!= -1) normalized distances and
    # compute the AUC side metric (calc_metrics may print/log it).
    idx_array = np.array(idx) - 1
    interocular_dists_pickup = interocular_dists[idx_array, :]
    mean_error = torch.mean(
        interocular_dists_pickup[interocular_dists_pickup != -1])
    auc = calc_metrics(interocular_dists, idx)
    return losses.avg, acces.avg
def validate(self):
    """Evaluate ``self.netG`` on ``self.val_loader``, skipping non-MPII batches.

    Uses ``self.device``, ``self.flip``, ``self.criterion``, ``self.idx``,
    ``self.num_classes`` and ``self.debug``.

    Returns:
        ``(mean_loss, mean_acc, predictions)`` where ``predictions`` has shape
        ``(len(dataset), self.num_classes, 2)``.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    predictions = torch.Tensor(len(self.val_loader.dataset), self.num_classes, 2)

    self.netG.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(self.val_loader))
    with torch.no_grad():
        for i, (input, target, meta, mpii) in enumerate(self.val_loader):
            # Only evaluate MPII samples.
            # NOTE(review): assumes ``mpii`` is a scalar bool/int per batch;
            # if it is a tensor this comparison is element-wise — confirm.
            if mpii == False:
                continue
            data_time.update(time.time() - end)

            input = input.to(self.device, non_blocking=True)
            target = target.to(self.device, non_blocking=True)
            target_weight = meta['target_weight'].to(self.device,
                                                     non_blocking=True)

            output = self.netG(input)
            score_map = output[-1].cpu() if isinstance(output, list) else output.cpu()

            if self.flip:
                # BUG FIX: the original assigned the bare function object
                # (``flip_input = torch.from_numpy``) so the flip branch was
                # broken. Reconstructed to match the sibling validate()
                # implementations; .cpu() before .numpy() for CUDA tensors.
                flip_input = torch.from_numpy(
                    fliplr(input.clone().cpu().numpy())).float().to(self.device)
                flip_output = self.netG(flip_input)
                flip_output = flip_output[-1].cpu() if isinstance(
                    flip_output, list) else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if isinstance(output, list):  # multiple (intermediate) outputs
                loss = sum(self.criterion(o, target, target_weight)
                           for o in output)
                output = output[-1]
            else:  # single output
                loss = self.criterion(output, target, target_weight)
            acc = accuracy(score_map, target.cpu(), self.idx)

            preds = final_preds(score_map, meta['center'], meta['scale'], [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if self.debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # BUG FIX: the original passed the bound method ``loss.item``
            # instead of calling it, recording a method object as the value.
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            batch_time.update(time.time() - end)
            end = time.time()

            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1, size=len(self.val_loader), data=data_time.val,
                bt=batch_time.avg, total=bar.elapsed_td, eta=bar.eta_td,
                loss=losses.avg, acc=acces.avg)
            bar.next()
        bar.finish()
    return losses.avg, acces.avg, predictions
def train(train_loader, model, criterion, optimizer, debug=False, flip=True,
          train_iters=0):
    """Train for a fixed number of iterations, cycling the loader if needed.

    Args:
        train_loader: DataLoader yielding ``(input, target, meta)`` batches.
        model: network returning a heatmap tensor or list of heatmaps.
        criterion: loss callable ``criterion(output, target, target_weight)``.
        optimizer: optimizer stepped once per batch.
        debug: if True, save ground-truth/prediction figures to ``debug/``.
        flip: unused here; kept for signature parity with validate().
        train_iters: number of batches to run; 0 means one full epoch.

    Returns:
        ``(mean_loss, mean_acc)`` over the iterations run.
    """
    print("Train iters: {}".format(train_iters))
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # switch to train mode
    model.train()

    debug_count = 0
    end = time.time()
    gt_win, pred_win = None, None
    # 0 means "one full pass over the loader".
    train_iters = train_iters if train_iters != 0 else len(train_loader)
    bar = Bar('Train', max=train_iters)
    curr_iter = 0
    while curr_iter < train_iters:
        # The outer while restarts the loader when it is shorter than
        # ``train_iters``.
        for i, (input, target, meta) in enumerate(train_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input, target = input.to(device), target.to(device,
                                                        non_blocking=True)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output
            output = model(input)
            if isinstance(output, list):  # multiple (intermediate) outputs
                loss = sum(criterion(o, target, target_weight) for o in output)
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight)
            acc = accuracy(output, target, idx)

            if debug:  # visualize groundtruth and predictions
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, output)
                fig = plt.figure()
                ax1 = fig.add_subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = fig.add_subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
                plt.pause(.05)
                plt.draw()
                fig.savefig('debug/debug_{}.png'.format(str(debug_count)),
                            dpi=500)
                # BUG FIX: a new figure was created every batch and never
                # closed, leaking matplotlib figures over a long run.
                plt.close(fig)
                debug_count += 1

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # compute gradient and do SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress (train_iters is never 0 past the normalization
            # above, so it is the display size)
            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1, size=train_iters, data=data_time.val,
                bt=batch_time.val, total=bar.elapsed_td, eta=bar.eta_td,
                loss=losses.avg, acc=acces.avg)
            bar.next()

            curr_iter += 1
            # BUG FIX: the original broke at ``train_iters - 1``, which either
            # stopped one batch short or forced a pointless loader restart.
            if curr_iter >= train_iters:
                break
    bar.finish()
    return losses.avg, acces.avg
def validate(val_loader, model, criterion, flip=True, test_batch=6, njoints=18):
    """Evaluate an animal-pose model (horse/tiger, per ``global_animal``).

    Tiger joints are re-ordered to the horse joint layout before computing
    the loss/accuracy.

    Args:
        val_loader: DataLoader yielding ``(input, target, meta)`` batches.
        model: network returning a heatmap tensor or list of heatmaps.
        criterion: loss ``criterion(output, target, target_weight, num_idx)``.
        flip: if True, average scores with the horizontally-flipped input.
        test_batch: unused; kept for caller compatibility.
        njoints: unused (predictions are not accumulated); kept for
            caller compatibility.

    Returns:
        ``(mean_loss, mean_acc)``.

    Raises:
        Exception: if ``global_animal`` is neither 'horse' nor 'tiger'.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # Tiger annotations use a different joint order; this 1-based permutation
    # maps them onto the horse layout.
    tiger_to_horse_order = np.array(
        [1, 2, 3, 4, 5, 6, 7, 8, 15, 16, 17, 18, 13, 14, 9, 10, 11, 12]) - 1

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)

            if global_animal == 'horse':
                target = target.to(device, non_blocking=True)
                target_weight = meta['target_weight'].to(device,
                                                         non_blocking=True)
            elif global_animal == 'tiger':
                target = target.to(device, non_blocking=True)
                target_weight = meta['target_weight'].to(device,
                                                         non_blocking=True)
                target = target[:, tiger_to_horse_order, :, :]
                target_weight = target_weight[:, tiger_to_horse_order, :]
            else:
                raise Exception('please add new animal category')

            # compute output
            output = model(input)
            score_map = output[-1].cpu() if isinstance(output, list) else output.cpu()

            if flip:
                # BUG FIX: .numpy() raises on CUDA tensors; move to CPU first.
                flip_input = torch.from_numpy(
                    fliplr(input.clone().cpu().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if isinstance(
                    flip_output, list) else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if isinstance(output, list):  # multiple (intermediate) outputs
                loss = sum(criterion(o, target, target_weight, len(idx))
                           for o in output)
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight, len(idx))
            acc, _ = accuracy(score_map, target.cpu(), idx)

            # NOTE: the original allocated a ``predictions`` tensor and called
            # final_preds() but never used or returned either; both removed.

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.8f} | Acc: {acc: .8f}'.format(
                batch=i + 1, size=len(val_loader), data=data_time.val,
                bt=batch_time.avg, total=bar.elapsed_td, eta=bar.eta_td,
                loss=losses.avg, acc=acces.avg)
            bar.next()
        bar.finish()
    return losses.avg, acces.avg