def train(train_loader, model, criterion, optimizer, idx, flip=True):
    """Train for one epoch with intermediate (multi-stack) supervision.

    Args:
        train_loader: yields (inputs, target, meta) batches.
        model: network returning a list of per-stack heatmap outputs.
        criterion: per-stage heatmap loss.
        optimizer: optimizer stepped once per batch.
        idx: joint indices forwarded to accuracy().
        flip: unused here; kept for signature compatibility with callers.

    Returns:
        (average loss, average accuracy) over the epoch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    gt_win, pred_win = None, None
    bar = Bar('Processing', max=len(train_loader))
    for i, (inputs, target, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # torch.autograd.Variable is a deprecated no-op since torch 0.4;
        # plain tensors carry autograd state.
        input_var = inputs.to(device=device)
        target_var = target.to(device=device, non_blocking=True)

        # Sum the loss over every stack output (intermediate supervision);
        # only the last stack's heatmaps are scored for accuracy.
        output = model(input_var)
        score_map = output[-1].data.cpu()
        loss = criterion(output[0], target_var)
        for j in range(1, len(output)):
            loss += criterion(output[j], target_var)
        # NOTE(review): pck_threshold is a module-level global here.
        acc = accuracy(score_map, target, idx, pck_threshold)

        # measure accuracy and record loss
        losses.update(loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = (
            '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | '
            'Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'
        ).format(batch=i + 1,
                 size=len(train_loader),
                 data=data_time.val,
                 bt=batch_time.val,
                 total=bar.elapsed_td,
                 eta=bar.eta_td,
                 loss=losses.avg,
                 acc=acces.avg)
        bar.next()
    bar.finish()
    return losses.avg, acces.avg
def validate(val_loader, model, criterion):
    """Evaluate the model and collect per-sample predictions.

    Args:
        val_loader: yields (inputs, target) pairs.
        model: network producing a single output per sample.
        criterion: loss between output and target.

    Returns:
        (average loss, average accuracy, predictions tensor of shape (N, 12)).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions — one 12-dim row per dataset sample
    predictions = torch.Tensor(val_loader.dataset.__len__(), 12)

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    for i, (inputs, target) in enumerate(val_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # Deprecated Variable wrappers removed; the original tensor ops are
        # preserved verbatim below.
        # NOTE(review): .type(torch.FloatTensor) after .cuda() moves the
        # input back to CPU while target stays on GPU — looks inconsistent;
        # confirm against the model/criterion device placement.
        input_var = inputs.cuda()
        input_var = input_var.unsqueeze(0)
        input_var = input_var.type(torch.FloatTensor)
        target_var = target.type(torch.FloatTensor)
        target_var = target_var.cuda()

        # compute output
        output = model(input_var)
        output = output.unsqueeze(0)
        loss = criterion(output, target_var)
        acc = accuracy(output.detach(), target.cpu())

        # generate predictions
        predictions[i] = output.detach()

        # measure accuracy and record loss
        losses.update(loss.item(), inputs.size(0))
        acces.update(acc, inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = (
            '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | '
            'Total: {total:} | ETA: {eta:} | Loss: {loss:.6f} | Acc: {acc: .4f}'
        ).format(batch=i + 1,
                 size=len(val_loader),
                 data=data_time.val,
                 bt=batch_time.avg,
                 total=bar.elapsed_td,
                 eta=bar.eta_td,
                 loss=losses.avg,
                 acc=acces.avg)
        bar.next()
    bar.finish()
    return losses.avg, acces.avg, predictions
def train(train_loader, model, criterion, optimizer):
    """Train for one epoch on a single-output model.

    Args:
        train_loader: yields (inputs, target) pairs.
        model: network producing a single output.
        criterion: loss between output and target.
        optimizer: optimizer stepped once per batch.

    Returns:
        (average loss, average accuracy) over the epoch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    bar = Bar('Processing', max=len(train_loader))
    for i, (inputs, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # Deprecated Variable wrappers removed; tensor ops preserved.
        # NOTE(review): .type(torch.FloatTensor) after .cuda() moves the
        # input back to CPU while the target is sent to GPU — confirm this
        # matches the model/criterion device placement.
        input_var = inputs.cuda()
        input_var = input_var.unsqueeze(0)
        input_var = input_var.type(torch.FloatTensor)
        target_var = target.type(torch.FloatTensor)
        target_var = target_var.cuda()

        # compute output
        output = model(input_var)
        output = output.unsqueeze(0)
        loss = criterion(output, target_var)
        acc = accuracy(output.detach(), target.cpu())

        # measure accuracy and record loss
        losses.update(loss.item(), inputs.size(0))
        acces.update(acc, inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = (
            '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | '
            'Total: {total:} | ETA: {eta:} | Loss: {loss:.6f} | Acc: {acc: .4f}'
        ).format(batch=i + 1,
                 size=len(train_loader),
                 data=data_time.val,
                 bt=batch_time.val,
                 total=bar.elapsed_td,
                 eta=bar.eta_td,
                 loss=losses.avg,
                 acc=acces.avg)
        bar.next()
    bar.finish()
    return losses.avg, acces.avg
def validate(val_loader, model, criterion, num_classes, args, flip=False, test_batch=6):
    """Evaluate an animal-pose model and report per-joint/per-part [email protected].

    Args:
        val_loader: yields (input, target, meta) batches.
        model: called as model(input, 1, return_domain=False); returns
            (output, output_refine) where output_refine[0] holds heatmaps.
        criterion: unused; kept for signature compatibility.
        num_classes: number of joints.
        args: namespace with .animal used for reporting.
        flip: average predictions with a horizontally-flipped forward pass.
        test_batch: unused; kept for signature compatibility.

    Returns:
        (last batch's inter-keypoint distances from accuracy_2animal,
         average accuracy, predictions tensor of shape (N, num_classes, 2)).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    acces = AverageMeter()
    pck_score = np.zeros(num_classes)
    pck_count = np.zeros(num_classes)

    # predictions
    predictions = torch.Tensor(val_loader.dataset.__len__(), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    batch_dists = None
    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)

            output, output_refine = model(input, 1, return_domain=False)
            score_map = output_refine[0].cpu()
            if flip:
                flip_input = torch.from_numpy(
                    fliplr(input.clone().cpu().numpy())).float().to(device)
                _, flip_output_refine = model(flip_input, 1,
                                              return_domain=False)
                flip_output = flip_output_refine[0].cpu()
                flip_output = flip_back(flip_output, 'real_animal')
                score_map += flip_output

            # acc[0] is the mean accuracy; acc[1:] are per-joint values
            # (-1 marks joints absent from the batch).
            acc, batch_dists = accuracy_2animal(score_map, target.cpu(),
                                                idx1, idx2)

            # accumulate per-joint [email protected]
            for j in range(num_classes):
                if acc[j + 1] > -1:
                    pck_score[j] += acc[j + 1].numpy()
                    pck_count[j] += 1

            # generate predictions
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            # measure accuracy and record loss
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = (
                '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | '
                'Total: {total:} | ETA: {eta:} | Acc: {acc: .8f}'
            ).format(batch=i + 1,
                     size=len(val_loader),
                     data=data_time.val,
                     bt=batch_time.avg,
                     total=bar.elapsed_td,
                     eta=bar.eta_td,
                     acc=acces.avg)
            bar.next()
        bar.finish()

    # NOTE(review): joints never seen in the dataset leave pck_count[j] == 0
    # and produce a divide-by-zero warning here — confirm all joints occur.
    for j in range(num_classes):
        pck_score[j] /= float(pck_count[j])
    print("\nper joint [email protected]:")
    print('Animal: {}, total number of joints: {}'.format(
        args.animal, pck_count.sum()))
    print(list(pck_score))

    # Group joints into body parts and report a count-weighted part score.
    parts = {
        'eye': [0, 1],
        'chin': [2],
        'hoof': [3, 4, 5, 6],
        'hip': [7],
        'knee': [8, 9, 10, 11],
        'shoulder': [12, 13],
        'elbow': [14, 15, 16, 17]
    }
    for p in parts.keys():
        part = parts[p]
        score = 0.
        count = 0.
        for joint in part:
            score += pck_score[joint] * pck_count[joint]
            count += pck_count[joint]
        print('\n Joint {}: {} '.format(p, score / count))

    # Previously returned the loop throwaway `_`; name it explicitly.
    return batch_dists, acces.avg, predictions
def validate(val_loader, model, criterion, debug=False, flip=True, test_batch=6, njoints=68):
    """Evaluate a facial-landmark model; report loss, accuracy, AUC, NME.

    Args:
        val_loader: yields (input, target, meta) batches.
        model: returns a list of stage heatmaps or a single heatmap tensor.
        criterion: weighted heatmap loss taking (output, target,
            target_weight, njoints-of-interest).
        debug: show groundtruth/prediction heatmaps with matplotlib.
        flip: average with a horizontally-flipped forward pass.
        test_batch: evaluation batch size, used to index the per-sample
            inter-ocular distance buffer.
        njoints: total number of landmarks.

    Returns:
        (average loss, average accuracy, predictions (N, njoints, 2),
         AUC from calc_metrics, mean normalized error).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions
    predictions = torch.Tensor(val_loader.dataset.__len__(), njoints, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    interocular_dists = torch.zeros((njoints, val_loader.dataset.__len__()))
    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device,
                                                     non_blocking=True)

            # compute output
            output = model(input)
            score_map = output[-1].cpu() if isinstance(output,
                                                       list) else output.cpu()
            if flip:
                flip_input = torch.from_numpy(
                    fliplr(input.clone().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if isinstance(
                    flip_output, list) else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if isinstance(output, list):  # multiple output
                loss = 0
                for o in output:
                    loss += criterion(o, target, target_weight, len(idx))
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight, len(idx))
            acc, batch_interocular_dists = accuracy(score_map, target.cpu(),
                                                    idx)
            interocular_dists[:, i * test_batch:(i + 1) *
                              test_batch] = batch_interocular_dists

            # generate predictions
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = (
                '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | '
                'Total: {total:} | ETA: {eta:} | Loss: {loss:.8f} | '
                'Acc: {acc: .8f}'
            ).format(batch=i + 1,
                     size=len(val_loader),
                     data=data_time.val,
                     bt=batch_time.avg,
                     total=bar.elapsed_td,
                     eta=bar.eta_td,
                     loss=losses.avg,
                     acc=acces.avg)
            bar.next()
        bar.finish()

    # Mean error over the landmarks of interest, ignoring -1 sentinels.
    idx_array = np.array(idx) - 1
    interocular_dists_pickup = interocular_dists[idx_array, :]
    mean_error = torch.mean(
        interocular_dists_pickup[interocular_dists_pickup != -1])
    # AUC of predicted maps against targets.
    auc = calc_metrics(interocular_dists, idx)
    return losses.avg, acces.avg, predictions, auc, mean_error
def train(train_loader, model, criterion, optimizer, debug=False, flip=True):
    """Train for one epoch with optional heatmap visualization.

    Args:
        train_loader: yields (input, target, meta) batches.
        model: returns a list of stage heatmaps or a single heatmap tensor.
        criterion: weighted heatmap loss taking (output, target,
            target_weight).
        optimizer: optimizer stepped once per batch.
        debug: show groundtruth/prediction heatmaps with matplotlib.
        flip: unused here; kept for signature compatibility.

    Returns:
        (average loss, average accuracy) over the epoch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    gt_win, pred_win = None, None
    bar = Bar('Train', max=len(train_loader))
    for i, (input, target, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input, target = input.to(device), target.to(device,
                                                    non_blocking=True)
        target_weight = meta['target_weight'].to(device, non_blocking=True)

        # compute output
        output = model(input)
        if isinstance(output, list):  # multiple output
            loss = 0
            for o in output:
                loss += criterion(o, target, target_weight)
            output = output[-1]
        else:  # single output
            loss = criterion(output, target, target_weight)
        acc = accuracy(output, target, idx)

        if debug:  # visualize groundtruth and predictions
            gt_batch_img = batch_with_heatmap(input, target)
            pred_batch_img = batch_with_heatmap(input, output)
            if not gt_win or not pred_win:
                ax1 = plt.subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))
        acces.update(acc[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = (
            '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | '
            'Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'
        ).format(batch=i + 1,
                 size=len(train_loader),
                 data=data_time.val,
                 bt=batch_time.val,
                 total=bar.elapsed_td,
                 eta=bar.eta_td,
                 loss=losses.avg,
                 acc=acces.avg)
        bar.next()
    bar.finish()
    return losses.avg, acces.avg
def validate(val_loader, model, criterion, num_classes, debug=False, flip=True, _logger=None):
    """Evaluate with UniLoss-based accuracy; progress is routed to _logger.

    Fixes vs. original: `async=True` (a SyntaxError since Python 3.7) and
    the removed `volatile=True` Variable API are replaced with
    `non_blocking=True` and `torch.no_grad()`; `_logger` is guarded so the
    default None no longer crashes.

    Args:
        val_loader: yields (inputs, target, meta) batches.
        model: returns a list of per-stack heatmap outputs.
        criterion: per-stage heatmap loss.
        num_classes: number of joints.
        debug: show groundtruth/prediction heatmaps with matplotlib.
        flip: average with a horizontally-flipped forward pass.
        _logger: optional logger receiving the per-batch progress line.

    Returns:
        (average loss * 100, average accuracy * 100,
         predictions tensor of shape (N, num_classes, 2)).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions
    predictions = torch.Tensor(val_loader.dataset.__len__(), num_classes, 2)

    autoloss = models.loss.UniLoss(valid=True)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    with torch.no_grad():
        for i, (inputs, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            target = target.cuda(non_blocking=True)
            input_var = inputs.cuda()
            target_var = target

            # compute output
            output = model(input_var)
            score_map = output[-1].data.cpu()
            if flip:
                flip_input_var = torch.from_numpy(
                    fliplr(inputs.clone().numpy())).float().cuda()
                flip_output_var = model(flip_input_var)
                flip_output = flip_back(flip_output_var[-1].data.cpu())
                score_map += flip_output

            loss = 0
            for o in output:
                loss += criterion(o, target_var)
            # Accuracy comes from UniLoss rather than the heatmap accuracy().
            _, acc, _ = autoloss(output[-1], meta)

            # generate predictions
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(inputs, target)
                pred_batch_img = batch_with_heatmap(inputs, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), inputs.size(0))
            acces.update(acc.item(), inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # progress goes to the logger, not the console bar
            bar.suffix = (
                '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | '
                'Total: {total:} | Loss: {loss:.4f} | Acc: {acc: .4f}'
            ).format(batch=i + 1,
                     size=len(val_loader),
                     data=data_time.val,
                     bt=batch_time.avg,
                     total=bar.elapsed_td,
                     loss=losses.avg * 100,
                     acc=acces.avg * 100)
            if _logger is not None:
                _logger.info(bar.suffix)
    bar.finish()
    return losses.avg * 100, acces.avg * 100, predictions
def train(train_loader, model, tmodel, criterion, optimizer, kdloss_alpha, debug=False, flip=True):
    """Train a student model with knowledge distillation from a teacher.

    Per-sample losses: unlabeled samples (meta['gtmask'] ~ 0) get a
    student-vs-teacher KD loss only; labeled samples get both KD and
    ground-truth losses, mixed by kdloss_alpha.

    Fixes vs. original: the inner per-sample loop no longer shadows the
    batch index `i` (which corrupted the progress display); `pred_teacher`
    is initialized before the debug branch (it was referenced before
    assignment on the first debug iteration); `async=True` — a SyntaxError
    on Python 3.7+ — is now `non_blocking=True`.

    Args:
        train_loader: yields (inputs, target, meta) batches; meta['gtmask']
            marks labeled samples.
        model: student network returning a list of stack outputs.
        tmodel: teacher network; only its final stack output is used.
        criterion: heatmap loss.
        optimizer: optimizer stepped once per batch.
        kdloss_alpha: mixing weight between KD and ground-truth losses.
        debug: show groundtruth/prediction/teacher heatmaps.
        flip: unused here; kept for signature compatibility.

    Returns:
        (average total loss, average accuracy) over the epoch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    kdlosses = AverageMeter()
    unkdlosses = AverageMeter()
    gtlosses = AverageMeter()
    acces = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    gt_win, pred_win, pred_teacher = None, None, None
    bar = Bar('Processing', max=len(train_loader))
    for i, (inputs, target, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input_var = inputs.cuda()
        target_var = target.cuda(non_blocking=True)

        # compute output
        output = model(input_var)
        score_map = output[-1].data.cpu()

        # compute teacher network output (no gradient through the teacher)
        toutput = tmodel(input_var)
        toutput = toutput[-1].detach()

        # gtmask filters out the samples without ground truth
        gtloss = torch.tensor(0.0).cuda()
        kdloss = torch.tensor(0.0).cuda()
        kdloss_unlabeled = torch.tensor(0.0).cuda()
        unkdloss_alpha = 1.0
        gtmask = meta['gtmask']
        train_batch = score_map.shape[0]
        for j in range(0, len(output)):
            _output = output[j]
            # per-sample loop; divide by train_batch to keep magnitudes
            # comparable to a batch-mean loss
            for b in range(gtmask.shape[0]):
                if gtmask[b] < 0.1:
                    # unlabeled data (gtmask == 0.0): KD loss only
                    kdloss_unlabeled += criterion(
                        _output[b, :, :, :],
                        toutput[b, :, :, :]) / train_batch
                else:
                    # labeled data: KD loss + ground-truth loss
                    gtloss += criterion(_output[b, :, :, :],
                                        target_var[b, :, :, :]) / train_batch
                    kdloss += criterion(_output[b, :, :, :],
                                        toutput[b, :, :, :]) / train_batch

        loss_labeled = kdloss_alpha * kdloss + (1 - kdloss_alpha) * gtloss
        total_loss = loss_labeled + unkdloss_alpha * kdloss_unlabeled

        acc = accuracy(score_map, target, idx)

        if debug:  # visualize groundtruth, predictions and teacher output
            gt_batch_img = batch_with_heatmap(inputs, target)
            pred_batch_img = batch_with_heatmap(inputs, score_map)
            teacher_batch_img = batch_with_heatmap(inputs, toutput)
            if not gt_win or not pred_win or not pred_teacher:
                ax1 = plt.subplot(131)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(132)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
                ax3 = plt.subplot(133)
                ax3.title.set_text('teacher')
                pred_teacher = plt.imshow(teacher_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
                pred_teacher.set_data(teacher_batch_img)
            plt.pause(.05)
            plt.draw()

        # measure accuracy and record loss
        gtlosses.update(gtloss.item(), inputs.size(0))
        kdlosses.update(kdloss.item(), inputs.size(0))
        unkdlosses.update(kdloss_unlabeled.item(), inputs.size(0))
        losses.update(total_loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = (
            '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | '
            'Total: {total:} | ETA: {eta:} '
            '| Loss: {loss:.6f} | KdLoss:{kdloss:.6f}| '
            'unKdLoss:{unkdloss:.6f}| GtLoss:{gtloss:.6f} | Acc: {acc: .4f}'
        ).format(batch=i + 1,
                 size=len(train_loader),
                 data=data_time.val,
                 bt=batch_time.val,
                 total=bar.elapsed_td,
                 eta=bar.eta_td,
                 loss=losses.avg,
                 kdloss=kdlosses.avg,
                 unkdloss=unkdlosses.avg,
                 gtloss=gtlosses.avg,
                 acc=acces.avg)
        bar.next()
    bar.finish()
    return losses.avg, acces.avg
def validate(val_loader, model, criterion, num_classes, debug=False, flip=True):
    """Evaluate and dump annotated prediction images to disk.

    Args:
        val_loader: yields (input, target, meta, img_path) batches.
        model: returns a list of stage heatmaps or a single heatmap tensor.
        criterion: weighted heatmap loss.
        num_classes: number of joints.
        debug: show groundtruth/prediction heatmaps with matplotlib.
        flip: average with a horizontally-flipped forward pass.

    Returns:
        (average loss, average accuracy,
         predictions tensor of shape (N, num_classes, 2)).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions
    predictions = torch.Tensor(val_loader.dataset.__len__(), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    with torch.no_grad():
        for i, (input, target, meta, img_path) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            # NOTE(review): `indexes` is never populated (the code that
            # filled it is gone), so accuracy() below always receives an
            # empty index list — confirm this is intended.
            indexes = []
            input = input.to(device, non_blocking=True)

            # img_path arrives as a stringified 1-tuple, e.g. "('/a/b.png',)";
            # strip the wrapper to recover the raw path.
            path = str(img_path)
            path = path[3:len(path) - 2]
            image = cv2.imread(path)

            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device,
                                                     non_blocking=True)

            # compute output
            output = model(input)
            score_map = output[-1].cpu() if isinstance(output,
                                                       list) else output.cpu()
            if flip:
                flip_input = torch.from_numpy(
                    fliplr(input.clone().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if isinstance(
                    flip_output, list) else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if isinstance(output, list):  # multiple output
                loss = 0
                for o in output:
                    loss += criterion(o, target, target_weight)
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight)

            # generate predictions
            preds, vals = final_preds(score_map, meta['center'],
                                      meta['scale'], [64, 64])

            acc = accuracy(score_map, target.cpu(), indexes)
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # NOTE(review): hardcoded, user-specific output directory —
            # consider making this a parameter.
            cv2.imwrite(
                '/home/shantam/Documents/Programs/pytorch-pose/example/predictions/pred'
                + str(i) + '.png', image)

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = (
                '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | '
                'Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | '
                'Acc: {acc: .4f}'
            ).format(batch=i + 1,
                     size=len(val_loader),
                     data=data_time.val,
                     bt=batch_time.avg,
                     total=bar.elapsed_td,
                     eta=bar.eta_td,
                     loss=losses.avg,
                     acc=acces.avg)
            bar.next()
        bar.finish()
    return losses.avg, acces.avg, predictions
def validate(val_loader, model, criterion, flip=True, test_batch=6, njoints=18):
    """Evaluate an animal-pose model (horse/tiger keypoint layouts).

    Tiger targets are reordered into the canonical joint layout before
    scoring; the permutation is hoisted into one array instead of being
    duplicated for target and target_weight.

    Args:
        val_loader: yields (input, target, meta) batches.
        model: returns a list of stage heatmaps or a single heatmap tensor.
        criterion: weighted heatmap loss taking (output, target,
            target_weight, njoints-of-interest).
        flip: average with a horizontally-flipped forward pass.
        test_batch: unused; kept for signature compatibility.
        njoints: number of joints (used for the predictions buffer).

    Returns:
        (average loss, average accuracy).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions (currently left unfilled; see commented loop below)
    predictions = torch.Tensor(val_loader.dataset.__len__(), njoints, 2)

    # tiger annotations use a different joint order; this permutation maps
    # them onto the canonical layout (1-based indices, hence the -1)
    tiger_joint_order = np.array(
        [1, 2, 3, 4, 5, 6, 7, 8, 15, 16, 17, 18, 13, 14, 9, 10, 11, 12]) - 1

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            if global_animal == 'horse':
                target = target.to(device, non_blocking=True)
                target_weight = meta['target_weight'].to(device,
                                                         non_blocking=True)
            elif global_animal == 'tiger':
                target = target.to(device, non_blocking=True)
                target_weight = meta['target_weight'].to(device,
                                                         non_blocking=True)
                target = target[:, tiger_joint_order, :, :]
                target_weight = target_weight[:, tiger_joint_order, :]
            else:
                raise Exception('please add new animal category')

            # compute output
            output = model(input)
            score_map = output[-1].cpu() if isinstance(output,
                                                       list) else output.cpu()
            if flip:
                flip_input = torch.from_numpy(
                    fliplr(input.clone().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if isinstance(
                    flip_output, list) else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if isinstance(output, list):  # multiple output
                loss = 0
                for o in output:
                    loss += criterion(o, target, target_weight, len(idx))
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight, len(idx))
            acc, _ = accuracy(score_map, target.cpu(), idx)

            # generate predictions
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            #for n in range(score_map.size(0)):
            #    predictions[meta['index'][n], :, :] = preds[n, :, :]

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = (
                '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | '
                'Total: {total:} | ETA: {eta:} | Loss: {loss:.8f} | '
                'Acc: {acc: .8f}'
            ).format(batch=i + 1,
                     size=len(val_loader),
                     data=data_time.val,
                     bt=batch_time.avg,
                     total=bar.elapsed_td,
                     eta=bar.eta_td,
                     loss=losses.avg,
                     acc=acces.avg)
            bar.next()
        bar.finish()
    return losses.avg, acces.avg
def validate(val_loader, model, criterion, criterion_seg, debug=False, flip=True, test_batch=6, njoints=68):
    """Evaluate a joint keypoint + segmentation model.

    Fixes vs. original: the single-output branch computed the segmentation
    loss with the keypoint `criterion` instead of `criterion_seg`; the flip
    path unpacked `model(flip_input)[-1]`, which is the *segmentation*
    head of the (kpt, seg) tuple, and added it into the keypoint score map
    — it now uses the keypoint head.

    Args:
        val_loader: yields (input, target, target_seg, meta) batches.
        model: returns (output_kpt, output_seg); each may be a list of
            per-stack outputs or a single tensor.
        criterion: weighted keypoint heatmap loss.
        criterion_seg: segmentation loss.
        debug: show groundtruth/prediction heatmaps with matplotlib.
        flip: average keypoints with a horizontally-flipped forward pass.
        test_batch: unused; kept for signature compatibility.
        njoints: number of keypoints.

    Returns:
        (average keypoint loss, average accuracy,
         predictions (N, njoints, 2), mean IoU * 100).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses_kpt = AverageMeter()
    losses_seg = AverageMeter()
    acces = AverageMeter()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()

    # predictions
    predictions = torch.Tensor(val_loader.dataset.__len__(), njoints, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    interocular_dists = torch.zeros((njoints, val_loader.dataset.__len__()))
    with torch.no_grad():
        for i, (input, target, target_seg, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input, target, target_seg = input.to(device), target.to(
                device, non_blocking=True), target_seg.to(device)
            target_weight = meta['target_weight'].to(device,
                                                     non_blocking=True)

            # compute output
            output_kpt, output_seg = model(input)
            score_map = output_kpt[-1].cpu() if isinstance(
                output_kpt, list) else output_kpt.cpu()
            if flip:
                flip_input = torch.from_numpy(
                    fliplr(input.clone().numpy())).float().to(device)
                # use the keypoint head only (original mistakenly used the
                # segmentation head via model(...)[-1])
                flip_out_kpt, _flip_out_seg = model(flip_input)
                flip_output = flip_out_kpt[-1].cpu() if isinstance(
                    flip_out_kpt, list) else flip_out_kpt.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if isinstance(output_kpt, list):  # multiple output
                loss_kpt = 0
                loss_seg = 0
                for (o, o_seg) in zip(output_kpt, output_seg):
                    loss_kpt += criterion(o, target, target_weight,
                                          len(idx))
                    loss_seg += criterion_seg(o_seg, target_seg)
                output = output_kpt[-1]
                output_seg = output_seg[-1]
            else:  # single output
                loss_kpt = criterion(output_kpt, target, target_weight,
                                     len(idx))
                loss_seg = criterion_seg(output_seg, target_seg)

            acc, batch_interocular_dists = accuracy(score_map, target.cpu(),
                                                    idx)
            _, pred_seg = torch.max(output_seg, 1)

            # generate predictions
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses_kpt.update(loss_kpt.item(), input.size(0))
            losses_seg.update(loss_seg.item(), input.size(0))
            acces.update(acc[0], input.size(0))
            inter, union = inter_and_union(
                pred_seg.data.cpu().numpy().astype(np.uint8),
                target_seg.data.cpu().numpy().astype(np.uint8))
            inter_meter.update(inter)
            union_meter.update(union)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            iou = inter_meter.sum / (union_meter.sum + 1e-10)

            # plot progress
            bar.suffix = (
                '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | '
                'Total: {total:} | ETA: {eta:} | Loss_kpt: {loss_kpt:.8f} | '
                'Loss_seg: {loss_seg:.8f} | Acc: {acc: .8f} | IOU: {iou:.2f}'
            ).format(batch=i + 1,
                     size=len(val_loader),
                     data=data_time.val,
                     bt=batch_time.avg,
                     total=bar.elapsed_td,
                     eta=bar.eta_td,
                     loss_kpt=losses_kpt.avg,
                     loss_seg=losses_seg.avg,
                     acc=acces.avg,
                     iou=iou.mean() * 100)
            bar.next()
        bar.finish()
    print(iou)
    return losses_kpt.avg, acces.avg, predictions, iou.mean() * 100
def train(train_loader, model, criterion, criterion_seg, optimizer, debug=False, flip=True, train_batch=6, epoch=0, njoints=68):
    """Train a joint keypoint + segmentation model for one epoch.

    Total loss = loss_kpt + (0.01 / (epoch + 1)) * loss_seg, i.e. the
    segmentation term is annealed away over epochs.

    Fix vs. original: the debug visualization referenced an undefined name
    `output`; it now uses `output_kpt` (the final-stack keypoint heatmaps).

    Args:
        train_loader: yields (input, target, target_seg, meta) batches.
        model: returns (output_kpt, output_seg); each may be a list of
            per-stack outputs or a single tensor.
        criterion: weighted keypoint heatmap loss.
        criterion_seg: segmentation loss.
        optimizer: optimizer stepped once per batch.
        debug: show groundtruth/prediction heatmaps with matplotlib.
        flip: unused here; kept for signature compatibility.
        train_batch: unused; kept for signature compatibility.
        epoch: current epoch, used to anneal the segmentation weight.
        njoints: unused; kept for signature compatibility.

    Returns:
        (average keypoint loss, average accuracy).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses_kpt = AverageMeter()
    losses_seg = AverageMeter()
    acces = AverageMeter()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    gt_win, pred_win = None, None
    bar = Bar('Train', max=len(train_loader))
    for i, (input, target, target_seg, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input, target, target_seg = input.to(device), target.to(
            device, non_blocking=True), target_seg.to(device)
        target_weight = meta['target_weight'].to(device, non_blocking=True)

        # compute output
        output_kpt, output_seg = model(input)
        if isinstance(output_kpt, list):  # multiple output
            loss_kpt = 0
            loss_seg = 0
            for (o, o_seg) in zip(output_kpt, output_seg):
                loss_kpt += criterion(o, target, target_weight, len(idx))
                loss_seg += criterion_seg(o_seg, target_seg)
            output_kpt = output_kpt[-1]
            output_seg = output_seg[-1]
        else:  # single output
            loss_kpt = criterion(output_kpt, target, target_weight,
                                 len(idx))
            loss_seg = criterion_seg(output_seg, target_seg)
        acc, batch_interocular_dists = accuracy(output_kpt, target, idx)
        _, pred_seg = torch.max(output_seg, 1)

        if debug:  # visualize groundtruth and predictions
            gt_batch_img = batch_with_heatmap(input, target)
            # was `output` (undefined); use the final keypoint heatmaps
            pred_batch_img = batch_with_heatmap(input, output_kpt)
            if not gt_win or not pred_win:
                ax1 = plt.subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # annealed segmentation weight
        loss = loss_kpt + (0.01 / (epoch + 1)) * loss_seg

        # measure accuracy and record loss
        losses_kpt.update(loss_kpt.item(), input.size(0))
        losses_seg.update(loss_seg.item(), input.size(0))
        acces.update(acc[0], input.size(0))
        inter, union = inter_and_union(
            pred_seg.data.cpu().numpy().astype(np.uint8),
            target_seg.data.cpu().numpy().astype(np.uint8))
        inter_meter.update(inter)
        union_meter.update(union)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        iou = inter_meter.sum / (union_meter.sum + 1e-10)

        # plot progress
        bar.suffix = (
            '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | '
            'Total: {total:} | ETA: {eta:} | Loss_kpt: {loss_kpt:.8f} | '
            'Loss_seg: {loss_seg:.8f} | Acc: {acc: .4f} | IOU: {iou: .2f}'
        ).format(batch=i + 1,
                 size=len(train_loader),
                 data=data_time.val,
                 bt=batch_time.val,
                 total=bar.elapsed_td,
                 eta=bar.eta_td,
                 loss_kpt=losses_kpt.avg,
                 loss_seg=losses_seg.avg,
                 acc=acces.avg,
                 iou=iou.mean() * 100)
        bar.next()
    bar.finish()
    print(iou)
    return losses_kpt.avg, acces.avg
def validate(val_loader, model, criterion, num_classes, debug=False, flip=True):
    """Evaluate a model whose last stack emits embedding heatmaps.

    Fixes vs. original: `async=True` (SyntaxError on Python 3.7+) →
    `non_blocking=True`; the removed `volatile=True` Variable API →
    `torch.no_grad()`; deprecated `loss.data[0]` → `loss.item()`.

    Args:
        val_loader: yields (inputs, target, target2, meta) batches; target2
            supervises the final embedding output.
        model: returns a list of outputs; [-2] is the hourglass heatmap,
            [-1] the embedding heatmap.
        criterion: heatmap loss.
        num_classes: number of joints.
        debug: show groundtruth/prediction heatmaps with matplotlib.
        flip: unused here; kept for signature compatibility.

    Returns:
        (average loss, average accuracy,
         predictions tensor of shape (N, num_classes, 2)).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions
    predictions = torch.Tensor(val_loader.dataset.__len__(), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    with torch.no_grad():
        for i, (inputs, target, target2, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            target = target.cuda(non_blocking=True)
            target2 = target2.cuda(non_blocking=True)
            input_var = inputs.cuda()
            target_var = target
            target2_var = target2

            # compute output
            output = model(input_var)
            score_map_hg = output[-2].data.cpu()
            score_map_emb = output[-1].data.cpu()
            score_map_emb2 = score_map_emb.cuda(non_blocking=True)

            # intermediate stacks are supervised by target; the final
            # embedding output by target2
            loss = criterion(output[0], target_var)
            for j in range(1, (len(output) - 1)):
                loss += criterion(output[j], target_var)
            loss += criterion(output[-1], target2_var)
            acc = accuracy(score_map_emb2, target2, idx)

            # generate predictions — keep every second joint row of preds
            preds = final_preds(score_map_emb, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map_emb.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, ::2, :]

            if debug:
                gt_batch_img = batch_with_heatmap(inputs, target)
                pred_batch_img = batch_with_heatmap(inputs, score_map_emb)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), inputs.size(0))
            acces.update(acc[0], inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = (
                '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | '
                'Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | '
                'Acc: {acc: .4f}'
            ).format(batch=i + 1,
                     size=len(val_loader),
                     data=data_time.val,
                     bt=batch_time.avg,
                     total=bar.elapsed_td,
                     eta=bar.eta_td,
                     loss=losses.avg,
                     acc=acces.avg)
            bar.next()
        bar.finish()
    return losses.avg, acces.avg, predictions
def myvalidate(model, criterion, num_classes, debug=False, flip=True):
    """Run the model on a fixed, hard-coded set of local images.

    Crops each image around a hard-coded center/scale, normalizes it with the
    MPII mean/std, and returns a (img_num, num_classes, 2) tensor of predicted
    keypoint coordinates.  ``criterion`` is accepted for interface parity but
    no loss is computed (no ground truth is available).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # hard-coded evaluation set — single image with a fixed person center/scale
    img_folder = '/data3/wzwu/dataset/my'
    img_num = 1
    r = 0
    center1 = torch.Tensor([1281, 2169])
    center2 = torch.Tensor([[1281, 2169]])
    scale = torch.Tensor([10.0])
    inp_res = 256

    # dataset-wide color statistics used for input normalization
    meanstd_file = './data/mpii/mean.pth.tar'
    if isfile(meanstd_file):
        meanstd = torch.load(meanstd_file)
        mean = meanstd['mean']
        std = meanstd['std']
    else:
        # previously this fell through to a NameError on `mean`; fail loudly
        raise FileNotFoundError(
            'mean/std file not found: {}'.format(meanstd_file))

    input_list = []
    for i in range(img_num):
        img_name = str(i) + '.jpg'
        img_path = os.path.join(img_folder, img_name)
        print(img_path)
        img = load_image(img_path)
        inp = crop(img, center1, scale, [inp_res, inp_res], rot=r)
        inp = color_normalize(inp, mean, std)
        input_list.append(inp)

    # predictions
    predictions = torch.Tensor(img_num, num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=img_num)
    with torch.no_grad():
        for i, input in enumerate(input_list):
            # add the batch dimension expected by the model
            s0, s1, s2 = input.size()
            input = input.view(1, s0, s1, s2)
            # measure data loading time
            data_time.update(time.time() - end)
            input = input.to(device, non_blocking=True)

            # compute output
            output = model(input)
            score_map = output[-1].cpu() if type(output) == list else output.cpu()

            # generate predictions (removed leftover `set_trace()` breakpoints
            # and debug prints that halted/cluttered unattended evaluation)
            preds = final_preds(score_map, center2, scale, [64, 64])
            for n in range(score_map.size(0)):
                # batch size is 1 here, so this writes slot `i` once
                predictions[i, :, :] = preds[n, :, :]

            if debug:
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()
                plt.savefig('/data3/wzwu/test/' + str(i) + '.png')

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress (losses/acces are never updated here, so both
            # averages report 0 — loss/acc are shown only for format parity)
            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1,
                size=img_num,
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg
            )
            bar.next()
    bar.finish()
    return predictions
def train(train_loader, model, criterion, optimizer, debug=False, flip=True): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() acces = AverageMeter() # switch to train mode model.train() end = time.time() gt_win, pred_win = None, None bar = Bar('Processing', max=len(train_loader)) for i, (inputs, target, meta) in enumerate(train_loader): # measure data loading time data_time.update(time.time() - end) input_var = torch.autograd.Variable(inputs.cuda()) target_var = torch.autograd.Variable(target.cuda(async=True)) # compute output output = model(input_var) score_map = output[-1].data.cpu() loss = criterion(output[0], target_var) for j in range(1, len(output)): loss += criterion(output[j], target_var) acc = accuracy_segm(score_map, target) if debug: for j in range(len(score_map)): save_im_in(inputs[j], "debug/test_in_{}.jpg".format(j)) save_im_out(score_map[j, 0, :, :], "debug/test_out_{}.jpg".format(j)) save_im_out(target[j, 0, :, :], "debug/test_target_{}.jpg".format(j)) # measure accuracy and record loss losses.update(loss.data[0], inputs.size(0)) acces.update(acc[0], inputs.size(0)) # compute gradient and do SGD step optimizer.zero_grad() loss.backward() optimizer.step() # measure elapsed time batch_time.update(time.time() - end) end = time.time() # plot progress bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format( batch=i + 1, size=len(train_loader), data=data_time.val, bt=batch_time.val, total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, acc=acces.avg ) bar.next() bar.finish() return losses.avg, acces.avg
def train(train_loader, model, criterion, optimizer, debug=False, flip=True,
          train_iters=0):
    """Train for a fixed number of iterations (batches), cycling the loader.

    ``train_iters=0`` means exactly one pass over ``train_loader``.  When
    ``train_iters`` exceeds the loader length, the loader is re-iterated
    (the outer ``while``) until the iteration budget is spent.
    Returns ``(avg_loss, avg_acc)``.
    """
    print("Train iters: {}".format(train_iters))
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # switch to train mode
    model.train()
    debug_count = 0

    end = time.time()
    gt_win, pred_win = None, None
    # normalize the budget once: 0 means "one epoch"
    train_iters = train_iters if train_iters != 0 else len(train_loader)
    bar = Bar('Train', max=train_iters)
    curr_iter = 0
    while curr_iter < train_iters:
        for i, (input, target, meta) in enumerate(train_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input, target = input.to(device), target.to(device,
                                                        non_blocking=True)
            target_weight = meta['target_weight'].to(device,
                                                     non_blocking=True)

            # compute output
            output = model(input)
            if type(output) == list:  # multiple output (stacked hourglass)
                loss = 0
                for o in output:
                    loss += criterion(o, target, target_weight)
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight)
            acc = accuracy(output, target, idx)

            if debug:  # visualize groundtruth and predictions
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, output)
                fig = plt.figure()
                ax1 = fig.add_subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = fig.add_subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
                plt.pause(.05)
                plt.draw()
                fig.savefig('debug/debug_{}.png'.format(str(debug_count)),
                            dpi=500)
                debug_count += 1

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # compute gradient and do SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress; use the global iteration counter so the displayed
            # batch number matches the bar's `max` across loader restarts
            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=curr_iter + 1,
                size=train_iters,
                data=data_time.val,
                bt=batch_time.val,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg)
            bar.next()
            curr_iter += 1
            # was `curr_iter >= train_iters - 1`, which stopped one batch early
            if curr_iter >= train_iters:
                break
    bar.finish()
    return losses.avg, acces.avg
def validate(val_loader, model, criterion, num_classes, debug=False, flip=True): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() acces = AverageMeter() # predictions predictions = torch.Tensor(val_loader.dataset.__len__(), num_classes, 2) # switch to evaluate mode model.eval() gt_win, pred_win = None, None end = time.time() bar = Bar('Processing', max=len(val_loader)) for i, (inputs, target, meta) in enumerate(val_loader): # measure data loading time data_time.update(time.time() - end) target = target.cuda(async=True) input_var = torch.autograd.Variable(inputs.cuda(), volatile=True) target_var = torch.autograd.Variable(target, volatile=True) # compute output output = model(input_var) score_map = output[-1].data.cpu() if flip: flip_input_var = torch.autograd.Variable( torch.from_numpy( fliplr(inputs.clone().numpy())).float().cuda(), volatile=True ) flip_output_var = model(flip_input_var) flip_output = flip_back(flip_output_var[-1].data.cpu()) score_map += flip_output loss = 0 for o in output: loss += criterion(o, target_var) acc = accuracy_segm(score_map, target.cpu()) if debug: for j in range(len(score_map)): save_im_in(inputs[j], "debug/test_in_{}.jpg".format(j)) save_im_out(score_map[j, 0, :, :], "debug/test_out_{}.jpg".format(j)) save_im_out(target[j, 0, :, :], "debug/test_target_{}.jpg".format(j)) # measure accuracy and record loss losses.update(loss.data[0], inputs.size(0)) acces.update(acc[0], inputs.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() # plot progress bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format( batch=i + 1, size=len(val_loader), data=data_time.val, bt=batch_time.avg, total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, acc=acces.avg ) bar.next() bar.finish() return losses.avg, acces.avg
def train(inqueues, outqueues, train_loader, model, criterion, optimizer,
          debug=False, flip=True, clip=1, _logger=None):
    """One training epoch where per-sample losses/grads may be computed by
    worker processes.

    If ``inqueues`` is non-empty, each sample of each stack output is sent to
    a worker over its queue; the workers send back (loss, acc, sum_loss, grad)
    and the gradient is applied manually via ``output[0].backward(...)``.
    Otherwise the criterion is evaluated locally.  Returns
    ``(avg_loss*100, avg_acc*100)``.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()
    sum_losses = AverageMeter()

    # switch to train mode
    model.train()
    criterion.valid = False

    end = time.time()
    gt_win, pred_win = None, None
    bar = Bar('Processing', max=len(train_loader))
    for i, (inputs, target, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # `async=True` is a Python >= 3.7 syntax error; use non_blocking
        input_var = inputs.cuda()
        target_var = target.cuda(non_blocking=True)

        # compute output
        output = model(input_var)

        if len(inqueues) > 0:
            # fan each (stack, sample) pair out to the worker processes
            loss = []
            acc = []
            sum_loss = []
            for j in range(len(output)):
                grad = []
                for ii in range(output[0].size(0)):
                    data = {}
                    data['output'] = output[j][ii]
                    data['meta'] = {}
                    data['meta']['bi_target'] = meta['bi_target'][ii]
                    data['meta']['pck'] = meta['pck'][ii]
                    data['meta']['points'] = meta['points'][ii]
                    data['meta']['tpts'] = meta['tpts'][ii]
                    inqueues[ii].send(data)
                for ii in range(output[0].size(0)):
                    _loss, _acc, _sum_loss, _grad = outqueues[ii].recv()
                    loss.append(_loss)
                    grad.append(_grad)
                    # only the last stack contributes to the reported metrics
                    if j == len(output) - 1:
                        acc.append(_acc)
                        sum_loss.append(_sum_loss)
                optimizer.zero_grad()
                # apply worker-computed gradients through the graph
                output[0].backward(torch.stack(grad, 0))
                torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
                optimizer.step()
            loss = sum(loss) / output[0].size(0)
            acc = sum(acc) / output[0].size(0)
            sum_loss = sum(sum_loss) / output[0].size(0)
        else:
            # local path: intermediate supervision over every stack
            optimizer.zero_grad()
            loss, acc, sum_loss = criterion(output[0], meta)
            for j in range(1, len(output)):
                _loss, acc, sum_loss = criterion(output[j], meta)
                loss += _loss
            loss.backward()
            optimizer.step()

        if debug:  # visualize groundtruth and predictions
            # `score_map` was previously undefined here (NameError); use the
            # last stack's prediction
            score_map = output[-1].data.cpu()
            gt_batch_img = batch_with_heatmap(inputs, target)
            pred_batch_img = batch_with_heatmap(inputs, score_map)
            if not gt_win or not pred_win:
                ax1 = plt.subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # measure accuracy and record loss
        losses.update(loss.data.item(), inputs.size(0))
        acces.update(acc.item(), inputs.size(0))
        sum_losses.update(sum_loss)
        loss = None
        torch.cuda.empty_cache()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # progress is logged instead of drawn (no bar.next() — intentional,
        # this runner writes through `_logger`)
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | Loss: {loss:.4f} | Sum_Loss: {sum_loss:.4f} | Acc: {acc: .4f}'.format(
            batch=i + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            loss=losses.avg * 100,
            sum_loss=sum_losses.avg * 100,
            acc=acces.avg * 100)
        _logger.info(bar.suffix)
    bar.finish()
    return losses.avg * 100, acces.avg * 100
def validate(val_loader, model, criterion, num_classes, args, flip=False,
             test_batch=6):
    """Evaluate on a two-animal keypoint benchmark and print per-joint PCK.

    Returns ``(None, avg_acc, predictions)`` — the first slot is kept for
    signature compatibility with validators that return a loss (previously it
    returned the leftover loop variable ``_``, which was accidental).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    acces = AverageMeter()
    # running per-joint [email protected] numerator / visible-joint counts
    pck_score = np.zeros(num_classes)
    pck_count = np.zeros(num_classes)

    # predictions
    predictions = torch.Tensor(val_loader.dataset.__len__(), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device,
                                                     non_blocking=True)

            # compute output
            if args.arch == 'hg':
                output = model(input)
            elif args.arch == 'hg_multitask':
                output, _ = model(input)
            else:
                raise Exception("unspecified arch")
            score_map = output[-1].cpu() if type(
                output) == list else output.cpu()
            if flip:
                # test-time augmentation with a horizontally flipped pass
                flip_input = torch.from_numpy(
                    fliplr(input.clone().cpu().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if type(
                    flip_output) == list else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            acc, _ = accuracy_2animal(score_map, target.cpu(), idx1, idx2)
            # accumulate per-joint [email protected]; acc[0] is the mean, acc[j+1]
            # is joint j (-1 means "not visible in this batch")
            for j in range(num_classes):
                if acc[j + 1] > -1:
                    pck_score[j] += acc[j + 1].numpy()
                    pck_count[j] += 1

            # generate predictions
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            # measure accuracy and record loss
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Acc: {acc: .8f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                acc=acces.avg)
            bar.next()
    bar.finish()

    for j in range(num_classes):
        # guard joints that were never visible (avoids divide-by-zero)
        if pck_count[j] > 0:
            pck_score[j] /= float(pck_count[j])
    print("\nper joint [email protected]:")
    print(list(pck_score))
    return None, acces.avg, predictions
def validate(self):
    """Evaluate the generator ``self.netG`` on ``self.val_loader``.

    Samples whose ``mpii`` flag is false are skipped.  Returns
    ``(avg_loss, avg_acc, predictions)``.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()
    predictions = torch.Tensor(self.val_loader.dataset.__len__(),
                               self.num_classes, 2)
    self.netG.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(self.val_loader))
    with torch.no_grad():
        for i, (input, target, meta, mpii) in enumerate(self.val_loader):
            # NOTE(review): assumes `mpii` is a scalar flag per sample — if it
            # is a batch tensor this comparison needs .all()/.any(); confirm.
            if mpii == False:
                continue
            data_time.update(time.time() - end)
            input = input.to(self.device, non_blocking=True)
            target = target.to(self.device, non_blocking=True)
            target_weight = meta['target_weight'].to(self.device,
                                                     non_blocking=True)

            output = self.netG(input)
            score_map = output[-1].cpu() if type(
                output) == list else output.cpu()
            if self.flip:
                # was `flip_input = torch.from_numpy` (the function itself,
                # call was missing) — reconstructed per the sibling validators
                flip_input = torch.from_numpy(
                    fliplr(input.clone().cpu().numpy())).float().to(self.device)
                flip_output = self.netG(flip_input)
                flip_output = flip_output[-1].cpu() if type(
                    flip_output) == list else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if type(output) == list:  # multiple output
                loss = 0
                for o in output:
                    loss += self.criterion(o, target, target_weight)
                output = output[-1]
            else:  # single output
                loss = self.criterion(output, target, target_weight)
            acc = accuracy(score_map, target.cpu(), self.idx)

            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if self.debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # was `loss.item` (bound method, never called) — corrupted the meter
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            batch_time.update(time.time() - end)
            end = time.time()

            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1,
                size=len(self.val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg)
            bar.next()
    bar.finish()
    return losses.avg, acces.avg, predictions
def validate(val_loader, model, criterion, num_classes, idx, save_result_dir,
             meta_dir, anno_type, flip=True, evaluate=False, scales=None,
             multi_scale=False, save_heatmap=False):
    """Evaluate with optional flip averaging, multi-scale merging and
    result/visualization/heatmap export.

    ``anno_type`` of 'none' (first element, case-insensitive) means no ground
    truth: loss/accuracy are skipped and ``(0, 0)`` is returned; otherwise
    ``(avg_loss, avg_acc)``.  ``scales`` defaults to the standard 7-scale
    pyramid (mutable default removed).
    """
    if scales is None:
        scales = [0.7, 0.8, 0.9, 1, 1.2, 1.4, 1.6]
    anno_type = anno_type[0].lower()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()
    num_scales = len(scales)

    # switch to evaluate mode
    model.eval()

    # dataset mean, used to un-normalize inputs before saving visualizations
    meanstd_file = '../datasets/arm/mean.pth.tar'
    meanstd = torch.load(meanstd_file)
    mean = meanstd['mean']

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    for i, (inputs, target, meta) in enumerate(val_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if anno_type != 'none':
            # `async=True` is a Python >= 3.7 syntax error; use non_blocking
            target = target.cuda(non_blocking=True)
            target_var = torch.autograd.Variable(target)
        input_var = torch.autograd.Variable(inputs.cuda())

        with torch.no_grad():
            # compute output
            output = model(input_var)
            score_map = output[-1].data.cpu()
            if flip:
                # average the prediction with the flipped-input prediction
                flip_input_var = torch.autograd.Variable(
                    torch.from_numpy(fliplr(
                        inputs.clone().numpy())).float().cuda(),
                )
                flip_output_var = model(flip_input_var)
                flip_output = flip_back(flip_output_var[-1].data.cpu(),
                                        meta_dir=meta_dir[0])
                score_map += flip_output
                score_map /= 2

        if anno_type != 'none':
            loss = 0
            for o in output:
                loss += criterion(o, target_var)
            acc = accuracy(score_map, target.cpu(), idx, pck_threshold)

        if multi_scale:
            # each image appears `num_scales` consecutive times in the batch;
            # merge its scale pyramid into a single score map
            new_scales = []
            new_res = []
            new_score_map = []
            new_inp = []
            img_name = []
            confidence = []
            new_center = []
            num_imgs = score_map.size(0) // num_scales
            for n in range(num_imgs):
                score_map_merged, res, conf = multi_scale_merge(
                    score_map[num_scales * n:num_scales * (n + 1)].numpy(),
                    meta['scale'][num_scales * n:num_scales * (n + 1)])
                inp_merged, _, _ = multi_scale_merge(
                    inputs[num_scales * n:num_scales * (n + 1)].numpy(),
                    meta['scale'][num_scales * n:num_scales * (n + 1)])
                new_score_map.append(score_map_merged)
                new_scales.append(meta['scale'][num_scales * (n + 1) - 1])
                new_center.append(meta['center'][num_scales * n])
                new_res.append(res)
                new_inp.append(inp_merged)
                img_name.append(meta['img_name'][num_scales * n])
                confidence.append(conf)
            if len(new_score_map) > 1:
                # stack back to 4-dim
                score_map = torch.tensor(np.stack(new_score_map))
                inputs = torch.tensor(np.stack(new_inp))
            else:
                score_map = torch.tensor(
                    np.expand_dims(new_score_map[0], axis=0))
                inputs = torch.tensor(np.expand_dims(new_inp[0], axis=0))
        else:
            img_name = []
            confidence = []
            for n in range(score_map.size(0)):
                img_name.append(meta['img_name'][n])
                # per-joint peak response as a confidence proxy
                confidence.append(
                    np.amax(score_map[n].numpy(), axis=(1, 2)).tolist())

        # generate predictions in original-image coordinates
        if multi_scale:
            preds = final_preds(score_map, new_center, new_scales, new_res[0])
        else:
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
        for n in range(score_map.size(0)):
            if evaluate:
                with open(
                        os.path.join(save_result_dir, 'preds',
                                     img_name[n] + '.json'), 'w') as f:
                    obj = {
                        'd2_key': preds[n].numpy().tolist(),
                        'score': confidence[n]
                    }
                    json.dump(obj, f)

        if evaluate:
            for n in range(score_map.size(0)):
                inp = inputs[n]
                pred = score_map[n]
                # un-normalize per-channel before saving
                for t, m in zip(inp, mean):
                    t.add_(m)
                # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 —
                # this requires an old scipy (or porting to imageio.imwrite)
                scipy.misc.imsave(
                    os.path.join(save_result_dir, 'visualization',
                                 '{}.jpg'.format(img_name[n])),
                    sample_with_heatmap(inp, pred))
                if save_heatmap:
                    score_map_original_size = align_back(
                        score_map[n], meta['center'][n],
                        meta['scale'][len(scales) * n - 1],
                        meta['original_size'][n])
                    np.save(
                        os.path.join(save_result_dir, 'heatmaps',
                                     '{}.npy'.format(img_name[n])),
                        score_map_original_size)

        if anno_type != 'none':
            # measure accuracy and record loss
            losses.update(loss.item(), inputs.size(0))
            acces.update(acc[0], inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
            batch=i + 1,
            size=len(val_loader),
            data=data_time.val,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            acc=acces.avg)
        bar.next()
    bar.finish()
    if anno_type != 'none':
        return losses.avg, acces.avg
    else:
        return 0, 0
def train(self, gen_iterations, start_t, epoch, lr):
    """One adversarial training epoch: update each stack's discriminator,
    then the generator ``self.netG``.

    NOTE(review): ``gen_iterations`` is a local int — the incremented value is
    not returned to the caller; confirm the caller re-derives it.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    self.netG.train()
    end = time.time()
    gt_win, pred_win = None, None
    bar = Bar('Train', max=len(self.train_loader))
    step = 0
    errD_total = None
    errG_total = None
    for i, (input, target, meta, mpii) in enumerate(self.train_loader):
        data_time.update(time.time() - end)
        ######################################################
        # (1) Prepare training data and Compute text embeddings
        ######################################################
        input, target = input.to(self.device), target.to(self.device,
                                                         non_blocking=True)
        target_weight = meta['target_weight'].to(self.device,
                                                 non_blocking=True)
        #######################################################
        # (2) Generate fake heatmaps
        ######################################################
        output = self.netG(input)
        #######################################################
        # (3) Update D network
        ######################################################
        errD_total = 0
        D_logs = ''
        # loop variable renamed from `i` — it shadowed the batch index used
        # by the progress bar below
        for k in range(self.num_stacks):
            self.netsD[k].zero_grad()
            errD = discriminator_loss(self.netsD[k], target, target_weight,
                                      output[k], input, self.real_labels,
                                      self.fake_labels, mpii)
            # was `errD.backword()` — AttributeError typo
            errD.backward()
            self.optimizersD[k].step()
            errD_total += errD
            # was '%d.2f' (printed an int plus a literal ".2f"); `.data[0]`
            # is the removed pre-0.4 API
            D_logs += 'errD%d: %.2f ' % (k, errD.item())
        #######################################################
        # (4) Update G network: maximize log(D(G(z)))
        ######################################################
        step += 1
        gen_iterations += 1
        self.netG.zero_grad()
        errG_total, G_logs = \
            generator_loss(self.netsD, self.domainD, output,
                           self.real_labels, input, target_weight, mpii)
        if self.debug:
            gt_batch_img = batch_with_heatmap(input, target)
            pred_batch_img = batch_with_heatmap(input, output)
            if not gt_win or not pred_win:
                ax1 = plt.subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()
        errG_total.backward()
        self.optimizerG.step()

        batch_time.update(time.time() - end)
        end = time.time()
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:}'.format(
            batch=i + 1,
            size=len(self.train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td)
        bar.next()
        if gen_iterations % 100 == 0:
            print(D_logs + '\n' + G_logs)
    end_t = time.time()
    print('''[%d/%d] Loss_D: %.2f Loss_G: %.2f Time: %.2fs''' %
          (epoch, self.epochs, errD_total.item(), errG_total.item(),
           end_t - start_t))
    if epoch % cfg.TRAIN.SNAPSHOT_INTERVAL == 0:
        self.save_model(self.netsD, lr, epoch)
    bar.finish()
def train(train_loader, model, criterion, optimizer, debug=False, flip=True): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() acces = AverageMeter() # switch to train mode model.train() end = time.time() gt_win, pred_win = None, None bar = Bar('Processing', max=len(train_loader)) for i, (inputs, target, meta) in enumerate(train_loader): # measure data loading time data_time.update(time.time() - end) input_var = torch.autograd.Variable(inputs.cuda()) target_var = torch.autograd.Variable(target.cuda(async=True)) # compute output output = model(input_var) score_map = output[-1].data.cpu() loss = criterion(output[0], target_var) for j in range(1, len(output)): loss += criterion(output[j], target_var) acc = accuracy(score_map, target, idx) if debug: # visualize groundtruth and predictions gt_batch_img = batch_with_heatmap(inputs, target) pred_batch_img = batch_with_heatmap(inputs, score_map) if not gt_win or not pred_win: ax1 = plt.subplot(121) ax1.title.set_text('Groundtruth') gt_win = plt.imshow(gt_batch_img) ax2 = plt.subplot(122) ax2.title.set_text('Prediction') pred_win = plt.imshow(pred_batch_img) else: gt_win.set_data(gt_batch_img) pred_win.set_data(pred_batch_img) plt.pause(.05) plt.draw() # measure accuracy and record loss losses.update(loss.data[0], inputs.size(0)) acces.update(acc[0], inputs.size(0)) # compute gradient and do SGD step optimizer.zero_grad() loss.backward() optimizer.step() # measure elapsed time batch_time.update(time.time() - end) end = time.time() # plot progress bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format( batch=i + 1, size=len(train_loader), data=data_time.val, bt=batch_time.val, total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, acc=acces.avg ) bar.next() bar.finish() return losses.avg, acces.avg
def train(train_loader, model, criterion, optimizer, debug=False, flip=True,
          train_batch=6, epoch=0, njoints=68):
    """One training epoch that also accumulates per-joint inter-ocular
    normalized distances for AUC computation.

    Returns ``(avg_loss, avg_acc)``.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    gt_win, pred_win = None, None
    bar = Bar('Train', max=len(train_loader))

    # one normalized distance per (joint, sample); -1 marks invisible joints
    interocular_dists = torch.zeros((njoints,
                                     train_loader.dataset.__len__()))

    for i, (input, target, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input, target = input.to(device), target.to(device,
                                                    non_blocking=True)
        target_weight = meta['target_weight'].to(device, non_blocking=True)

        # compute output
        output = model(input)
        if type(output) == list:  # multiple output
            loss = 0
            for o in output:
                loss += criterion(o, target, target_weight, len(idx))
            output = output[-1]
        else:  # single output
            loss = criterion(output, target, target_weight, len(idx))
        acc, batch_interocular_dists = accuracy(output, target, idx)

        # slice by the ACTUAL batch size — the fixed `(i+1)*train_batch`
        # upper bound shape-mismatched on a smaller final batch
        start = i * train_batch
        interocular_dists[:, start:start + input.size(0)] = \
            batch_interocular_dists

        if debug:  # visualize groundtruth and predictions
            gt_batch_img = batch_with_heatmap(input, target)
            pred_batch_img = batch_with_heatmap(input, output)
            if not gt_win or not pred_win:
                ax1 = plt.subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))
        acces.update(acc[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.8f} | Acc: {acc: .8f}'.format(
            batch=i + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            acc=acces.avg)
        bar.next()
    bar.finish()

    # summary metrics over the tracked joints (idx is 1-based)
    idx_array = np.array(idx) - 1
    interocular_dists_pickup = interocular_dists[idx_array, :]
    mean_error = torch.mean(
        interocular_dists_pickup[interocular_dists_pickup != -1])
    # auc of predicted maps vs target
    auc = calc_metrics(interocular_dists, idx)
    return losses.avg, acces.avg
def validate(val_loader, model, criterion, num_classes, debug=False, flip=True):
    """Evaluate a keypoint model with optional flip augmentation and collect
    per-sample coordinate predictions.

    Returns ``(avg_loss, avg_acc, predictions)``.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions
    predictions = torch.Tensor(val_loader.dataset.__len__(), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device,
                                                     non_blocking=True)

            # compute output
            output = model(input)
            score_map = output[-1].cpu() if type(
                output) == list else output.cpu()
            if flip:
                # `.numpy()` on a CUDA tensor raises — copy to host first
                # (matches the other validators in this file)
                flip_input = torch.from_numpy(fliplr(
                    input.clone().cpu().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if type(
                    flip_output) == list else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            if type(output) == list:  # multiple output
                loss = 0
                for o in output:
                    loss += criterion(o, target, target_weight)
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight)
            acc = accuracy(score_map, target.cpu(), idx)

            # generate predictions
            preds = final_preds(score_map, meta['center'], meta['scale'],
                                [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg)
            bar.next()
    bar.finish()
    return losses.avg, acces.avg, predictions
def validate(val_loader, model, criterion, debug=False, flip=True,
             test_batch=6, njoints=68):
    """Evaluate a two-head (base + refinement) model; accuracy is reported on
    the refined head's score maps.

    Returns ``(avg_loss, avg_acc_refined)``.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces_re = AverageMeter()

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device,
                                                     non_blocking=True)

            # compute output — the model returns (base, refined)
            output, output_refine = model(input)
            score_map = output[-1].cpu() if type(
                output) == list else output.cpu()
            score_map_refine = output_refine[-1].cpu() if type(
                output_refine) == list else output_refine.cpu()
            if flip:
                flip_input = torch.from_numpy(fliplr(
                    input.clone().numpy())).float().to(device)
                flip_output, flip_output_re = model(flip_input)
                flip_output = flip_output[-1].cpu() if type(
                    flip_output) == list else flip_output.cpu()
                flip_output_re = flip_output_re[-1].cpu() if type(
                    flip_output_re) == list else flip_output_re.cpu()
                flip_output = flip_back(flip_output, 'real_animal')
                flip_output_re = flip_back(flip_output_re, 'real_animal')
                score_map += flip_output
                score_map_refine += flip_output_re

            if type(output) == list:  # multiple output
                loss = 0
                # was `for (o, o_re) in (output, output_refine)`, which
                # unpacked the two LISTS instead of pairing stack-by-stack
                for o, o_re in zip(output, output_refine):
                    loss = loss + criterion(
                        o, target, target_weight, len(idx)) + criterion(
                            o_re, target, target_weight, len(idx))
            else:  # single output
                loss = criterion(
                    output, target, target_weight, len(idx)) + criterion(
                        output_refine, target, target_weight, len(idx))
            acc_re, _ = accuracy(score_map_refine, target.cpu(), idx)

            if debug:
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces_re.update(acc_re[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} ' \
                         '| Loss: {loss:.8f} | Acc_re: {acc_re: .8f}'.format(
                             batch=i + 1,
                             size=len(val_loader),
                             data=data_time.val,
                             bt=batch_time.avg,
                             total=bar.elapsed_td,
                             eta=bar.eta_td,
                             loss=losses.avg,
                             acc_re=acces_re.avg
                         )
            bar.next()
    bar.finish()
    return losses.avg, acces_re.avg