def validate(cfg, val_loader, model, metric=None, log=None):
    """Run one evaluation pass over ``val_loader`` and return merged predictions.

    Args:
        cfg: config object; reads ``cfg.DEBUG`` and ``cfg.IS_VALID``.
        val_loader: iterable of batch dicts; ``batch['weight'].size(0)`` is
            taken as the batch size.
        model: project model exposing set_batch/forward/get_preds/eval_result.
        metric: running-metric container; must be provided when
            ``cfg.IS_VALID`` is true (it is updated and shown in the bar).
        log: optional logger; when given, each progress line is logged.

    Returns:
        All per-batch predictions folded together via ``reduce(combine, ...)``.

    Fixes vs. original: ``data_time.update`` was called twice per batch,
    double-counting loader time; unused locals ``num_samples``/``idx`` removed.
    """
    data_time = AverageMeter()
    batch_time = AverageMeter()
    # switch to evaluate mode
    model.eval()
    all_preds = []
    bar = Bar('Processing', max=len(val_loader))
    with torch.no_grad():
        end = time.time()
        for i, batch in enumerate(val_loader):
            # measure data loading time (exactly once per batch)
            data_time.update(time.time() - end)
            size = batch['weight'].size(0)
            # compute output
            model.set_batch(batch)
            model.forward()
            # debug, print intermediate result
            if cfg.DEBUG:
                debug(model.outputs, batch)
            all_preds.append(model.get_preds())
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            suffix = '({batch}/{size}) Data:{data:.1f}s Batch:{bt:.1f}s Total:{total:} ETA:{eta:} '.format(
                batch=i + 1, size=len(val_loader), data=data_time.val,
                bt=batch_time.avg, total=bar.elapsed_td, eta=bar.eta_td)
            if cfg.IS_VALID:
                # metric is only touched when validation metrics are enabled,
                # so metric=None is safe outside this branch
                metric_ = model.eval_result()
                metric_['loss'] = model.loss.item()
                metric.update(metric_, size)
                for name in metric.names():
                    suffix += '{}: {:.4f} '.format(name, metric[name].avg)
            bar.suffix = suffix
            bar.next()
            if log:
                log.info(bar.suffix)
    bar.finish()
    return reduce(combine, all_preds)
def train(cfg, train_loader, model, metric, log):
    """Train ``model`` for one epoch and return the aggregated epoch result.

    Per batch: step the model, collect the batch result for epoch-level
    aggregation, update the running metrics, and log a progress line.
    After the loop the collected batches are folded into a single epoch
    result which is also pushed into ``metric`` and printed.
    """
    data_time = AverageMeter()
    batch_time = AverageMeter()
    # switch to train mode
    model.train()
    tic = time.time()
    bar = Bar('Processing', max=len(train_loader))
    model.collections = []
    for step, batch in enumerate(train_loader):
        n = batch['weight'].size(0)
        # time spent waiting on the loader
        data_time.update(time.time() - tic)
        model.set_batch(batch)
        model.step()
        # optionally dump intermediate tensors
        if cfg.DEBUG:
            model.debug()
        # per-batch result, stashed for the epoch-level evaluation
        model.get_batch_result(type='train')
        model.collect_batch_result()
        # record loss / accuracy for this batch
        metric_ = model.eval_batch_result()
        metric.update(metric_, n)
        # wall time for the full batch (load + step)
        batch_time.update(time.time() - tic)
        tic = time.time()
        header = '({batch}/{size}) Data:{data:.1f}s Batch:{bt:.1f}s Total:{total:} ETA:{eta:} '.format(
            batch=step + 1, size=len(train_loader), data=data_time.val,
            bt=batch_time.avg, total=bar.elapsed_td, eta=bar.eta_td)
        extras = ''.join('{}: {:.4f} '.format(name, metric[name].avg)
                         for name in metric_.keys())
        bar.suffix = header + extras
        bar.next()
        log.info(bar.suffix)
    bar.finish()
    # fold the collected batches into one epoch-level result
    model.get_epoch_result()
    metric_ = model.eval_epoch_result()
    metric.update(metric_, 1)
    print("".join(["%s : %.4f" % (key, metric_[key]) for key in metric_]))
    return model.epoch_result
def train(
    train_loader,
    model,
    criterion,
    optimizer,
    lr_init=None,
    lr_now=None,
    glob_step=None,
    lr_decay=None,
    gamma=None,
    max_norm=True,
):
    """Train ``model`` for one epoch over ``(inps, tars)`` tensor pairs.

    Args:
        train_loader: yields (input, target) tensor pairs; moved to CUDA here.
        model, criterion, optimizer: network, loss and optimizer.
        lr_init/lr_now/glob_step/lr_decay/gamma: step-based LR schedule state;
            the LR is decayed every ``lr_decay`` global steps.
        max_norm: when truthy, clip gradient norm to 1 before stepping.

    Returns:
        (updated glob_step, current lr, mean loss over the epoch).

    Fix vs. original: ``nn.utils.clip_grad_norm`` was deprecated and later
    removed from PyTorch; use the in-place ``clip_grad_norm_`` (as the
    sibling ``train`` in this file already does).
    """
    losses = utils.AverageMeter()
    model.train()
    start = time.time()
    batch_time = 0
    bar = Bar(">>>", fill=">", max=len(train_loader))
    for i, (inps, tars) in enumerate(train_loader):
        glob_step += 1
        # decay the learning rate on a global-step schedule
        if glob_step % lr_decay == 0 or glob_step == 1:
            lr_now = utils.lr_decay(optimizer, glob_step, lr_init, lr_decay, gamma)
        inputs = Variable(inps.cuda())
        targets = Variable(tars.cuda())
        outputs = model(inputs)
        # calculate loss
        optimizer.zero_grad()
        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))
        loss.backward()
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()
        # update summary every 100 batches
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()
        bar.suffix = "({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.4f}".format(
            batch=i + 1,
            size=len(train_loader),
            batchtime=batch_time * 10.0,
            ttl=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
        )
        bar.next()
    bar.finish()
    return glob_step, lr_now, losses.avg
def test(test_loader, model, criterion, joint_num, procrustes=False):
    """Evaluate a 2D-to-3D lifting model and report mean per-joint error.

    Args:
        test_loader: yields dicts with 'joint2d' (input) and 'truth' (target).
        model, criterion: network and loss; tensors are moved to CUDA.
        joint_num: number of 3D joints; outputs are reshaped to joint_num*3.
        procrustes: accepted for signature parity with the other ``test``
            functions but currently unused here.

    Returns:
        (last batch's targets, average loss, mean per-joint error).

    Fix vs. original: the distance matrix had ``joint_num + 1`` columns and
    the loop sliced one joint past the end of ``sqerr``; that out-of-range
    slice summed to an all-zero final column, deflating the mean error.
    Exactly ``joint_num`` joints are measured now.
    """
    losses = utils.AverageMeter()
    model.eval()
    all_dist = []
    start = time.time()
    batch_time = 0
    bar = Bar('>>>', fill='>', max=len(test_loader))
    for i, data in enumerate(test_loader):
        joint2d, truth = data['joint2d'], data['truth']
        inputs = Variable(joint2d.cuda().type(torch.cuda.FloatTensor))
        targets = Variable(truth.cuda().type(torch.cuda.FloatTensor))
        outputs = model(inputs)
        outputs = torch.reshape(outputs, (-1, joint_num * 3))
        targets = torch.reshape(targets, (-1, joint_num * 3))
        # calculate loss
        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))
        # per-joint Euclidean distance for every frame in the batch
        sqerr = (outputs - targets) ** 2
        distance = np.zeros((sqerr.shape[0], joint_num))
        dist_idx = 0
        for k in np.arange(0, joint_num * 3, 3):
            distance[:, dist_idx] = torch.sqrt(torch.sum(sqerr[:, k:k + 3], dim=1)).to('cpu').detach().numpy()
            dist_idx += 1
        all_dist.append(distance)
        # update summary every 100 batches
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()
        bar.suffix = '({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.6f}' \
            .format(batch=i + 1, size=len(test_loader), batchtime=batch_time * 10.0,
                    ttl=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg)
        bar.next()
    all_dist = np.vstack(all_dist)
    ttl_err = np.mean(all_dist)
    bar.finish()
    print(">>> error: {} <<<".format(ttl_err))
    return targets, losses.avg, ttl_err
def train(train_loader, model, criterion, optimizer, joint_num, lr_init=None, lr_now=None, glob_step=None, lr_decay=None, gamma=None, max_norm=True):
    """One training epoch for a 2D-to-3D lifting model.

    Batches are dicts with 'joint2d' (input) and 'truth' (target); both are
    reshaped to ``(-1, joint_num * 3)`` before the loss. The learning rate is
    decayed every ``lr_decay`` global steps; gradients are optionally clipped.

    Returns:
        (updated glob_step, current lr, mean loss over the epoch).
    """
    losses = utils.AverageMeter()
    model.train()
    tic = time.time()
    batch_time = 0
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for step, sample in enumerate(train_loader):
        # Turn down Learning Rate
        glob_step += 1
        if glob_step == 1 or glob_step % lr_decay == 0:
            lr_now = utils.lr_decay(optimizer, glob_step, lr_init, lr_decay, gamma)
        joint2d = sample['joint2d']
        truth = sample['truth']
        inputs = Variable(joint2d.cuda().type(torch.cuda.FloatTensor))
        targets = Variable(truth.cuda().type(torch.cuda.FloatTensor))
        preds = model(inputs)
        preds = torch.reshape(preds, (-1, joint_num * 3))
        targets = torch.reshape(targets, (-1, joint_num * 3))
        # calculate loss
        optimizer.zero_grad()
        loss = criterion(preds, targets)
        losses.update(loss.item(), inputs.size(0))
        loss.backward()
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()
        # update summary every 100 batches
        if (step + 1) % 100 == 0:
            batch_time = time.time() - tic
            tic = time.time()
        bar.suffix = '({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.4f}' \
            .format(batch=step + 1, size=len(train_loader), batchtime=batch_time * 10.0,
                    ttl=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg)
        bar.next()
    bar.finish()
    return glob_step, lr_now, losses.avg
def train(cfg, train_loader, model, metric, log):
    """Run a single training epoch, logging timing and running metrics.

    For every batch the model is stepped, its evaluation result (plus the
    scalar loss) is pushed into ``metric``, and a progress line is rendered
    and logged. Nothing is returned.
    """
    data_time = AverageMeter()
    batch_time = AverageMeter()
    # put the model into training mode
    model.train()
    tic = time.time()
    bar = Bar('Processing', max=len(train_loader))
    for step, batch in enumerate(train_loader):
        n = batch['weight'].size(0)
        # loader wait time
        data_time.update(time.time() - tic)
        model.set_batch(batch)
        model.step()
        # optionally dump intermediate tensors
        if cfg.DEBUG:
            debug(model.outputs, batch)
        # record loss and accuracy for this batch
        metric_ = model.eval_result()
        metric_['loss'] = model.loss.item()
        metric.update(metric_, n)
        # wall time for the full batch (load + step)
        batch_time.update(time.time() - tic)
        tic = time.time()
        header = '({batch}/{size}) Data:{data:.1f}s Batch:{bt:.1f}s Total:{total:} ETA:{eta:} '.format(
            batch=step + 1, size=len(train_loader), data=data_time.val,
            bt=batch_time.avg, total=bar.elapsed_td, eta=bar.eta_td)
        bar.suffix = header + ''.join('{}: {:.4f} '.format(name, metric[name].avg)
                                      for name in metric.names())
        bar.next()
        log.info(bar.suffix)
    bar.finish()
def validate(cfg, val_loader, model, metric=None, log=None):
    """Validate for one epoch, collecting batch results into an epoch result.

    Args:
        cfg: config object; reads ``cfg.DEBUG`` and ``cfg.IS_VALID``.
        val_loader: iterable of batch dicts; ``batch['weight'].size(0)`` is
            taken as the batch size.
        model: project model exposing set_batch/forward/get_batch_result/
            collect_batch_result/get_epoch_result and the eval_* helpers.
        metric: running-metric container. NOTE(review): although it defaults
            to None, the epoch-level ``metric.update`` after the loop runs
            unconditionally — callers appear to always pass one; confirm.
        log: optional logger; when given, each progress line is logged.

    Returns:
        ``model.epoch_result`` as produced by ``model.get_epoch_result()``.

    Fixes vs. original: ``data_time.update`` was called twice per batch,
    double-counting loader time; unused locals ``num_samples``/``idx`` removed.
    """
    data_time = AverageMeter()
    batch_time = AverageMeter()
    # switch to evaluate mode
    model.eval()
    model.collections = []
    bar = Bar('Processing', max=len(val_loader))
    with torch.no_grad():
        end = time.time()
        for i, batch in enumerate(val_loader):
            # measure data loading time (exactly once per batch)
            data_time.update(time.time() - end)
            size = batch['weight'].size(0)
            # compute output
            model.set_batch(batch)
            model.forward()
            # debug, print intermediate result
            if cfg.DEBUG:
                model.debug()
            model.get_batch_result(type='valid')
            # put the result of this batch into collection for epoch result eval
            model.collect_batch_result()
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            suffix = '({batch}/{size}) Data:{data:.1f}s Batch:{bt:.1f}s Total:{total:} ETA:{eta:} '.format(
                batch=i + 1, size=len(val_loader), data=data_time.val,
                bt=batch_time.avg, total=bar.elapsed_td, eta=bar.eta_td)
            if cfg.IS_VALID:
                metric_ = model.eval_batch_result()
                metric.update(metric_, size)
                for name in metric_.keys():
                    suffix += '{}: {:.4f} '.format(name, metric[name].avg)
            bar.suffix = suffix
            bar.next()
            if log:
                log.info(bar.suffix)
    bar.finish()
    # fold the collected batches into one epoch-level result
    model.get_epoch_result()
    metric_ = model.eval_epoch_result()
    metric.update(metric_, 1)
    print("".join(["%s : %.4f" % (key, metric_[key]) for key in metric_]))
    return model.epoch_result
def validate(val_loader, model, criterion, num_classes, debug=False, flip=True):
    """Evaluate a stacked heatmap pose model over ``val_loader``.

    Args:
        val_loader: yields (images, target heatmaps); model runs on CUDA.
        model: network returning a list of per-stage heatmap outputs.
        criterion: heatmap loss; only the first 21 channels are scored.
        num_classes: number of keypoints (sizes the ``predictions`` buffer).
        debug: when True, render GT/pred heatmaps and save to ./tmp/<i>.png.
        flip: when True, average with the horizontally-flipped prediction.

    Returns:
        (mean loss, mean accuracy, predictions buffer).

    Fixes vs. original: ``target.cuda(async=True)`` is a SyntaxError on
    Python >= 3.7 (``async`` is a reserved keyword) -> ``non_blocking=True``;
    the removed ``volatile=True`` flag is superseded by wrapping the forward
    pass in ``torch.no_grad()``; unused local ``preds`` dropped.

    NOTE(review): ``accuracy(..., idx)`` reads a module-level ``idx`` that is
    not visible in this chunk — confirm it is defined at import time.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()
    distes = AverageMeter()
    # predictions buffer (per-sample filling is currently disabled upstream)
    predictions = torch.Tensor(len(val_loader.dataset), num_classes, 2)
    # switch to evaluate mode
    model.eval()
    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    for i, (inputs, target) in enumerate(val_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        target = target.cuda(non_blocking=True)
        with torch.no_grad():
            input_var = inputs.cuda()
            target_var = target
            # compute output; keep only the final stage's score map
            output = model(input_var)
            score_map = output[-1].data.cpu()
            if flip:
                # average with the prediction on the mirrored image
                flip_input_var = torch.from_numpy(fliplr(inputs.clone().numpy())).float().cuda()
                flip_output_var = model(flip_input_var)
                flip_output = flip_back(flip_output_var[-1].data.cpu())
                score_map += flip_output
            # intermediate supervision: sum the loss over every stage
            loss = 0
            for o in output:
                loss += criterion(o[:, :21, :, :], target_var)
            acc, dist = accuracy(score_map[:, :21, :, :].contiguous(), target.cpu(), idx)
        if debug:
            gt_batch_img = batch_with_heatmap(inputs, target)
            pred_batch_img = batch_with_heatmap(inputs, score_map)
            # upscale 4x for readability before saving
            sz = tuple(x * 4 for x in gt_batch_img[:, :, 0].shape)
            gt_batch_img = cv2.resize(gt_batch_img, (sz[1], sz[0]))
            pred_batch_img = cv2.resize(pred_batch_img, (sz[1], sz[0]))
            if not gt_win or not pred_win:
                plt.subplot(121)
                gt_win = plt.imshow(gt_batch_img[:, :, ::-1])
                plt.subplot(122)
                pred_win = plt.imshow(pred_batch_img[:, :, ::-1])
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.savefig("./tmp/" + str(i) + ".png", dpi=1000, bbox_inches='tight')
        # measure accuracy and record loss
        losses.update(loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))
        distes.update(dist[0], inputs.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.1f}s | Batch: {bt:.1f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f} | Dist {dist:.3f}'.format(
            batch=i + 1, size=len(val_loader), data=data_time.val, bt=batch_time.avg,
            total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, acc=acces.avg,
            dist=distes.avg)
        bar.next()
    bar.finish()
    return losses.avg, acces.avg, predictions
def train(train_loader, model, criterion, optimizer, debug=False, flip=True):
    """Train a stacked heatmap pose model for one epoch.

    Args:
        train_loader: yields (images, target heatmaps); moved to CUDA here.
        model: network returning a list of per-stage heatmap outputs;
            the loss is summed over all stages (intermediate supervision).
        criterion, optimizer: loss and optimizer.
        debug: when True, show live GT/pred heatmap figures.
        flip: accepted for signature parity with ``validate`` but unused here.

    Returns:
        (mean loss, mean accuracy) over the epoch.

    Fix vs. original: ``target.cuda(async=True)`` is a SyntaxError on
    Python >= 3.7 (``async`` is a reserved keyword) -> ``non_blocking=True``.

    NOTE(review): ``accuracy(..., idx)`` reads a module-level ``idx`` that is
    not visible in this chunk — confirm it is defined at import time.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()
    distes = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    gt_win, pred_win = None, None
    bar = Bar('Processing', max=len(train_loader))
    for i, (inputs, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        input_var = inputs.cuda()
        target_var = target.cuda(non_blocking=True)
        # compute output; keep only the final stage's score map
        output = model(input_var)
        score_map = output[-1].data.cpu()
        # intermediate supervision: sum the loss over every stage
        loss = criterion(output[0], target_var)
        for j in range(1, len(output)):
            loss += criterion(output[j], target_var)
        acc, dist = accuracy(score_map, target, idx)
        if debug:
            # visualize groundtruth and predictions
            gt_batch_img = batch_with_heatmap(inputs, target)
            pred_batch_img = batch_with_heatmap(inputs, score_map)
            if not gt_win or not pred_win:
                ax1 = plt.subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img[:, :, ::-1])
                ax2 = plt.subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img[:, :, ::-1])
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.plot()
            plt.pause(.5)
        # measure accuracy and record loss
        losses.update(loss.item(), inputs.size(0))
        acces.update(acc[0], inputs.size(0))
        distes.update(dist[0], inputs.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.1f}s | Batch: {bt:.1f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f} | Dist {dist:.3f}'.format(
            batch=i + 1, size=len(train_loader), data=data_time.val, bt=batch_time.avg,
            total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, acc=acces.avg,
            dist=distes.avg)
        bar.next()
    bar.finish()
    return losses.avg, acces.avg
def test(test_loader, model, criterion, stat_3d, procrustes=False):
    """Evaluate a 3D pose model (Human3.6M-style, 17 joints = 51 values).

    Args:
        test_loader: yields (inputs_2d, targets_3d) tensor pairs; CUDA is used.
        model, criterion: network and loss.
        stat_3d: dict with 'mean', 'std', 'dim_use' used to un-normalize
            predictions back to metric space before measuring error.
        procrustes: when True, align each prediction to its ground truth with
            a similarity transform before measuring the distance.

    Returns:
        (average loss, mean per-joint position error over all frames).

    Fixes vs. original: removed the dead locals ``outputs_coord``,
    ``tars = targets`` and the unused ``joint_err``; behavior is unchanged.
    """
    losses = utils.AverageMeter()
    model.eval()
    all_dist = []
    start = time.time()
    batch_time = 0
    bar = Bar(">>>", fill=">", max=len(test_loader))
    for i, (inps, tars) in enumerate(test_loader):
        inputs = Variable(inps.cuda())
        targets = Variable(tars.cuda())
        outputs = model(inputs)
        # calculate loss on the normalized coordinates
        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))
        # error is measured in the un-normalized (metric) space
        targets_unnorm = data_process.unNormalizeData(targets.data.cpu().numpy(), stat_3d["mean"], stat_3d["std"], stat_3d["dim_use"])
        outputs_unnorm = data_process.unNormalizeData(outputs.data.cpu().numpy(), stat_3d["mean"], stat_3d["std"], stat_3d["dim_use"])
        # remove dim ignored: root joint (first 3 values) + the used dims
        dim_use = np.hstack((np.arange(3), stat_3d["dim_use"]))
        outputs_use = outputs_unnorm[:, dim_use]
        targets_use = targets_unnorm[:, dim_use]
        if procrustes:
            # rigid-align each frame's prediction to its ground truth
            for ba in range(inps.size(0)):
                gt = targets_use[ba].reshape(-1, 3)
                out = outputs_use[ba].reshape(-1, 3)
                _, Z, T, b, c = get_transformation(gt, out, True)
                out = (b * out.dot(T)) + c
                outputs_use[ba, :] = out.reshape(1, 51)
        # per-joint Euclidean distance, 17 joints
        sqerr = (outputs_use - targets_use) ** 2
        distance = np.zeros((sqerr.shape[0], 17))
        dist_idx = 0
        for k in np.arange(0, 17 * 3, 3):
            distance[:, dist_idx] = np.sqrt(np.sum(sqerr[:, k:k + 3], axis=1))
            dist_idx += 1
        all_dist.append(distance)
        # update summary every 100 batches
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()
        bar.suffix = "({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.6f}".format(
            batch=i + 1,
            size=len(test_loader),
            batchtime=batch_time * 10.0,
            ttl=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
        )
        bar.next()
    all_dist = np.vstack(all_dist)
    ttl_err = np.mean(all_dist)
    bar.finish()
    print(">>> error: {} <<<".format(ttl_err))
    return losses.avg, ttl_err
def test(test_loader, model, criterion, stat_3d, device, procrustes=False,
         pck_thresholds=(50, 100, 150, 200, 250),
         noise_fun=lambda x: x, refine_dic=None, refine_coeff_fun=None,
         refine_extra_kwargs=None, cache_prefix=None, visualize=False):
    """Evaluate with optional input noise, dictionary refinement & Procrustes.

    Pass 1 runs the model over every batch, un-normalizes predictions and
    targets, and regroups them into per-sequence arrays via the dataset's
    ``frames`` lengths. Pass 2 optionally refines / aligns each sequence and
    accumulates per-frame MPJPE and PCK.

    Args:
        test_loader: yields (inputs, targets); its dataset exposes ``frames``.
        stat_3d: dict with 'mean', 'std', 'dim_use' for un-normalization.
        device: torch device for inference.
        procrustes: rigid-align each frame's prediction before measuring.
        pck_thresholds: distance thresholds for the PCK metric.
        noise_fun: perturbation applied to the inputs before inference.
        refine_dic/refine_coeff_fun/refine_extra_kwargs: arguments forwarded
            to ``ru.refine`` when a refinement dictionary is supplied.
        cache_prefix: when set, pickle the raw metrics to cache/<prefix>_.pkl.
        visualize: plot refined/original/GT pose sequences (refinement only).

    Returns:
        (average loss over batches, MPJPE, per-threshold mean PCK).

    Fixes vs. original: the mutable default arguments ``refine_extra_kwargs={}``
    and ``pck_thresholds=[...]`` are replaced with a None sentinel and a tuple.
    """
    if refine_extra_kwargs is None:
        refine_extra_kwargs = {}
    model.eval()
    all_outputs = []
    all_targets = []
    losses = utils.AverageMeter()
    for i, (inps, tars) in enumerate(test_loader):
        inps_noise = noise_fun(inps)
        inputs = Variable(inps_noise.to(device))
        targets = Variable(tars.to(device))
        outputs = model(inputs)
        # calculate loss on the normalized coordinates
        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))
        # error is measured in the un-normalized space
        targets_unnorm = data_process.unNormalizeData(targets.data.cpu().numpy(), stat_3d['mean'], stat_3d['std'], stat_3d['dim_use'])
        outputs_unnorm = data_process.unNormalizeData(outputs.data.cpu().numpy(), stat_3d['mean'], stat_3d['std'], stat_3d['dim_use'])
        # remove dim ignored: root joint (first 3 values) + the used dims
        dim_use = np.hstack((np.arange(3), stat_3d['dim_use']))
        all_outputs.append(outputs_unnorm[:, dim_use])
        all_targets.append(targets_unnorm[:, dim_use])
    # regroup flat frame arrays into per-sequence arrays
    accu_frames = np.cumsum(test_loader.dataset.frames)
    all_outputs = np.split(np.concatenate(all_outputs, axis=0), accu_frames)[:-1]
    all_targets = np.split(np.concatenate(all_targets, axis=0), accu_frames)[:-1]
    start = time.time()
    seq_time = 0
    bar = Bar('>>>', fill='>', max=len(all_outputs))
    all_dist, all_pck = [], []
    for i, (outputs_use, targets_use) in enumerate(zip(all_outputs, all_targets)):
        if refine_dic is not None:
            origin = outputs_use
            outputs_use, _ = ru.refine(outputs_use, refine_dic, refine_coeff_fun,
                                       **refine_extra_kwargs)
            if visualize:
                # refined vs. original vs. ground-truth sequences
                visual = [
                    ru.convert_to_pose_16(seq.reshape([-1, 17, 3]))
                    for seq in [outputs_use, origin, targets_use]
                ]
                ru.plot_pose_seq(visual, plot_axis=True, r=1000)
        if procrustes:
            # rigid-align each frame's prediction to its ground truth
            for frame in range(outputs_use.shape[0]):
                gt = targets_use[frame].reshape(-1, 3)
                out = outputs_use[frame].reshape(-1, 3)
                _, Z, T, b, c = get_transformation(gt, out, True, reflection=False)
                out = (b * out.dot(T)) + c
                outputs_use[frame, :] = out.reshape(1, 51)
        # per-frame MPJPE and PCK
        for pred, gt in zip(outputs_use, targets_use):
            pred, gt = pred.reshape([-1, 3]), gt.reshape([-1, 3])
            all_dist.append(mpjpe_fun(pred, gt))
            all_pck.append(pck_fun(pred, gt, thresholds=pck_thresholds))
        # update summary
        seq_time = time.time() - start
        start = time.time()
        bar.suffix = '({seq}/{size}) | seq: {seqtime:.4}s | Total: {ttl} | ETA: {eta:} | mpjpe: {loss:.6f}' \
            .format(seq=i + 1, size=len(all_outputs), seqtime=seq_time,
                    ttl=bar.elapsed_td, eta=bar.eta_td, loss=np.mean(all_dist))
        bar.next()
    all_dist = np.vstack(all_dist)
    all_pck = np.array(all_pck)
    mpjpe = np.mean(all_dist)
    if cache_prefix:
        with open('cache/{}_.pkl'.format(cache_prefix), 'wb') as f:
            pickle.dump({'mpjpe': all_dist, 'pck': all_pck}, f)
    pck = np.mean(all_pck, axis=0)
    bar.finish()
    print(">>> error: {:4f}, pck: {} <<<".format(
        mpjpe, ' '.join(['{:4f}'.format(val) for val in pck])))
    return losses.avg, mpjpe, pck