def temp_visualize(eval_dataset, model, stats, opt):
    # visualize the model prediction for 2D key-points loaded from a saved .npy file
    model.eval()
    data = np.load('./pics/pts1.npy').astype(np.float32)
    data = data[:, 2:]
    # normalize the data
    mean_vec = stats['mean_2d'][stats['dim_use_2d']]
    std_vec = stats['std_2d'][stats['dim_use_2d']]
    data = (data - mean_vec) / std_vec
    data = torch.from_numpy(data.astype(np.float32))
    data = data.cuda()
    # forward pass to get the prediction
    prediction = model(data)
    # un-normalize the data
    skeleton_2d = data_utils.unNormalizeData(data.data.cpu().numpy(),
                                             stats['mean_2d'],
                                             stats['std_2d'],
                                             stats['dim_ignore_2d'])
    skeleton_3d_pred = data_utils.unNormalizeData(prediction.data.cpu().numpy(),
                                                  stats['mean_3d'],
                                                  stats['std_3d'],
                                                  stats['dim_ignore_3d'])
    # visualizing
    plt.figure()
    ax = plt.subplot(1, 2, 1)
    viz.show2Dpose(skeleton_2d[0], ax)
    plt.gca().invert_yaxis()
    ax = plt.subplot(1, 2, 2, projection='3d')
    viz.show3Dpose(skeleton_3d_pred[0], ax, pred=True)
    plt.show()
    # rotate the axes and update
    # for angle in range(0, 360, 5):
    #     for ax in axes:
    #         ax.view_init(30, angle)
    #     plt.draw()
    #     plt.pause(.001)
    # input('Press enter to view next batch.')
    return

def evaluate(eval_dataset, model, stats, opt, save=False, save_path=None,
             verbose=True, procrustes=False, per_joint=False,
             apply_dropout=False):
    """
    Evaluate a 2D-to-3D lifting model on a given PyTorch dataset.
    Adapted from the ICCV 2017 baseline
    https://github.com/una-dinosauria/3d-pose-baseline
    """
    num_of_joints = 14 if opt.pred14 else 17
    all_dists = []
    model.eval()
    if apply_dropout:
        # enable the dropout layers so that the evaluation loss is comparable
        # to the training loss (only for debugging purposes)
        def enable_dropout(m):
            if type(m) == torch.nn.Dropout:
                m.train()
        model.apply(enable_dropout)
    eval_loader = torch.utils.data.DataLoader(eval_dataset,
                                              batch_size=opt.batch_size,
                                              shuffle=False,
                                              num_workers=opt.num_threads)
    total_loss = 0
    for batch_idx, batch in enumerate(eval_loader):
        data = batch[0]
        target = batch[1]
        with torch.no_grad():
            if opt.cuda:
                # move to GPU
                data, target = data.cuda(), target.cuda()
            # forward pass to get the prediction
            prediction = model(data)
            # mean squared loss
            loss = F.mse_loss(prediction, target, reduction='sum')
            total_loss += loss.data.item()
        # un-normalize the data
        skeleton_3d_gt = data_utils.unNormalizeData(target.data.cpu().numpy(),
                                                    stats['mean_3d'],
                                                    stats['std_3d'],
                                                    stats['dim_ignore_3d'])
        skeleton_3d_pred = data_utils.unNormalizeData(prediction.data.cpu().numpy(),
                                                      stats['mean_3d'],
                                                      stats['std_3d'],
                                                      stats['dim_ignore_3d'])
        # pick the joints that are used
        dim_use = stats['dim_use_3d']
        skeleton_3d_gt_use = skeleton_3d_gt[:, dim_use]
        skeleton_3d_pred_use = skeleton_3d_pred[:, dim_use]
        # error after a rigid alignment, corresponding to protocol #2 in the paper
        if procrustes:
            skeleton_3d_pred_use = align_skeleton(skeleton_3d_pred_use,
                                                  skeleton_3d_gt_use,
                                                  num_of_joints)
        # compute the Euclidean distance error per joint
        sqerr = (skeleton_3d_gt_use - skeleton_3d_pred_use)**2  # squared error between prediction and expected output
        dists = np.zeros((sqerr.shape[0], num_of_joints))       # array with the L2 error per joint in mm
        dist_idx = 0
        for k in np.arange(0, num_of_joints*3, 3):
            # sum across the X, Y and Z dimensions to obtain the L2 distance
            dists[:, dist_idx] = np.sqrt(np.sum(sqerr[:, k:k+3], axis=1))
            dist_idx = dist_idx + 1
        all_dists.append(dists)
    all_dists = np.vstack(all_dists)
    if per_joint:
        # show the average error for each joint
        error_per_joint = all_dists.mean(axis=0)
        logging.info('Average error for each joint: ')
        print(error_per_joint)
    # average the summed loss over the number of output dimensions
    # (16 joints x 3 coordinates in the normalized representation)
    avg_loss = total_loss / (len(eval_dataset)*16*3)
    if save:
        record = {'error': all_dists}
        np.save(save_path, np.array(record))
    avg_distance = all_dists.mean()
    if verbose:
        logging.info('Evaluation set: average loss: {:.4f} '.format(avg_loss))
        logging.info('Evaluation set: average joint distance: {:.4f} '.format(avg_distance))
    return avg_loss, avg_distance

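# The align_skeleton() call above is defined elsewhere in the repository. The
# function below is only an illustrative sketch of the rigid alignment behind
# protocol #2 (per-sample Procrustes alignment of the prediction onto the
# ground truth); it assumes flattened skeletons of shape [batch, num_joints*3]
# and is not necessarily identical to the repository's implementation.
def align_skeleton_sketch(pred_flat, gt_flat, num_of_joints):
    aligned = np.zeros_like(pred_flat)
    for i in range(pred_flat.shape[0]):
        pred = pred_flat[i].reshape(num_of_joints, 3)
        gt = gt_flat[i].reshape(num_of_joints, 3)
        # center both skeletons at their mean joint position
        mu_pred, mu_gt = pred.mean(axis=0), gt.mean(axis=0)
        p, g = pred - mu_pred, gt - mu_gt
        # optimal rotation from the SVD of the cross-covariance matrix
        U, s, Vt = np.linalg.svd(p.T.dot(g))
        R = U.dot(Vt)
        if np.linalg.det(R) < 0:
            # avoid an improper rotation (reflection)
            Vt[-1, :] *= -1
            s[-1] *= -1
            R = U.dot(Vt)
        # optimal isotropic scale
        scale = s.sum() / (p**2).sum()
        # apply the similarity transform and translate onto the ground truth
        aligned[i] = (scale * p.dot(R) + mu_gt).reshape(-1)
    return aligned
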
def visualize(eval_dataset, model, stats, opt, save=False, save_dir=None):
    # visualize model predictions batch by batch
    batch_size = 9
    # how many batches to save
    if save:
        num_batches = 10
        current_batch = 1
    model.eval()
    eval_loader = torch.utils.data.DataLoader(eval_dataset,
                                              batch_size,
                                              shuffle=True,
                                              num_workers=opt.num_threads)
    for batch_idx, batch in enumerate(eval_loader):
        if save and current_batch > num_batches:
            break
        data = batch[0]
        target = batch[1]
        with torch.no_grad():
            if opt.cuda:
                # move to GPU
                data, target = data.cuda(), target.cuda()
            # forward pass to get the prediction
            prediction = model(data)
        # un-normalize the data
        skeleton_2d = data_utils.unNormalizeData(data.data.cpu().numpy(),
                                                 stats['mean_2d'],
                                                 stats['std_2d'],
                                                 stats['dim_ignore_2d'])
        skeleton_3d_gt = data_utils.unNormalizeData(target.data.cpu().numpy(),
                                                    stats['mean_3d'],
                                                    stats['std_3d'],
                                                    stats['dim_ignore_3d'])
        skeleton_3d_pred = data_utils.unNormalizeData(prediction.data.cpu().numpy(),
                                                      stats['mean_3d'],
                                                      stats['std_3d'],
                                                      stats['dim_ignore_3d'])
        # visualizing
        if save:
            plt.ioff()
        f = plt.figure(figsize=(16, 8))
        axes = []
        for sample_idx in range(batch_size):
            # 2D input pose
            ax = plt.subplot(3, 9, 3*sample_idx + 1)
            viz.show2Dpose(skeleton_2d[sample_idx], ax)
            plt.gca().invert_yaxis()
            # 3D ground truth
            ax = plt.subplot(3, 9, 3*sample_idx + 2, projection='3d')
            viz.show3Dpose(skeleton_3d_gt[sample_idx], ax)
            # 3D prediction overlaid on the ground truth
            ax = plt.subplot(3, 9, 3*sample_idx + 3, projection='3d')
            viz.show3Dpose(skeleton_3d_pred[sample_idx], ax, pred=True)
            viz.show3Dpose(skeleton_3d_gt[sample_idx], ax, gt=True)
            axes.append(ax)
        adjust_figure(left=0.05, right=0.95, bottom=0.05, top=0.95,
                      wspace=0.3, hspace=0.3)
        if not save:
            plt.pause(0.5)
            # rotate the axes and update
            for angle in range(0, 360, 5):
                for ax in axes:
                    ax.view_init(30, angle)
                plt.draw()
                plt.pause(.001)
            input('Press enter to view next batch.')
        else:
            # save the plot
            f.savefig(save_dir + '/' + str(current_batch) + '.png')
            plt.close(f)
            del axes
        if save:
            current_batch += 1
    return

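# Hypothetical usage sketch (not part of this file): how evaluate() and
# visualize() above might be driven from a script. The option attributes
# (cuda, batch_size, num_threads, pred14) are the ones these functions read;
# the dataset, model and statistics dictionary are assumed to be prepared by
# the rest of the code base.
#
#   avg_loss, avg_dist = evaluate(test_dataset, model, stats, opt,
#                                 procrustes=True,   # protocol #2 error
#                                 per_joint=True)    # also log per-joint error
#   visualize(test_dataset, model, stats, opt, save=True, save_dir='./figs')
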
def visualize_cascade(eval_dataset, cascade, stats, opt, save=False, save_dir=None):
    num_stages = len(cascade)
    # visualize the cascade predictions batch by batch
    batch_size = 5
    # how many batches to save
    if save:
        num_batches = 10
        current_batch = 1
    for stage_model in cascade:
        stage_model.eval()
    eval_loader = torch.utils.data.DataLoader(eval_dataset,
                                              batch_size,
                                              shuffle=False,
                                              num_workers=opt.num_threads)
    for batch_idx, batch in enumerate(eval_loader):
        if save and current_batch > num_batches:
            break
        data = batch[0]
        ## debug: experimental block (2019/10/16) that re-normalizes raw 2D
        ## key-points; it assumes the loader yields a single 16-joint 2D pose
        # enc_in = np.array([[648., 266], [679, 311], [688, 320], [693, 161],
        #                    [620, 244], [526, 156], [642, 160], [590, 310],
        #                    [505, 350], [380, 375], [491, 285],
        #                    [543, 190], [572, 119], [515, 417], [518, 514],
        #                    [512, 638]], dtype=np.float32)
        enc_in = data.numpy()
        enc_in = enc_in.reshape(1, 32)
        # normalize
        data_mean_2d = stats['mean_2d']
        dim_to_use_2d = stats['dim_use_2d']
        data_std_2d = stats['std_2d']
        enc_in = (enc_in - data_mean_2d[dim_to_use_2d]) / data_std_2d[dim_to_use_2d]
        data = torch.from_numpy(enc_in.astype(np.float32))
        ## End experiment 2019/10/16
        target = batch[1]
        # store predictions for each stage
        prediction_stages = []
        with torch.no_grad():
            if opt.cuda:
                # move to GPU
                data, target = data.cuda(), target.cuda()
            # forward pass to get the prediction of the first stage
            prediction = cascade[0](data)
            prediction_stages.append(prediction)
            # each later stage adds a residual to the accumulated estimate
            for stage_idx in range(1, num_stages):
                prediction = cascade[stage_idx](data)
                prediction_stages.append(prediction_stages[stage_idx-1] + prediction)
        # un-normalize the data
        skeleton_2d = data_utils.unNormalizeData(data.data.cpu().numpy(),
                                                 stats['mean_2d'],
                                                 stats['std_2d'],
                                                 stats['dim_ignore_2d'])
        skeleton_3d_gt = data_utils.unNormalizeData(target.data.cpu().numpy(),
                                                    stats['mean_3d'],
                                                    stats['std_3d'],
                                                    stats['dim_ignore_3d'])
        for stage_idx in range(num_stages):
            prediction_stages[stage_idx] = data_utils.unNormalizeData(
                prediction_stages[stage_idx].data.cpu().numpy(),
                stats['mean_3d'],
                stats['std_3d'],
                stats['dim_ignore_3d'])
        ## save intermediate results (experiment 2019/10/16)
        # import scipy.io as sio
        # p3d = prediction_stages[0]
        # sio.savemat('./teaser_pose3d.mat',
        #             {'pred_3d': p3d.reshape(32, 3),
        #              'pred_2d': np.array([[648., 266], [679, 311], [688, 320], [693, 161],
        #                                   [620, 244], [526, 156], [642, 160], [590, 310],
        #                                   [505, 350], [447, 348], [380, 375], [491, 285],
        #                                   [543, 190], [572, 119], [515, 417], [518, 514],
        #                                   [512, 638]])})
        # visualizing
        if save:
            plt.ioff()
        f = plt.figure(figsize=(16, 8))
        axes = []
        for sample_idx in range(batch_size):
            for stage_idx in range(num_stages):
                # 2D input pose
                ax = plt.subplot(batch_size, num_stages + 1,
                                 1 + (num_stages + 1)*sample_idx)
                viz.show2Dpose(skeleton_2d[sample_idx], ax)
                plt.gca().invert_yaxis()
                # prediction of this stage overlaid on the ground truth
                ax = plt.subplot(batch_size, num_stages + 1,
                                 2 + stage_idx + (num_stages + 1)*sample_idx,
                                 projection='3d')
                viz.show3Dpose(prediction_stages[stage_idx][sample_idx], ax, pred=True)
                viz.show3Dpose(skeleton_3d_gt[sample_idx], ax, gt=True)
                axes.append(ax)
        adjust_figure(left=0.05, right=0.95, bottom=0.05, top=0.95,
                      wspace=0.3, hspace=0.3)
        if not save:
            plt.pause(0.5)
            # rotate the axes and update
            # for angle in range(0, 360, 5):
            #     for ax in axes:
            #         ax.view_init(30, angle)
            #     plt.draw()
            #     plt.pause(.001)
            input('Press enter to view next batch.')
        else:
            # save the plot
            f.savefig(save_dir + '/' + str(current_batch) + '.png')
            plt.close(f)
            del axes
        if save:
            current_batch += 1
    return

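# get_pred() is called by the demo snippet below but defined elsewhere in the
# repository. The sketch here only illustrates cascade inference consistent
# with visualize_cascade() above: the first stage predicts a normalized 3D
# pose and every later stage adds a residual computed from the same 2D input.
def get_pred_sketch(cascade, data):
    with torch.no_grad():
        # first deep learner of the cascade
        prediction = cascade[0](data)
        # later stages refine the running estimate with residual predictions
        for stage_model in cascade[1:]:
            prediction = prediction + stage_model(data)
    return prediction
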
ax1 = plt.subplot(131)
ax1.imshow(img)
plt.title('Inputs to a cascaded model')
ax2 = plt.subplot(132)
plt.title('Input to stage 2: {:d}*2'.format(num_joints))
ax2.set_aspect('equal')
ax2.invert_yaxis()
skeleton_pred = None
skeleton_2d = data_dic[image_name]['p2d']
draw_skeleton(ax2, skeleton_2d, gt=True)
plt.plot(skeleton_2d[:, 0], skeleton_2d[:, 1], 'ro', markersize=2)
norm_ske_gt = normalize(skeleton_2d, re_order_idx_2d_MPI_H36M).reshape(1, -1)
pred = get_pred(cascade, torch.from_numpy(norm_ske_gt.astype(np.float32)))
pred = unNormalizeData(pred.data.numpy(),
                       stats['mean_3d'],
                       stats['std_3d'],
                       stats['dim_ignore_3d'])
ax3 = plt.subplot(133, projection='3d')
plot_3d_ax(ax=ax3, pred=pred, elev=10., azim=-90, title='3D prediction')
adjust_figure(left=0.05, right=0.95, bottom=0.08, top=0.92,
              wspace=0.3, hspace=0.3)
plt.show()