def train_step(self, input_batch):
    self.model.train()

    # Get data from the batch
    images = input_batch['img']  # input image
    gt_keypoints_2d = input_batch['keypoints']  # 2D keypoints
    gt_pose = input_batch['pose']  # SMPL pose parameters
    gt_betas = input_batch['betas']  # SMPL beta parameters
    gt_joints = input_batch['pose_3d']  # 3D pose
    has_smpl = input_batch['has_smpl'].to(torch.bool)  # flag that indicates whether SMPL parameters are valid
    has_pose_3d = input_batch['has_pose_3d'].to(torch.bool)  # flag that indicates whether 3D pose is valid
    is_flipped = input_batch['is_flipped']  # flag that indicates whether image was flipped during data augmentation
    rot_angle = input_batch['rot_angle']  # rotation angle used for data augmentation
    dataset_name = input_batch['dataset_name']  # name of the dataset the image comes from
    indices = input_batch['sample_index']  # index of example inside its dataset
    batch_size = images.shape[0]

    # Get GT vertices and model joints
    # Note that gt_model_joints is different from gt_joints as it comes from SMPL
    gt_out = self.smpl(betas=gt_betas,
                       body_pose=gt_pose[:, 3:],
                       global_orient=gt_pose[:, :3])
    gt_model_joints = gt_out.joints
    gt_vertices = gt_out.vertices

    # Get current best fits from the dictionary
    opt_pose, opt_betas = self.fits_dict[(dataset_name, indices.cpu(),
                                          rot_angle.cpu(), is_flipped.cpu())]
    opt_pose = opt_pose.to(self.device)
    opt_betas = opt_betas.to(self.device)

    # Replace extreme betas with zero betas
    opt_betas[(opt_betas.abs() > 3).any(dim=-1)] = 0.
    # Replace the optimized parameters with the ground truth parameters, if available
    opt_pose[has_smpl, :] = gt_pose[has_smpl, :]
    opt_betas[has_smpl, :] = gt_betas[has_smpl, :]

    opt_output = self.smpl(betas=opt_betas,
                           body_pose=opt_pose[:, 3:],
                           global_orient=opt_pose[:, :3])
    opt_vertices = opt_output.vertices
    opt_joints = opt_output.joints
    input_batch['verts'] = opt_vertices

    # De-normalize 2D keypoints from [-1,1] to pixel space
    gt_keypoints_2d_orig = gt_keypoints_2d.clone()
    gt_keypoints_2d_orig[:, :, :-1] = 0.5 * self.options.img_res * (gt_keypoints_2d_orig[:, :, :-1] + 1)

    # Estimate camera translation given the model joints and 2D keypoints
    # by minimizing a weighted least squares loss
    gt_cam_t = estimate_translation(gt_model_joints,
                                    gt_keypoints_2d_orig,
                                    focal_length=self.focal_length,
                                    img_size=self.options.img_res)
    opt_cam_t = estimate_translation(opt_joints,
                                     gt_keypoints_2d_orig,
                                     focal_length=self.focal_length,
                                     img_size=self.options.img_res)

    # Get fitted SMPL parameters as pseudo ground truth
    valid_fit = self.fits_dict.get_vaild_state(dataset_name, indices.cpu()).to(torch.bool).to(self.device)
    try:
        valid_fit = valid_fit | has_smpl
    except RuntimeError:
        valid_fit = (valid_fit.byte() | has_smpl.byte()).to(torch.bool)

    # Render dense correspondences
    if self.options.regressor == 'pymaf_net' and cfg.MODEL.PyMAF.AUX_SUPV_ON:
        gt_cam_t_nr = opt_cam_t.detach().clone()
        gt_camera = torch.zeros(gt_cam_t_nr.shape).to(gt_cam_t_nr.device)
        gt_camera[:, 1:] = gt_cam_t_nr[:, :2]
        gt_camera[:, 0] = (2. * self.focal_length / self.options.img_res) / gt_cam_t_nr[:, 2]
        iuv_image_gt = torch.zeros((batch_size, 3, cfg.MODEL.PyMAF.DP_HEATMAP_SIZE,
                                    cfg.MODEL.PyMAF.DP_HEATMAP_SIZE)).to(self.device)
        if torch.sum(valid_fit.float()) > 0:
            iuv_image_gt[valid_fit] = self.iuv_maker.verts2iuvimg(
                opt_vertices[valid_fit], cam=gt_camera[valid_fit])  # [B, 3, 56, 56]
        input_batch['iuv_image_gt'] = iuv_image_gt
        uvia_list = iuv_img2map(iuv_image_gt)

    # Feed images into the network to predict camera and SMPL parameters
    if self.options.regressor == 'hmr':
        # pred_rotmat: [B, 24, 3, 3], pred_betas: [B, 10], pred_camera: [B, 3]
        pred_rotmat, pred_betas, pred_camera = self.model(images)
    elif self.options.regressor == 'pymaf_net':
        preds_dict, _ = self.model(images)

    output = preds_dict
    loss_dict = {}

    if self.options.regressor == 'pymaf_net' and cfg.MODEL.PyMAF.AUX_SUPV_ON:
        dp_out = preds_dict['dp_out']
        for i in range(len(dp_out)):
            r_i = i - len(dp_out)

            u_pred, v_pred, index_pred, ann_pred = dp_out[r_i]['predict_u'], \
                dp_out[r_i]['predict_v'], dp_out[r_i]['predict_uv_index'], \
                dp_out[r_i]['predict_ann_index']
            if index_pred.shape[-1] == iuv_image_gt.shape[-1]:
                uvia_list_i = uvia_list
            else:
                iuv_image_gt_i = F.interpolate(iuv_image_gt, u_pred.shape[-1], mode='nearest')
                uvia_list_i = iuv_img2map(iuv_image_gt_i)

            loss_U, loss_V, loss_IndexUV, loss_segAnn = self.body_uv_losses(
                u_pred, v_pred, index_pred, ann_pred, uvia_list_i, valid_fit)
            loss_dict[f'loss_U{r_i}'] = loss_U
            loss_dict[f'loss_V{r_i}'] = loss_V
            loss_dict[f'loss_IndexUV{r_i}'] = loss_IndexUV
            loss_dict[f'loss_segAnn{r_i}'] = loss_segAnn

    len_loop = len(preds_dict['smpl_out']) if self.options.regressor == 'pymaf_net' else 1

    for l_i in range(len_loop):
        if self.options.regressor == 'pymaf_net':
            if l_i == 0:
                # initial parameters (mean poses)
                continue
            pred_rotmat = preds_dict['smpl_out'][l_i]['rotmat']
            pred_betas = preds_dict['smpl_out'][l_i]['theta'][:, 3:13]
            pred_camera = preds_dict['smpl_out'][l_i]['theta'][:, :3]

        pred_output = self.smpl(betas=pred_betas,
                                body_pose=pred_rotmat[:, 1:],
                                global_orient=pred_rotmat[:, 0].unsqueeze(1),
                                pose2rot=False)
        pred_vertices = pred_output.vertices
        pred_joints = pred_output.joints

        # Convert weak-perspective camera [s, tx, ty] to camera translation [tx, ty, tz] in 3D,
        # given the bounding box size. This camera translation can be used in a full perspective projection.
        pred_cam_t = torch.stack([
            pred_camera[:, 1], pred_camera[:, 2],
            2 * self.focal_length / (self.options.img_res * pred_camera[:, 0] + 1e-9)
        ], dim=-1)

        camera_center = torch.zeros(batch_size, 2, device=self.device)
        pred_keypoints_2d = perspective_projection(
            pred_joints,
            rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(batch_size, -1, -1),
            translation=pred_cam_t,
            focal_length=self.focal_length,
            camera_center=camera_center)
        # Normalize keypoints to [-1,1]
        pred_keypoints_2d = pred_keypoints_2d / (self.options.img_res / 2.)

        # Compute loss on SMPL parameters
        loss_regr_pose, loss_regr_betas = self.smpl_losses(
            pred_rotmat, pred_betas, opt_pose, opt_betas, valid_fit)
        loss_regr_pose *= cfg.LOSS.POSE_W
        loss_regr_betas *= cfg.LOSS.SHAPE_W
        loss_dict['loss_regr_pose_{}'.format(l_i)] = loss_regr_pose
        loss_dict['loss_regr_betas_{}'.format(l_i)] = loss_regr_betas

        # Compute 2D reprojection loss for the keypoints
        if cfg.LOSS.KP_2D_W > 0:
            loss_keypoints = self.keypoint_loss(
                pred_keypoints_2d, gt_keypoints_2d,
                self.options.openpose_train_weight,
                self.options.gt_train_weight) * cfg.LOSS.KP_2D_W
            loss_dict['loss_keypoints_{}'.format(l_i)] = loss_keypoints

        # Compute 3D keypoint loss
        loss_keypoints_3d = self.keypoint_3d_loss(pred_joints, gt_joints, has_pose_3d) * cfg.LOSS.KP_3D_W
        loss_dict['loss_keypoints_3d_{}'.format(l_i)] = loss_keypoints_3d

        # Per-vertex loss for the shape
        if cfg.LOSS.VERT_W > 0:
            loss_shape = self.shape_loss(pred_vertices, opt_vertices, valid_fit) * cfg.LOSS.VERT_W
            loss_dict['loss_shape_{}'.format(l_i)] = loss_shape

        # Camera loss: force the network to predict positive depth values
        loss_cam = ((torch.exp(-pred_camera[:, 0] * 10))**2).mean()
        loss_dict['loss_cam_{}'.format(l_i)] = loss_cam

    for key in loss_dict:
        if len(loss_dict[key].shape) > 0:
            loss_dict[key] = loss_dict[key][0]

    # Compute total loss
    loss = torch.stack(list(loss_dict.values())).sum()

    # Do backprop
    self.optimizer.zero_grad()
    loss.backward()
    self.optimizer.step()

    # Pack output arguments for tensorboard logging
    output.update({
        'pred_vertices': pred_vertices.detach(),
        'opt_vertices': opt_vertices,
        'pred_cam_t': pred_cam_t.detach(),
        'opt_cam_t': opt_cam_t
    })
    loss_dict['loss'] = loss.detach().item()

    if self.step_count % 100 == 0:
        if self.options.multiprocessing_distributed:
            for loss_name, val in loss_dict.items():
                val = val / self.options.world_size
                if not torch.is_tensor(val):
                    val = torch.Tensor([val]).to(self.device)
                dist.all_reduce(val)
                loss_dict[loss_name] = val
        if self.options.rank == 0:
            for loss_name, val in loss_dict.items():
                self.summary_writer.add_scalar('losses/{}'.format(loss_name), val, self.step_count)

    return {'preds': output, 'losses': loss_dict}
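# The conversion between the weak-perspective camera [s, tx, ty] predicted by the network
# and the full-perspective translation [tx, ty, tz] (and its inverse, used for gt_camera
# above) recurs in every train_step in this section. Below is a minimal, self-contained
# sketch of both directions, assuming only torch; the focal_length and img_res defaults
# are illustrative values, not taken from any trainer config.
import torch


def weak_perspective_to_translation(pred_camera, focal_length=5000., img_res=224):
    # pred_camera: [B, 3] as [s, tx, ty]; returns [B, 3] as [tx, ty, tz]
    s, tx, ty = pred_camera[:, 0], pred_camera[:, 1], pred_camera[:, 2]
    tz = 2 * focal_length / (img_res * s + 1e-9)
    return torch.stack([tx, ty, tz], dim=-1)


def translation_to_weak_perspective(cam_t, focal_length=5000., img_res=224):
    # Inverse mapping, as used to build gt_camera for the renderer: [tx, ty, tz] -> [s, tx, ty]
    s = (2. * focal_length / img_res) / cam_t[:, 2]
    return torch.stack([s, cam_t[:, 0], cam_t[:, 1]], dim=-1)


# Round-trip check on random positive-scale cameras.
cam = torch.rand(4, 3) + 0.5
assert torch.allclose(
    translation_to_weak_perspective(weak_perspective_to_translation(cam)), cam, atol=1e-4)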
def train_step(self, input_batch):
    self.model.train()

    # Get data from the batch
    has_smpl = input_batch['has_smpl'].bool()
    has_pose_3d = input_batch['has_pose_3d'].bool()
    gt_pose1 = input_batch['pose']  # SMPL pose parameters
    gt_betas1 = input_batch['betas']  # SMPL beta parameters
    dataset_name = input_batch['dataset_name']
    indices = input_batch['sample_index']  # index of example inside its dataset
    is_flipped = input_batch['is_flipped']  # flag that indicates whether image was flipped during data augmentation
    rot_angle = input_batch['rot_angle']  # rotation angle used for data augmentation

    # Get GT vertices and model joints
    # Note that gt_model_joints is different from gt_joints as it comes from SMPL
    # Ground-truth parameters are repeated once per view (4 views per example)
    gt_betas = torch.cat((gt_betas1, gt_betas1, gt_betas1, gt_betas1), 0)
    gt_pose = torch.cat((gt_pose1, gt_pose1, gt_pose1, gt_pose1), 0)
    gt_out = self.smpl(betas=gt_betas,
                       body_pose=gt_pose[:, 3:],
                       global_orient=gt_pose[:, :3])
    gt_model_joints = gt_out.joints
    gt_vertices = gt_out.vertices

    # Get current best fits from the dictionary and repeat them per view
    opt_pose1, opt_betas1 = self.fits_dict[(dataset_name, indices.cpu(),
                                            rot_angle.cpu(), is_flipped.cpu())]
    opt_pose = torch.cat((opt_pose1.to(self.device), opt_pose1.to(self.device),
                          opt_pose1.to(self.device), opt_pose1.to(self.device)), 0)
    opt_betas = torch.cat((opt_betas1.to(self.device), opt_betas1.to(self.device),
                           opt_betas1.to(self.device), opt_betas1.to(self.device)), 0)
    opt_output = self.smpl(betas=opt_betas,
                           body_pose=opt_pose[:, 3:],
                           global_orient=opt_pose[:, :3])
    opt_vertices = opt_output.vertices
    opt_joints = opt_output.joints

    # Stack the four views into a single batch
    images = torch.cat((input_batch['img_0'], input_batch['img_1'],
                        input_batch['img_2'], input_batch['img_3']), 0)
    batch_size = input_batch['img_0'].shape[0]

    # Output of the CNN
    pred_rotmat, pred_betas, pred_camera = self.model(images)
    pred_output = self.smpl(betas=pred_betas,
                            body_pose=pred_rotmat[:, 1:],
                            global_orient=pred_rotmat[:, 0].unsqueeze(1),
                            pose2rot=False)
    pred_vertices = pred_output.vertices
    pred_joints = pred_output.joints

    # Convert weak-perspective camera [s, tx, ty] to camera translation [tx, ty, tz]
    pred_cam_t = torch.stack([
        pred_camera[:, 1], pred_camera[:, 2],
        2 * self.focal_length / (self.options.img_res * pred_camera[:, 0] + 1e-9)
    ], dim=-1)

    camera_center = torch.zeros(batch_size * 4, 2, device=self.device)
    pred_keypoints_2d = perspective_projection(
        pred_joints,
        rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(batch_size * 4, -1, -1),
        translation=pred_cam_t,
        focal_length=self.focal_length,
        camera_center=camera_center)
    # Normalize keypoints to [-1,1]
    pred_keypoints_2d = pred_keypoints_2d / (self.options.img_res / 2.)

    # 2D joint points for the four views
    gt_keypoints_2d = torch.cat((input_batch['keypoints_0'], input_batch['keypoints_1'],
                                 input_batch['keypoints_2'], input_batch['keypoints_3']), 0)
    # De-normalize 2D keypoints from [-1,1] to pixel space
    gt_keypoints_2d_orig = gt_keypoints_2d.clone()
    gt_keypoints_2d_orig[:, :, :-1] = 0.5 * self.options.img_res * (gt_keypoints_2d_orig[:, :, :-1] + 1)

    gt_cam_t = estimate_translation(gt_model_joints,
                                    gt_keypoints_2d_orig,
                                    focal_length=self.focal_length,
                                    img_size=self.options.img_res)
    opt_cam_t = estimate_translation(opt_joints,
                                     gt_keypoints_2d_orig,
                                     focal_length=self.focal_length,
                                     img_size=self.options.img_res)

    opt_joint_loss = self.smplify.get_fitting_loss(
        opt_pose, opt_betas, opt_cam_t,
        0.5 * self.options.img_res * torch.ones(batch_size * 4, 2, device=self.device),
        gt_keypoints_2d_orig).mean(dim=-1)

    if self.options.run_smplify:
        # Convert predicted rotation matrices to axis-angle
        pred_rotmat_hom = torch.cat([
            pred_rotmat.detach().view(-1, 3, 3).detach(),
            torch.tensor([0, 0, 1], dtype=torch.float32, device=self.device).view(
                1, 3, 1).expand(batch_size * 4 * 24, -1, -1)
        ], dim=-1)
        pred_pose = rotation_matrix_to_angle_axis(pred_rotmat_hom).contiguous().view(batch_size * 4, -1)
        # rotation_matrix_to_angle_axis returns NaN for a zero rotation, so zero those entries
        pred_pose[torch.isnan(pred_pose)] = 0.0

        # Run SMPLify optimization starting from the network prediction
        new_opt_vertices, new_opt_joints, \
            new_opt_pose, new_opt_betas, \
            new_opt_cam_t, new_opt_joint_loss = self.smplify(
                pred_pose.detach(), pred_betas.detach(), pred_cam_t.detach(),
                0.5 * self.options.img_res * torch.ones(batch_size * 4, 2, device=self.device),
                gt_keypoints_2d_orig)
        new_opt_joint_loss = new_opt_joint_loss.mean(dim=-1)

        # Update the dictionary for the examples where the new loss is lower than the current one
        update = (new_opt_joint_loss < opt_joint_loss)
        update1 = torch.cat((update, update, update, update), 0)
        opt_joint_loss[update] = new_opt_joint_loss[update]
        opt_joints[update1, :] = new_opt_joints[update1, :]
        opt_betas[update1, :] = new_opt_betas[update1, :]
        opt_pose[update1, :] = new_opt_pose[update1, :]
        opt_vertices[update1, :] = new_opt_vertices[update1, :]
        opt_cam_t[update1, :] = new_opt_cam_t[update1, :]

    # Now compute the loss on the four images.
    # Replace the optimized parameters with the ground truth parameters, if available
    has_smpl1 = torch.cat((has_smpl, has_smpl, has_smpl, has_smpl), 0)
    opt_vertices[has_smpl1, :, :] = gt_vertices[has_smpl1, :, :]
    opt_pose[has_smpl1, :] = gt_pose[has_smpl1, :]
    opt_cam_t[has_smpl1, :] = gt_cam_t[has_smpl1, :]
    opt_joints[has_smpl1, :, :] = gt_model_joints[has_smpl1, :, :]
    opt_betas[has_smpl1, :] = gt_betas[has_smpl1, :]

    # Assert whether a fit is valid by comparing the joint loss with the threshold
    valid_fit1 = (opt_joint_loss < self.options.smplify_threshold).to(self.device)
    # Add the examples with GT parameters to the list of valid fits
    valid_fit = torch.cat((valid_fit1, valid_fit1, valid_fit1, valid_fit1), 0) | has_smpl1

    # Compute 2D reprojection loss for the keypoints
    loss_keypoints = self.keypoint_loss(pred_keypoints_2d, gt_keypoints_2d, 0, 1)

    # Compute 3D keypoint loss
    gt_joints = torch.cat((input_batch['pose_3d_0'], input_batch['pose_3d_1'],
                           input_batch['pose_3d_2'], input_batch['pose_3d_3']), 0)
    loss_keypoints_3d = self.keypoint_3d_loss(
        pred_joints, gt_joints,
        torch.cat((has_pose_3d, has_pose_3d, has_pose_3d, has_pose_3d), 0))

    # Compute loss on SMPL parameters
    loss_regr_pose, loss_regr_betas = self.smpl_losses(
        pred_rotmat, pred_betas, opt_pose, opt_betas, valid_fit)
    # Per-vertex loss for the shape
    loss_shape = self.shape_loss(pred_vertices, opt_vertices, valid_fit)

    # Compute total loss (the last term forces the network to predict positive depth values)
    loss_all = 0 * loss_shape + \
        5. * loss_keypoints + \
        0. * loss_keypoints_3d + \
        loss_regr_pose + 0.001 * loss_regr_betas + \
        ((torch.exp(-pred_camera[:, 0] * 10))**2).mean()
    loss_all *= 60

    # Do backprop
    self.optimizer.zero_grad()
    loss_all.backward()
    self.optimizer.step()

    # Pack output arguments
    output = {
        'pred_vertices': pred_vertices,
        'opt_vertices': opt_vertices,
        'pred_cam_t': pred_cam_t,
        'opt_cam_t': opt_cam_t
    }
    losses = {
        'loss': loss_all.detach().item(),
        'loss_keypoints': loss_keypoints.detach().item(),
        'loss_keypoints_3d': loss_keypoints_3d.detach().item(),
        'loss_regr_pose': loss_regr_pose.detach().item(),
        'loss_regr_betas': loss_regr_betas.detach().item(),
        'loss_shape': loss_shape.detach().item()
    }

    return output, losses
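# The SMPLify bookkeeping above keeps a per-example boolean mask `update` and a mask
# replicated across views (`update1`) so that only improved fits overwrite the stored ones.
# Below is a minimal sketch of that masked-replacement pattern with toy tensors, assuming
# only torch; the sizes and the loss values are illustrative and not taken from the trainer.
import torch

num_examples, num_views, dim = 3, 4, 5
old_loss = torch.tensor([1.0, 2.0, 3.0])       # per-example fitting loss
new_loss = torch.tensor([0.5, 5.0, 1.0])       # loss after re-running the fitter
old_params = torch.zeros(num_examples * num_views, dim)   # per-view parameters, views stacked
new_params = torch.ones(num_examples * num_views, dim)

update = new_loss < old_loss                   # [num_examples]: which fits improved
update_views = update.repeat(num_views)        # same mask tiled once per view block
old_loss[update] = new_loss[update]
old_params[update_views] = new_params[update_views]

print(update, old_loss)
# Rows of ones appear exactly for the examples whose fits improved, in every view block.
print(old_params.view(num_views, num_examples, dim)[:, :, 0])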
def train_step(self, input_batch):
    self.model.train()

    images_hr = input_batch['img_hr']
    images_lr_list = input_batch['img_lr']
    images_list = [images_hr] + images_lr_list
    scale_names = ['224', '224_128', '128_64', '64_40', '40_24']
    scale_names = scale_names[:len(images_list)]
    feat_names = ['layer4']

    # Get data from the batch
    gt_keypoints_2d = input_batch['keypoints']  # 2D keypoints
    gt_pose = input_batch['pose']  # SMPL pose parameters
    gt_betas = input_batch['betas']  # SMPL beta parameters
    gt_joints = input_batch['pose_3d']  # 3D pose
    has_smpl = input_batch['has_smpl'].byte()  # flag that indicates whether SMPL parameters are valid
    has_pose_3d = input_batch['has_pose_3d'].byte()  # flag that indicates whether 3D pose is valid
    dataset_name = input_batch['dataset_name']  # name of the dataset the image comes from
    indices = input_batch['sample_index'].numpy()  # index of example inside the mixed dataset
    batch_size = images_hr.shape[0]

    # Get GT vertices and model joints
    # Note that gt_model_joints is different from gt_joints as it comes from SMPL
    gt_out = self.smpl(betas=gt_betas,
                       body_pose=gt_pose[:, 3:],
                       global_orient=gt_pose[:, :3])
    gt_model_joints = gt_out.joints
    gt_vertices = gt_out.vertices

    # De-normalize 2D keypoints from [-1,1] to pixel space
    gt_keypoints_2d_orig = gt_keypoints_2d.clone()
    gt_keypoints_2d_orig[:, :, :-1] = 0.5 * self.options.img_res * (gt_keypoints_2d_orig[:, :, :-1] + 1)

    # Estimate camera translation given the model joints and 2D keypoints
    # by minimizing a weighted least squares loss
    gt_cam_t = estimate_translation(gt_model_joints,
                                    gt_keypoints_2d_orig,
                                    focal_length=self.focal_length,
                                    img_size=self.options.img_res)

    loss_shape = 0
    loss_keypoints = 0
    loss_keypoints_3d = 0
    loss_regr_pose = 0
    loss_regr_betas = 0
    loss_regr_cam_t = 0
    smpl_outputs = []

    for i, (images, scale_name) in enumerate(zip(images_list, scale_names)):
        images = images.to(self.device)

        # Feed images into the network to predict camera and SMPL parameters
        pred_rotmat, pred_betas, pred_camera, feat_list = self.model(images, scale=i)
        pred_output = self.smpl(betas=pred_betas,
                                body_pose=pred_rotmat[:, 1:],
                                global_orient=pred_rotmat[:, 0].unsqueeze(1),
                                pose2rot=False)
        pred_vertices = pred_output.vertices
        pred_joints = pred_output.joints

        # Convert weak-perspective camera [s, tx, ty] to camera translation [tx, ty, tz] in 3D,
        # given the bounding box size. This camera translation can be used in a full perspective projection.
        pred_cam_t = torch.stack([
            pred_camera[:, 1], pred_camera[:, 2],
            2 * self.focal_length / (self.options.img_res * pred_camera[:, 0] + 1e-9)
        ], dim=-1)

        camera_center = torch.zeros(batch_size, 2, device=self.device)
        pred_keypoints_2d = perspective_projection(
            pred_joints,
            rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(batch_size, -1, -1),
            translation=pred_cam_t,
            focal_length=self.focal_length,
            camera_center=camera_center)
        # Normalize keypoints to [-1,1]
        pred_keypoints_2d = pred_keypoints_2d / (self.options.img_res / 2.)

        # Compute loss on SMPL parameters, weighted per scale by (i + 1)
        loss_pose, loss_betas, loss_cam_t = self.smpl_losses(
            pred_rotmat, pred_betas, pred_cam_t, gt_pose, gt_betas, gt_cam_t, has_smpl)
        loss_regr_pose = loss_regr_pose + (i + 1) * loss_pose
        loss_regr_betas = loss_regr_betas + (i + 1) * loss_betas
        loss_regr_cam_t = loss_regr_cam_t + (i + 1) * loss_cam_t

        # Compute 2D reprojection loss for the keypoints
        loss_keypoints = loss_keypoints + (i + 1) * self.keypoint_loss(
            pred_keypoints_2d, gt_keypoints_2d,
            self.options.openpose_train_weight, self.options.gt_train_weight)

        # Compute 3D keypoint loss
        loss_keypoints_3d = loss_keypoints_3d + (i + 1) * self.keypoint_3d_loss(
            pred_joints, gt_joints, has_pose_3d)

        # Per-vertex loss for the shape
        loss_shape = loss_shape + (i + 1) * self.shape_loss(pred_vertices, gt_vertices, has_smpl)

        # Save pred_rotmat, pred_betas, pred_cam_t and features for later, ordered from large to small images
        smpl_outputs.append([pred_rotmat, pred_betas, pred_cam_t, feat_list])

        # Update the queue size and push the current features
        self.feat_queue.update_queue_size(batch_size)
        self.feat_queue.update_all([feat.detach() for feat in feat_list], feat_names)
        # Update dataset name and index for each scale
        self.feat_queue.update('dataset_names', np.array(dataset_name))
        self.feat_queue.update('dataset_indices', indices)

    # Compute total loss except the consistency loss
    loss = self.options.shape_loss_weight * loss_shape + \
        self.options.keypoint_loss_weight * loss_keypoints + \
        self.options.keypoint_loss_weight * loss_keypoints_3d + \
        self.options.pose_loss_weight * loss_regr_pose + \
        self.options.beta_loss_weight * loss_regr_betas + \
        self.options.cam_loss_weight * loss_regr_cam_t
    loss = loss / len(images_list)

    # Compute the consistency loss
    loss_consistency = 0
    for i in range(len(smpl_outputs)):
        gt_rotmat, gt_betas, gt_cam_t, gt_feat_list = smpl_outputs[i]
        gt_rotmat = gt_rotmat.detach()
        gt_betas = gt_betas.detach()
        gt_cam_t = gt_cam_t.detach()
        gt_feat_list = [feat.detach() for feat in gt_feat_list]

        # Sample negative indices from the feature queue
        indices_list = self.feat_queue.select_indices(dataset_name, indices, self.options.sample_size)
        neg_feat_list = self.feat_queue.batch_sample_all(indices_list, names=feat_names)

        for j in range(i + 1, len(smpl_outputs)):
            # Consistency loss from high to low resolution (pairs 1:2, 1:3, 2:3, ...),
            # weighted by (j - i) / len(smpl_outputs)
            pred_rotmat, pred_betas, pred_cam_t, pred_feat_list = smpl_outputs[j]
            loss_consistency_total, loss_consistency_smpl, loss_consistency_feat = self.consistency_losses(
                pred_rotmat, pred_betas, pred_cam_t, pred_feat_list,
                gt_rotmat, gt_betas, gt_cam_t, gt_feat_list, neg_feat_list)
            loss_consistency = loss_consistency + ((j - i) / len(smpl_outputs)) * loss_consistency_total

    loss_consistency = loss_consistency * self.consistency_loss_ramp * self.options.consistency_loss_weight
    loss += loss_consistency
    loss *= 60

    # Do backprop
    self.optimizer.zero_grad()
    loss.backward()
    self.optimizer.step()

    # Pack output arguments
    output = {
        'pred_vertices': pred_vertices.detach(),
        'pred_cam_t': pred_cam_t.detach()
    }
    losses = {
        'lr': self.optimizer.param_groups[0]['lr'],
        'loss_ramp': self.consistency_loss_ramp,
        'loss': loss.detach().item(),
        'loss_consistency': loss_consistency.detach().item(),
        'loss_consistency_smpl': loss_consistency_smpl.detach().item(),
        'loss_consistency_feat': loss_consistency_feat.detach().item(),
        'loss_keypoints': loss_keypoints.detach().item(),
        'loss_keypoints_3d': loss_keypoints_3d.detach().item(),
        'loss_regr_pose': loss_regr_pose.detach().item(),
        'loss_regr_betas': loss_regr_betas.detach().item(),
        'loss_shape': loss_shape.detach().item()
    }

    return output, losses
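# The multi-scale loop above accumulates each supervised loss with a scale-dependent weight
# (i + 1) and then divides by the number of scales. A minimal numeric sketch of that
# weighting scheme, assuming only torch; the per-scale losses here are toy scalars, not
# values produced by the trainer.
import torch

per_scale_losses = [torch.tensor(1.0), torch.tensor(0.8), torch.tensor(0.5)]  # e.g. 224, 128, 64 px inputs
total = torch.tensor(0.0)
for i, loss_i in enumerate(per_scale_losses):
    total = total + (i + 1) * loss_i   # later (lower-resolution) scales get larger weights
total = total / len(per_scale_losses)  # average over the number of scales, as in the trainer
print(total)  # (1*1.0 + 2*0.8 + 3*0.5) / 3 = 1.3667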
def train_step(self, input_batch):
    self.model.train()

    # Get data from the batch
    images = input_batch['img']  # input image
    gt_keypoints_2d = input_batch['keypoints']  # 2D keypoints
    gt_pose = input_batch['pose']  # SMPL pose parameters
    gt_betas = input_batch['betas']  # SMPL beta parameters
    gt_joints = input_batch['pose_3d']  # 3D pose
    has_smpl = input_batch['has_smpl'].byte()  # flag that indicates whether SMPL parameters are valid
    has_pose_3d = input_batch['has_pose_3d'].byte()  # flag that indicates whether 3D pose is valid
    is_flipped = input_batch['is_flipped']  # flag that indicates whether image was flipped during data augmentation
    rot_angle = input_batch['rot_angle']  # rotation angle used for data augmentation
    dataset_name = input_batch['dataset_name']  # name of the dataset the image comes from
    indices = input_batch['sample_index']  # index of example inside its dataset
    batch_size = images.shape[0]

    # Get GT vertices and model joints
    # Note that gt_model_joints is different from gt_joints as it comes from SMPL
    gt_out = self.smpl(betas=gt_betas,
                       body_pose=gt_pose[:, 3:],
                       global_orient=gt_pose[:, :3])
    gt_model_joints = gt_out.joints
    gt_vertices = gt_out.vertices

    # Get current best fits from the dictionary
    opt_pose, opt_betas = self.fits_dict[(dataset_name, indices.cpu(),
                                          rot_angle.cpu(), is_flipped.cpu())]
    opt_pose = opt_pose.to(self.device)
    opt_betas = opt_betas.to(self.device)
    opt_output = self.smpl(betas=opt_betas,
                           body_pose=opt_pose[:, 3:],
                           global_orient=opt_pose[:, :3])
    opt_vertices = opt_output.vertices
    if opt_vertices.shape != (self.options.batch_size, 6890, 3):
        opt_vertices = torch.zeros_like(opt_vertices, device=self.device)
    opt_joints = opt_output.joints

    # De-normalize 2D keypoints from [-1,1] to pixel space
    gt_keypoints_2d_orig = gt_keypoints_2d.clone()
    gt_keypoints_2d_orig[:, :, :-1] = 0.5 * self.options.img_res * (gt_keypoints_2d_orig[:, :, :-1] + 1)

    # Estimate camera translation given the model joints and 2D keypoints
    # by minimizing a weighted least squares loss
    gt_cam_t = estimate_translation(gt_model_joints,
                                    gt_keypoints_2d_orig,
                                    focal_length=self.focal_length,
                                    img_size=self.options.img_res)
    opt_cam_t = estimate_translation(opt_joints,
                                     gt_keypoints_2d_orig,
                                     focal_length=self.focal_length,
                                     img_size=self.options.img_res)

    opt_joint_loss = self.smplify.get_fitting_loss(
        opt_pose, opt_betas, opt_cam_t,
        0.5 * self.options.img_res * torch.ones(batch_size, 2, device=self.device),
        gt_keypoints_2d_orig).mean(dim=-1)

    # Feed images into the network to predict camera and SMPL parameters
    pred_rotmat, pred_betas, pred_camera = self.model(images)
    pred_output = self.smpl(betas=pred_betas,
                            body_pose=pred_rotmat[:, 1:],
                            global_orient=pred_rotmat[:, 0].unsqueeze(1),
                            pose2rot=False)
    pred_vertices = pred_output.vertices
    if pred_vertices.shape != (self.options.batch_size, 6890, 3):
        pred_vertices = torch.zeros_like(pred_vertices, device=self.device)
    pred_joints = pred_output.joints

    # Convert weak-perspective camera [s, tx, ty] to camera translation [tx, ty, tz] in 3D,
    # given the bounding box size. This camera translation can be used in a full perspective projection.
    pred_cam_t = torch.stack([
        pred_camera[:, 1], pred_camera[:, 2],
        2 * self.focal_length / (self.options.img_res * pred_camera[:, 0] + 1e-9)
    ], dim=-1)

    camera_center = torch.zeros(batch_size, 2, device=self.device)
    pred_keypoints_2d = perspective_projection(
        pred_joints,
        rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(batch_size, -1, -1),
        translation=pred_cam_t,
        focal_length=self.focal_length,
        camera_center=camera_center)
    # Normalize keypoints to [-1,1]
    pred_keypoints_2d = pred_keypoints_2d / (self.options.img_res / 2.)

    if self.options.run_smplify:
        # Convert predicted rotation matrices to axis-angle
        pred_rotmat_hom = torch.cat([
            pred_rotmat.detach().view(-1, 3, 3).detach(),
            torch.tensor([0, 0, 1], dtype=torch.float32, device=self.device).view(
                1, 3, 1).expand(batch_size * 24, -1, -1)
        ], dim=-1)
        pred_pose = rotation_matrix_to_angle_axis(pred_rotmat_hom).contiguous().view(batch_size, -1)
        # tgm.rotation_matrix_to_angle_axis returns NaN for 0 rotation, so manually zero those entries
        pred_pose[torch.isnan(pred_pose)] = 0.0

        # Run SMPLify optimization starting from the network prediction
        new_opt_vertices, new_opt_joints, \
            new_opt_pose, new_opt_betas, \
            new_opt_cam_t, new_opt_joint_loss = self.smplify(
                pred_pose.detach(), pred_betas.detach(), pred_cam_t.detach(),
                0.5 * self.options.img_res * torch.ones(batch_size, 2, device=self.device),
                gt_keypoints_2d_orig)
        new_opt_joint_loss = new_opt_joint_loss.mean(dim=-1)

        # Update the dictionary for the examples where the new loss is lower than the current one
        update = (new_opt_joint_loss < opt_joint_loss)
        opt_joint_loss[update] = new_opt_joint_loss[update]
        opt_vertices[update, :] = new_opt_vertices[update, :]
        opt_joints[update, :] = new_opt_joints[update, :]
        opt_pose[update, :] = new_opt_pose[update, :]
        opt_betas[update, :] = new_opt_betas[update, :]
        opt_cam_t[update, :] = new_opt_cam_t[update, :]

        self.fits_dict[(dataset_name, indices.cpu(), rot_angle.cpu(),
                        is_flipped.cpu(), update.cpu())] = (opt_pose.cpu(), opt_betas.cpu())
    else:
        update = torch.zeros(batch_size, device=self.device).byte()

    # Replace extreme betas with zero betas
    opt_betas[(opt_betas.abs() > 3).any(dim=-1)] = 0.

    # Replace the optimized parameters with the ground truth parameters, if available
    opt_vertices[has_smpl, :, :] = gt_vertices[has_smpl, :, :]
    opt_cam_t[has_smpl, :] = gt_cam_t[has_smpl, :]
    opt_joints[has_smpl, :, :] = gt_model_joints[has_smpl, :, :]
    opt_pose[has_smpl, :] = gt_pose[has_smpl, :]
    opt_betas[has_smpl, :] = gt_betas[has_smpl, :]

    # Assert whether a fit is valid by comparing the joint loss with the threshold
    valid_fit = (opt_joint_loss < self.options.smplify_threshold).to(self.device)
    # Add the examples with GT parameters to the list of valid fits
    valid_fit = valid_fit.to(torch.uint8) | has_smpl

    opt_keypoints_2d = perspective_projection(
        opt_joints,
        rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(batch_size, -1, -1),
        translation=opt_cam_t,
        focal_length=self.focal_length,
        camera_center=camera_center)
    opt_keypoints_2d = opt_keypoints_2d / (self.options.img_res / 2.)

    # Compute loss on SMPL parameters
    loss_regr_pose, loss_regr_betas = self.smpl_losses(
        pred_rotmat, pred_betas, opt_pose, opt_betas, valid_fit)

    # Compute 2D reprojection loss for the keypoints
    loss_keypoints = self.keypoint_loss(pred_keypoints_2d, gt_keypoints_2d,
                                        self.options.openpose_train_weight,
                                        self.options.gt_train_weight)

    # Compute 3D keypoint loss
    loss_keypoints_3d = self.keypoint_3d_loss(pred_joints, gt_joints, has_pose_3d)

    # Per-vertex loss for the shape
    loss_shape = self.shape_loss(pred_vertices, opt_vertices, valid_fit)

    # Compute total loss
    # The last component is a loss that forces the network to predict positive depth values
    loss = self.options.shape_loss_weight * loss_shape + \
        self.options.keypoint_loss_weight * loss_keypoints + \
        self.options.keypoint_loss_weight * loss_keypoints_3d + \
        loss_regr_pose + self.options.beta_loss_weight * loss_regr_betas + \
        ((torch.exp(-pred_camera[:, 0] * 10))**2).mean()
    loss *= 60

    # Do backprop
    self.optimizer.zero_grad()
    loss.backward()
    self.optimizer.step()

    # Pack output arguments for tensorboard logging
    output = {
        'pred_vertices': pred_vertices.detach(),
        'opt_vertices': opt_vertices,
        'pred_cam_t': pred_cam_t.detach(),
        'opt_cam_t': opt_cam_t
    }
    losses = {
        'loss': loss.detach().item(),
        'loss_keypoints': loss_keypoints.detach().item(),
        'loss_keypoints_3d': loss_keypoints_3d.detach().item(),
        'loss_regr_pose': loss_regr_pose.detach().item(),
        'loss_regr_betas': loss_regr_betas.detach().item(),
        'loss_shape': loss_shape.detach().item()
    }

    return output, losses
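# Several of the total losses above end with ((torch.exp(-pred_camera[:, 0] * 10)) ** 2).mean(),
# which penalizes non-positive predicted weak-perspective scales s = pred_camera[:, 0] so that
# the derived depth tz = 2f / (img_res * s) stays positive and finite. A minimal sketch of how
# this term behaves on a few scale values (assuming only torch; the values are illustrative):
import torch

s = torch.tensor([-0.5, 0.0, 0.1, 1.0])
penalty = torch.exp(-s * 10) ** 2
# roughly 2.2e4 for s = -0.5, exactly 1 at s = 0, and near zero once s is clearly positive
print(penalty)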
def train_step(self, input_batch):
    # Learning rate decay
    if self.decay_steps_ind < len(cfg.SOLVER.STEPS) and \
            input_batch['step_count'] == cfg.SOLVER.STEPS[self.decay_steps_ind]:
        lr = self.optimizer.param_groups[0]['lr']
        lr_new = lr * cfg.SOLVER.GAMMA
        print('Decay the learning rate on step {} from {} to {}'.format(
            input_batch['step_count'], lr, lr_new))
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr_new
        lr = self.optimizer.param_groups[0]['lr']
        assert lr == lr_new
        self.decay_steps_ind += 1

    self.model.train()

    # Get data from the batch
    images = input_batch['img']  # input image
    gt_keypoints_2d = input_batch['keypoints']  # 2D keypoints
    gt_pose = input_batch['pose']  # SMPL pose parameters
    gt_betas = input_batch['betas']  # SMPL beta parameters
    gt_joints = input_batch['pose_3d']  # 3D pose
    has_smpl = input_batch['has_smpl'].byte()  # flag that indicates whether SMPL parameters are valid
    has_pose_3d = input_batch['has_pose_3d'].byte()  # flag that indicates whether 3D pose is valid
    is_flipped = input_batch['is_flipped']  # flag that indicates whether image was flipped during data augmentation
    rot_angle = input_batch['rot_angle']  # rotation angle used for data augmentation
    dataset_name = input_batch['dataset_name']  # name of the dataset the image comes from
    indices = input_batch['sample_index']  # index of example inside its dataset
    batch_size = images.shape[0]

    # Get GT vertices and model joints
    # Note that gt_model_joints is different from gt_joints as it comes from SMPL
    gt_out = self.smpl(betas=gt_betas,
                       body_pose=gt_pose[:, 3:],
                       global_orient=gt_pose[:, :3])
    gt_model_joints = gt_out.joints
    gt_vertices = gt_out.vertices

    # Get current pseudo labels (final fits of SPIN) from the dictionary
    opt_pose, opt_betas = self.fits_dict[(dataset_name, indices.cpu(),
                                          rot_angle.cpu(), is_flipped.cpu())]
    opt_pose = opt_pose.to(self.device)
    opt_betas = opt_betas.to(self.device)

    # Replace extreme betas with zero betas
    opt_betas[(opt_betas.abs() > 3).any(dim=-1)] = 0.
    # Replace the optimized parameters with the ground truth parameters, if available
    opt_pose[has_smpl, :] = gt_pose[has_smpl, :]
    opt_betas[has_smpl, :] = gt_betas[has_smpl, :]

    opt_output = self.smpl(betas=opt_betas,
                           body_pose=opt_pose[:, 3:],
                           global_orient=opt_pose[:, :3])
    opt_vertices = opt_output.vertices
    opt_joints = opt_output.joints

    # De-normalize 2D keypoints from [-1,1] to pixel space
    gt_keypoints_2d_orig = gt_keypoints_2d.clone()
    gt_keypoints_2d_orig[:, :, :-1] = 0.5 * self.options.img_res * (gt_keypoints_2d_orig[:, :, :-1] + 1)

    # Estimate camera translation given the model joints and 2D keypoints
    # by minimizing a weighted least squares loss
    gt_cam_t = estimate_translation(gt_model_joints,
                                    gt_keypoints_2d_orig,
                                    focal_length=self.focal_length,
                                    img_size=self.options.img_res)
    opt_cam_t = estimate_translation(opt_joints,
                                     gt_keypoints_2d_orig,
                                     focal_length=self.focal_length,
                                     img_size=self.options.img_res)

    if self.options.train_data in ['h36m_coco_itw']:
        valid_fit = self.fits_dict.get_vaild_state(dataset_name, indices.cpu()).to(self.device)
        valid_fit = valid_fit | has_smpl
    else:
        valid_fit = has_smpl

    # Feed images into the network to predict camera and SMPL parameters
    input_batch['opt_pose'] = opt_pose
    input_batch['opt_betas'] = opt_betas
    input_batch['valid_fit'] = valid_fit
    input_batch['dp_dict'] = {
        k: v.to(self.device) if isinstance(v, torch.Tensor) else v
        for k, v in input_batch['dp_dict'].items()
    }

    has_iuv = torch.tensor([dn not in ['dp_coco'] for dn in dataset_name],
                           dtype=torch.uint8).to(self.device)
    has_iuv = has_iuv & valid_fit
    input_batch['has_iuv'] = has_iuv
    has_dp = input_batch['has_dp']

    target_smpl_kps = torch.zeros((batch_size, 24, 3)).to(opt_output.smpl_joints.device)
    target_smpl_kps[:, :, :2] = perspective_projection(
        opt_output.smpl_joints.detach().clone(),
        rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(batch_size, -1, -1),
        translation=opt_cam_t,
        focal_length=self.focal_length,
        camera_center=torch.zeros(batch_size, 2, device=self.device) + (0.5 * self.options.img_res))
    target_smpl_kps[:, :, :2] = target_smpl_kps[:, :, :2] / (0.5 * self.options.img_res) - 1
    target_smpl_kps[has_iuv == 1, :, 2] = 1
    target_smpl_kps[has_dp == 1] = input_batch['smpl_2dkps'][has_dp == 1]
    input_batch['target_smpl_kps'] = target_smpl_kps  # [B, 24, 3]
    input_batch['target_verts'] = opt_vertices.detach().clone()  # [B, 6890, 3]

    # Camera translation for the neural renderer
    gt_cam_t_nr = opt_cam_t.detach().clone()
    gt_camera = torch.zeros(gt_cam_t_nr.shape).to(gt_cam_t_nr.device)
    gt_camera[:, 1:] = gt_cam_t_nr[:, :2]
    gt_camera[:, 0] = (2. * self.focal_length / self.options.img_res) / gt_cam_t_nr[:, 2]
    input_batch['target_cam'] = gt_camera

    # Do forward
    danet_return_dict = self.model(input_batch)

    loss_total = 0
    losses_dict = {}
    for loss_key in danet_return_dict['losses']:
        loss_total += danet_return_dict['losses'][loss_key]
        losses_dict['loss_{}'.format(loss_key)] = danet_return_dict['losses'][loss_key].detach().item()

    # Do backprop
    self.optimizer.zero_grad()
    loss_total.backward()
    self.optimizer.step()

    if input_batch['pretrain_mode']:
        pred_vertices = None
        pred_cam_t = None
    else:
        pred_vertices = danet_return_dict['prediction']['vertices'].detach()
        pred_cam_t = danet_return_dict['prediction']['cam_t'].detach()

    # Pack output arguments for tensorboard logging
    output = {
        'pred_vertices': pred_vertices,
        'opt_vertices': opt_vertices,
        'pred_cam_t': pred_cam_t,
        'opt_cam_t': opt_cam_t,
        'visualization': danet_return_dict['visualization']
    }
    losses_dict.update({'loss_total': loss_total.detach().item()})

    return output, losses_dict
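# The learning-rate schedule at the top of the last train_step drops the LR by a factor of
# GAMMA whenever the global step reaches the next milestone in SOLVER.STEPS. A minimal
# sketch of the same step-based decay on a plain optimizer, assuming only torch; the
# milestone and gamma values here are illustrative, not taken from any config.
import torch

model = torch.nn.Linear(2, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
milestones, gamma = [3, 6], 0.1
decay_ind = 0

for step_count in range(8):
    if decay_ind < len(milestones) and step_count == milestones[decay_ind]:
        for param_group in optimizer.param_groups:
            param_group['lr'] *= gamma
        decay_ind += 1
    # lr stays at 1e-4, drops to 1e-5 at step 3 and to 1e-6 at step 6
    print(step_count, optimizer.param_groups[0]['lr'])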