Example #1
    def __init__(self, joint_format='coco25'):
        super(SMPLR, self).__init__()
        self.smpl_male = SMPL(os.path.join(config.model_dir, 'smpl_models',
                                           'smpl'),
                              gender='male',
                              create_transl=False)
        self.smpl_female = SMPL(os.path.join(config.model_dir, 'smpl_models',
                                             'smpl'),
                                gender='female',
                                create_transl=False)
        self.smpl_neutral = SMPL(os.path.join(config.model_dir, 'smpl_models',
                                              'smpl'),
                                 gender='neutral',
                                 create_transl=False)
        self.smpls = {
            'f': self.smpl_female,
            'm': self.smpl_male,
            'n': self.smpl_neutral
        }

        self.joint_format = joint_format
        self.J_regressor = torch.from_numpy(
            np.load(
                os.path.join(
                    config.model_dir,
                    "spin_data/J_regressor_h36m.npy"))).float().unsqueeze(0)
Example #2
    def __init__(self,
                 options,
                 orig_size=224,
                 feat_in_dim=None,
                 smpl_mean_params=None,
                 pretrained=True):
        super(SMPL_Regressor, self).__init__()

        self.mapping_to_detectron = None
        self.orphans_in_detectron = None

        self.focal_length = 5000.
        self.options = options

        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')

        self.orig_size = orig_size

        mean_params = np.load(smpl_mean_params)
        init_pose_6d = torch.from_numpy(mean_params['pose'][:]).unsqueeze(0)
        if cfg.DANET.USE_6D_ROT:
            init_pose = init_pose_6d
        else:
            init_pose_rotmat = rot6d_to_rotmat(init_pose_6d)
            init_pose = init_pose_rotmat.reshape(-1).unsqueeze(0)
        init_shape = torch.from_numpy(
            mean_params['shape'][:].astype('float32')).unsqueeze(0)
        init_cam = torch.from_numpy(mean_params['cam']).unsqueeze(0)

        init_params = (init_cam, init_shape, init_pose)

        self.smpl = SMPL(path_config.SMPL_MODEL_DIR,
                         batch_size=self.options.batch_size,
                         create_transl=False)

        if cfg.DANET.DECOMPOSED:
            print('using decomposed predictor.')
            self.smpl_para_Outs = DecomposedPredictor(feat_in_dim, init_params,
                                                      pretrained)
        else:
            print('using global predictor.')
            self.smpl_para_Outs = GlobalPredictor(feat_in_dim, pretrained)

        # Per-vertex loss on the shape
        self.criterion_shape = nn.L1Loss().to(self.device)
        # Keypoint (2D and 3D) loss
        # No reduction because confidence weighting needs to be applied
        self.criterion_keypoints = nn.MSELoss(reduction='none').to(self.device)
        # Loss for SMPL parameter regression
        self.criterion_regr = nn.MSELoss().to(self.device)
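
The `reduction='none'` choice above exists so per-joint confidences can weight the loss. A sketch of the usual SPIN-style weighting (the `(x, y, confidence)` ground-truth layout is an assumption, carried over from the dataset example further down):

    def keypoint_loss(self, pred_keypoints_2d, gt_keypoints_2d):
        # gt_keypoints_2d: (B, K, 3), last channel is per-joint confidence.
        conf = gt_keypoints_2d[:, :, -1].unsqueeze(-1).clone()
        loss = conf * self.criterion_keypoints(pred_keypoints_2d,
                                               gt_keypoints_2d[:, :, :-1])
        return loss.mean()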
Example #3
    def __init__(self, focal_length=5000., render_res=224):
        # Parameters for rendering
        self.focal_length = focal_length
        self.render_res = render_res
        # We use Neural 3D mesh renderer for rendering masks and part segmentations
        self.neural_renderer = nr.Renderer(dist_coeffs=None,
                                           orig_size=self.render_res,
                                           image_size=render_res,
                                           light_intensity_ambient=1,
                                           light_intensity_directional=0,
                                           anti_aliasing=False)
        self.faces = torch.from_numpy(
            SMPL(cfg.SMPL_MODEL_DIR).faces.astype(np.int32)).cuda().int()
        textures = np.load(cfg.VERTEX_TEXTURE_FILE)
        self.textures = torch.from_numpy(textures).cuda().float()
        self.cube_parts = torch.cuda.FloatTensor(np.load(cfg.CUBE_PARTS_FILE))
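
A sketch of the forward pass this renderer setup typically pairs with (SPIN-style; the conversion of a weak-perspective camera `(s, tx, ty)` to a translation at fixed focal length, and the `neural_renderer` call signature, are assumptions):

    def __call__(self, vertices, camera):
        # camera: (B, 3) weak-perspective parameters (s, tx, ty).
        batch_size = vertices.shape[0]
        cam_t = torch.stack([camera[:, 1], camera[:, 2],
                             2 * self.focal_length /
                             (self.render_res * camera[:, 0] + 1e-9)], dim=-1)
        K = torch.eye(3, device=vertices.device)
        K[0, 0] = K[1, 1] = self.focal_length
        K[0, 2] = K[1, 2] = self.render_res / 2.
        K = K[None].expand(batch_size, -1, -1)
        R = torch.eye(3, device=vertices.device)[None].expand(batch_size, -1, -1)
        faces = self.faces[None].expand(batch_size, -1, -1)
        textures = self.textures.expand(batch_size, -1, -1, -1, -1, -1)
        parts, _, mask = self.neural_renderer(vertices, faces, textures=textures,
                                              K=K, R=R, t=cam_t.unsqueeze(1))
        return mask > 0, parts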
Example #4
    def __init__(self, num_features):
        super(ResNet, self).__init__()

        self.num_in = 2 * 14
        self.num_out = dim_theta
        self.num_features = num_features

        self.linear1 = nn.Linear(self.num_in, num_features)
        self.mod1 = ResNetModule(num_features)
        self.mod2 = ResNetModule(num_features)
        self.linear2 = nn.Linear(num_features, self.num_out)

        # set weights
        nn.init.normal_(self.linear1.weight, mean=0, std=0.001)
        nn.init.constant_(self.linear1.bias, 0)
        nn.init.normal_(self.linear2.weight, mean=0, std=0.001)
        nn.init.constant_(self.linear2.bias, 0)

        self.smpl = SMPL(joint_type='lsp', obj_saveable=True)
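
This snippet omits `forward()`, but the dimensions pin down the intent: 14 2-D joints flattened to 2 * 14 = 28 inputs, mapped through two residual modules to `dim_theta` SMPL parameters. A hypothetical shape check, assuming a standard linear1 -> mod1 -> mod2 -> linear2 forward pass:

# Hypothetical shape check; assumes a forward() not shown in this snippet.
net = ResNet(num_features=1024)
x = torch.randn(8, 2 * 14)   # a batch of 14 flattened 2-D joints
theta = net(x)               # expected: (8, dim_theta)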
Example #5
    def __init__(self,
                 step_size=1e-2,
                 batch_size=66,
                 num_iters=100,
                 focal_length=5000,
                 device=torch.device('cuda')):

        # Store options
        self.device = device
        self.focal_length = focal_length
        self.step_size = step_size

        # Ignore the following joints during the fitting process
        ign_joints = ['OP Neck', 'OP RHip', 'OP LHip', 'Right Hip', 'Left Hip']
        self.ign_joints = [constants.JOINT_IDS[i] for i in ign_joints]
        self.num_iters = num_iters
        # GMM pose prior
        self.pose_prior = MaxMixturePrior(prior_folder='data',
                                          num_gaussians=8,
                                          dtype=torch.float32).to(device)
        # Load SMPL model
        self.smpl = SMPL(config.SMPL_MODEL_DIR,
                         batch_size=batch_size,
                         create_transl=False).to(self.device)
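
A sketch of how `ign_joints` is typically consumed before fitting (SPIN-style; `keypoints_2d` is a placeholder `(B, K, 3)` tensor whose last channel is confidence). Zeroing the confidence removes those joints from the reprojection objective:

# Sketch (would live inside the fitting routine); keypoints_2d: (B, K, 3).
joints_2d = keypoints_2d[:, :, :2]
joints_conf = keypoints_2d[:, :, -1]
joints_conf[:, self.ign_joints] = 0.  # zero weight => excluded from the loss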
Example #6
    def __init__(self,
                 options,
                 dataset,
                 ignore_3d=False,
                 use_augmentation=True,
                 is_train=True):
        super().__init__()
        self.dataset = dataset
        self.is_train = is_train
        self.options = options
        self.img_dir = path_config.DATASET_FOLDERS[dataset]
        self.normalize_img = Normalize(mean=constants.IMG_NORM_MEAN,
                                       std=constants.IMG_NORM_STD)

        if not is_train and dataset == 'h36m-p2' and options.eval_pve:
            self.data = np.load(
                path_config.DATASET_FILES[is_train]['h36m-p2-mosh'],
                allow_pickle=True)
        else:
            self.data = np.load(path_config.DATASET_FILES[is_train][dataset],
                                allow_pickle=True)

        self.imgname = self.data['imgname']
        self.dataset_dict = {dataset: 0}

        logger.info('len of {}: {}'.format(self.dataset, len(self.imgname)))

        # Get paths to gt masks, if available
        try:
            self.maskname = self.data['maskname']
        except KeyError:
            pass
        try:
            self.partname = self.data['partname']
        except KeyError:
            pass

        # Bounding boxes are assumed to be in the center and scale format
        self.scale = self.data['scale']
        self.center = self.data['center']

        # If False, do not do augmentation
        self.use_augmentation = use_augmentation

        # Get gt SMPL parameters, if available
        try:
            self.pose = self.data['pose'].astype(np.float64)  # (N, 72)
            self.betas = self.data['shape'].astype(np.float64)  # (N, 10)

            ################# generate the final_fits file in case it is missing #################
            # import os
            # params_ = np.concatenate((self.pose, self.betas), axis=-1)
            # out_file_fit = os.path.join('data/final_fits', self.dataset)
            # if not os.path.exists('data/final_fits'):
            #     os.makedirs('data/final_fits')
            # np.save(out_file_fit, params_)
            # raise ValueError('Please copy {}.npy file to data/final_fits, and delete this code block.'.format(self.dataset))
            ########################################################################

            if 'has_smpl' in self.data:
                self.has_smpl = self.data['has_smpl']
            else:
                self.has_smpl = np.ones(len(self.imgname), dtype=np.float32)
        except KeyError:
            self.has_smpl = np.zeros(len(self.imgname), dtype=np.float32)
        if ignore_3d:
            self.has_smpl = np.zeros(len(self.imgname), dtype=np.float32)

        # Get SMPL 2D keypoints
        try:
            self.smpl_2dkps = self.data['smpl_2dkps']
            self.has_smpl_2dkps = 1
        except KeyError:
            self.has_smpl_2dkps = 0

        # Get gt 3D pose, if available
        try:
            self.pose_3d = self.data['S']
            self.has_pose_3d = 1
        except KeyError:
            self.has_pose_3d = 0
        if ignore_3d:
            self.has_pose_3d = 0

        # Get 2D keypoints
        try:
            keypoints_gt = self.data['part']
        except KeyError:
            keypoints_gt = np.zeros((len(self.imgname), 24, 3))
        try:
            keypoints_openpose = self.data['openpose']
        except KeyError:
            keypoints_openpose = np.zeros((len(self.imgname), 25, 3))
        self.keypoints = np.concatenate([keypoints_openpose, keypoints_gt],
                                        axis=1)

        # Get gender data, if available
        try:
            gender = self.data['gender']
            self.gender = np.array([0 if str(g) == 'm' else 1
                                    for g in gender]).astype(np.int32)
        except KeyError:
            self.gender = -1 * np.ones(len(self.imgname)).astype(np.int32)

        self.length = self.scale.shape[0]

        self.smpl = SMPL(path_config.SMPL_MODEL_DIR,
                         batch_size=cfg.TRAIN.BATCH_SIZE,
                         create_transl=False)

        self.faces = self.smpl.faces
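
Two conventions above are easy to trip over: `self.keypoints` stacks the 25 OpenPose joints first and the 24 ground-truth joints after them (49 total), and `self.gender` encodes male as 0, female as 1, and unknown as -1. A hypothetical lookup making both explicit:

# Hypothetical indexing sketch for the conventions above.
kp = self.keypoints[idx]            # (49, 3): rows 0-24 OpenPose, 25-48 GT
openpose_kp, gt_kp = kp[:25], kp[25:]
is_female = self.gender[idx] == 1   # 0 = male, 1 = female, -1 = unknown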
Example #7
def process_surreal(is_train, uv_type, renderer):
    dataset_file = cfg.DATASET_FILES[is_train]['surreal']
    root_dir = cfg.DATASET_FOLDERS['surreal']
    iuv_dir = join(root_dir, '{}_IUV_gt'.format(uv_type), 'data', 'cmu',
                   'train')

    smpl_female = SMPL(cfg.FEMALE_SMPL_FILE)
    smpl_male = SMPL(cfg.MALE_SMPL_FILE)
    H = 240
    W = 320
    img_empty = np.zeros([H, W, 3])

    data = np.load(dataset_file, allow_pickle=True)
    shape_list = data['shape']
    pose_list = data['pose']
    gender_list = data['gender']
    part24_list = data['part_smpl']
    videoname_list = data['videoname']
    framenum_list = data['framenum']
    dataset_size = len(data['gender'])
    joint3d_list = data['S_smpl']
    iuvnames = []

    for i in tqdm(range(dataset_size)):

        videoname = videoname_list[i]
        framenum = framenum_list[i]
        iuv_name = videoname[:-4] + '_{}.png'.format(framenum)
        output_path = join(iuv_dir, iuv_name)
        if not exists(os.path.dirname(output_path)):
            os.makedirs(os.path.dirname(output_path))
        iuvnames.append(iuv_name)

        if not exists(output_path):
            shape = shape_list[i]
            pose = pose_list[i]
            gender = gender_list[i]
            part24 = part24_list[i, :, :-1]

            pose_t = torch.from_numpy(pose).float()
            shape_t = torch.from_numpy(shape).float()
            if gender == 'f':
                vertices = smpl_female(pose_t.unsqueeze(0),
                                       shape_t.unsqueeze(0))
                joint3d = smpl_female.get_smpl_joints(vertices)[0]
            else:
                vertices = smpl_male(pose_t.unsqueeze(0), shape_t.unsqueeze(0))
                joint3d = smpl_male.get_smpl_joints(vertices)[0]

            target_3d = joint3d_list[i]

            origin_2d = joint3d[:, :2]
            target_2d = torch.tensor(part24).float()

            target_2d[:, 0] = (2 * target_2d[:, 0] - W) / W
            target_2d[:, 1] = (2 * target_2d[:, 1] - H) / W
            cam, err = cal_cam(origin_2d, target_2d)

            uv_tmp = render_IUV(img_empty, vertices[0].detach().cpu().numpy(),
                                cam.detach().cpu().numpy(), renderer)
            uv_im = np.zeros(uv_tmp.shape)
            uv_im[:, :, 0] = 1 - uv_tmp[:, :, 0]
            uv_im[:, :, 1] = uv_tmp[:, :, 1]
            mask_im = uv_im.max(axis=-1) > 0
            mask_im = mask_im[:, :, np.newaxis]

            uv_im_int = np.around(uv_im * 255).astype('uint8')
            mask_im_int = mask_im.astype('uint8')

            iuv_im_out = np.concatenate((mask_im_int, uv_im_int), axis=-1)

            flag_plt = False
            if flag_plt:
                import matplotlib.pyplot as plt
                from skimage.draw import disk  # skimage >= 0.19 removed draw.circle
                from models.dense_cnn import warp_feature
                from models.uv_generator import Index_UV_Generator
                uv_sampler = Index_UV_Generator(128, uv_type=uv_type)

                video_dir = join(root_dir, 'data', 'cmu', 'train')
                cap = cv2.VideoCapture(join(video_dir, videoname))
                cap.set(cv2.CAP_PROP_POS_FRAMES, framenum)
                a, img = cap.read()
                # the img should be flipped first
                img = np.fliplr(img)[:, :, ::-1].copy().astype(np.float32)

                joint = part24
                for j2d in joint[:, :2]:
                    rr, cc = disk((j2d[1], j2d[0]), 2, shape=img.shape[0:2])
                    img[rr, cc] = [255, 0, 0]

                plt.subplot(2, 2, 1)
                plt.imshow(img[:, :, ::-1] / 255)

                plt.subplot(2, 2, 2)
                tmp = iuv_im_out
                plt.imshow(tmp[:, :, ::-1])

                plt.subplot(2, 2, 3)
                iuv = torch.FloatTensor(iuv_im_out)
                iuv[:, :, 1:] = iuv[:, :, 1:] / 255.0
                uv_map = warp_feature(
                    iuv.permute(2, 0, 1).unsqueeze(0),
                    torch.Tensor(img).permute(2, 0, 1).unsqueeze(0), 128)
                uv_map = uv_map[0, :3].permute(1, 2, 0).cpu().numpy()
                plt.imshow(uv_map[:, :, ::-1] / 255)

                plt.subplot(2, 2, 4)
                texture = uv_sampler.resample(
                    torch.Tensor(uv_map).unsqueeze(0))[0]
                vert = (vertices[0, :, :2].cpu() + cam[1:]) * cam[0]
                vert[:, 0] = (vert[:, 0] * W + W) / 2
                vert[:, 1] = (vert[:, 1] * W + H) / 2

                vert = vert.long()
                back_img = texture.new_zeros(img.shape)
                for v_i in range(vert.shape[0]):
                    back_img[vert[v_i, 1], vert[v_i, 0], :] += texture[v_i, :]
                # back_img[vert[:, 1], vert[:, 0], :] = texture

                plt.imshow(uv_sampler.mask.cpu().numpy())
                plt.imshow(back_img.cpu().numpy()[:, :, ::-1] / 255)

                # back_img = torch.nn.functional.grid_sample(torch.Tensor(uv_map).permute(2,0,1).unsqueeze(0), torch.Tensor(uv_im * 2 - 1).unsqueeze(0))
                # back_img = back_img[0].permute(1,2,0).cpu().numpy()
                # plt.imshow(back_img[:, :, ::-1])

            cv2.imwrite(output_path, iuv_im_out)

    save_data = dict(data)
    save_data['iuv_names'] = iuvnames
    np.savez(dataset_file, **save_data)
    return 0
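
`cal_cam` is not shown above, but the way `cam` is consumed later (`vert = (vertices[0, :, :2] + cam[1:]) * cam[0]`) implies a weak-perspective fit: find a scale `s` and translation `t` minimizing `||s * (x + t) - y||^2`. Substituting `u = s * t` makes the problem linear in `(s, u)`. A hypothetical closed-form sketch (the real `cal_cam` may differ):

def cal_cam_sketch(origin_2d, target_2d):
    # Least-squares fit of s * (x + t) ~= y, linearized via u = s * t.
    n = origin_2d.shape[0]
    A = origin_2d.new_zeros(2 * n, 3)
    A[0::2, 0] = origin_2d[:, 0]
    A[0::2, 1] = 1.0
    A[1::2, 0] = origin_2d[:, 1]
    A[1::2, 2] = 1.0
    b = target_2d.reshape(-1, 1)
    sol = torch.linalg.lstsq(A, b).solution.squeeze(-1)
    s, u = sol[0], sol[1:]
    cam = torch.cat([s.view(1), u / s])            # (s, t_x, t_y)
    err = (A @ sol.unsqueeze(-1) - b).pow(2).mean()
    return cam, err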
Example #8
    model.to(device)
    model.eval()
    f = h5py.File(config.SMPL_MEAN_PARAMS, 'r')
    init_grot = np.array([np.pi, 0., 0.])
    init_pose = np.hstack([init_grot, f['pose'][3:]])
    init_grot = torch.tensor(init_grot.astype('float32'))
    init_pose = torch.tensor(init_pose.astype('float32'))
    init_shape = torch.tensor(f['shape'][:].astype('float32')).to(device).view(
        1, 10)
    init_cam = torch.tensor([0.9, 0., 0.]).to(device).view(1, 3)
    init_rotmat = batch_rodrigues(init_pose.unsqueeze(0).contiguous())
    init_rot6d = init_rotmat.view(-1, 3,
                                  3)[:, :, :2].contiguous().view(1,
                                                                 -1).to(device)

    smpl_gen = SMPL(config.SMPL_MODEL_DIR).to(device)

    # Setup renderer for visualization
    renderer = SMPLRenderer(img_size=224, face_path='data/smpl_faces.npy')

    # Preprocess input image and generate predictions
    img = process_image(args.img, args.bbox, args.openpose, input_res=224)
    with torch.no_grad():
        _, _, _, \
        _, _, _, \
        pred_rot6d3, pred_shape3, pred_cam3 = \
        model(img.unsqueeze(0).to(device), init_rot6d, init_shape, init_cam)
        pred_rotmat3 = rot6d_to_rotmat(pred_rot6d3).unsqueeze(0)
        pred_verts = smpl_gen(global_orient=pred_rotmat3[:, [0]],
                              body_pose=pred_rotmat3[:, 1:],
                              betas=pred_shape3,
                              pose2rot=False).vertices
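
`rot6d_to_rotmat` undoes the `init_rot6d` construction above (which keeps only the first two columns of each rotation matrix) by Gram-Schmidt, following Zhou et al.'s continuous 6D rotation representation. A sketch, assuming the common implementation:

import torch.nn.functional as F

def rot6d_to_rotmat_sketch(x):
    # x: (B, 24 * 6) -> (B * 24, 3, 3) via Gram-Schmidt on the two columns.
    x = x.view(-1, 3, 2)
    a1, a2 = x[:, :, 0], x[:, :, 1]
    b1 = F.normalize(a1, dim=1)
    b2 = F.normalize(a2 - (b1 * a2).sum(dim=1, keepdim=True) * b1, dim=1)
    b3 = torch.cross(b1, b2, dim=1)
    return torch.stack((b1, b2, b3), dim=-1)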
Example #9
def run_evaluation(model, dataset_name, dataset, result_file,
                   batch_size=32, img_res=224, 
                   num_workers=0, shuffle=False, log_freq=50):
    """Run evaluation on the datasets and metrics we report in the paper. """

    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    # Transfer model to the GPU
    model.to(device)

    # Load SMPL model
    # smpl_neutral = create_smpl(smpl_dir=config.SMPL_MODEL_DIR, gender='neutral').to(device)
    smpl_neutral = SMPL(config.SMPL_MODEL_DIR, gender='neutral').to(device)
    smpl_male = SMPL(config.SMPL_MODEL_DIR, gender='male').to(device)
    smpl_female = SMPL(config.SMPL_MODEL_DIR, gender='female').to(device)

    f = h5py.File(config.SMPL_MEAN_PARAMS, 'r')
    init_grot = np.array([np.pi, 0., 0.])
    init_pose = np.hstack([init_grot, f['pose'][3:]])
    init_grot = torch.tensor(init_grot.astype('float32'))
    init_pose = torch.tensor(init_pose.astype('float32'))
    init_shape = torch.tensor(f['shape'][:].astype('float32')).to(device).view(1, 10)
    init_cam = torch.tensor([0.9, 0., 0.]).to(device).view(1, 3)
    init_rotmat = batch_rodrigues(init_pose.unsqueeze(0).contiguous())
    init_rot6d = init_rotmat.view(-1, 3, 3)[:, :, :2].contiguous().view(1, -1).to(device)
    
    renderer = PartRenderer()
    
    # Regressor for H36m joints
    J_regressor = torch.from_numpy(np.load(config.JOINT_REGRESSOR_H36M)).float()
    
    save_results = result_file is not None
    # Disable shuffling if you want to save the results
    if save_results:
        shuffle = False
    # Create dataloader for the dataset
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
    
    # Pose metrics
    # MPJPE and Reconstruction error for the non-parametric and parametric shapes
    mpjpe = np.zeros(len(dataset))
    recon_err = np.zeros(len(dataset))

    # Shape metrics
    # Mean per-vertex error
    shape_error = np.zeros(len(dataset))
    shape_error_pa = np.zeros(len(dataset))

    # Mask and part metrics
    # Accuracy
    accuracy = 0.
    parts_accuracy = 0.
    # True positive, false positive and false negative
    tp = np.zeros((2,1))
    fp = np.zeros((2,1))
    fn = np.zeros((2,1))
    parts_tp = np.zeros((7,1))
    parts_fp = np.zeros((7,1))
    parts_fn = np.zeros((7,1))
    # Pixel count accumulators
    pixel_count = 0
    parts_pixel_count = 0

    # Store SMPL parameters
    smpl_pose = np.zeros((len(dataset), 72))
    smpl_betas = np.zeros((len(dataset), 10))
    smpl_camera = np.zeros((len(dataset), 3))
    pred_joints = np.zeros((len(dataset), 17, 3))

    eval_pose = False
    eval_shape = False
    eval_masks = False
    eval_parts = False
    # Choose appropriate evaluation for each dataset
    if dataset_name == 'h36m-p1' or dataset_name == 'h36m-p2':
        eval_pose = True
    elif dataset_name == '3dpw':
        eval_shape = True
    elif dataset_name == 'lsp':
        eval_masks = True
        eval_parts = True
        annot_path = config.DATASET_FOLDERS['upi-s1h']

    joint_mapper = constants.H36M_TO_J14
    # Iterate over the entire dataset
    for step, batch in enumerate(tqdm(data_loader, desc='Eval', total=len(data_loader))):
        # Get ground truth annotations from the batch
        gt_pose = batch['pose'].to(device)
        gt_betas = batch['betas'].to(device)
        gt_vertices = smpl_neutral(betas=gt_betas, body_pose=gt_pose[:, 3:], global_orient=gt_pose[:, :3]).vertices
        images = batch['img'].to(device)
        gender = batch['gender'].to(device)
        curr_batch_size = images.shape[0]
        
        with torch.no_grad():
            _, _, _, \
            _, _, _, \
            pred_rot6d3, pred_shape3, pred_cam3 = \
            model(images.to(device), init_rot6d.expand(curr_batch_size, -1), init_shape.expand(curr_batch_size, -1), init_cam.expand(curr_batch_size, -1))
            pred_rotmat = rot6d_to_rotmat(pred_rot6d3).view(-1, 24, 3, 3)
            pred_vertices = smpl_neutral(global_orient=pred_rotmat[:, [0]], body_pose=pred_rotmat[:, 1:], betas=pred_shape3, pose2rot=False).vertices
            pred_camera = pred_cam3

        if save_results:
            rot_pad = torch.tensor([0,0,1], dtype=torch.float32, device=device).view(1,3,1)
            rotmat = torch.cat((pred_rotmat.view(-1, 3, 3), rot_pad.expand(curr_batch_size * 24, -1, -1)), dim=-1)
            pred_pose = tgm.rotation_matrix_to_angle_axis(rotmat).contiguous().view(-1, 72)
            smpl_pose[step * batch_size:step * batch_size + curr_batch_size, :] = pred_pose.cpu().numpy()
            smpl_betas[step * batch_size:step * batch_size + curr_batch_size, :] = pred_shape3.cpu().numpy()
            smpl_camera[step * batch_size:step * batch_size + curr_batch_size, :] = pred_camera.cpu().numpy()
            
        # 3D pose evaluation
        if eval_pose:
            # Regressor broadcasting
            J_regressor_batch = J_regressor[None, :].expand(pred_vertices.shape[0], -1, -1).to(device)
            # Get 14 ground truth joints
            if 'h36m' in dataset_name:
                gt_keypoints_3d = batch['pose_3d'].cuda()
                gt_keypoints_3d = gt_keypoints_3d[:, joint_mapper]

            # Get 14 predicted joints from the mesh
            pred_keypoints_3d = torch.matmul(J_regressor_batch, pred_vertices)
            if save_results:
                pred_joints[step * batch_size:step * batch_size + curr_batch_size, :, :] = pred_keypoints_3d.cpu().numpy()
            pred_pelvis = pred_keypoints_3d[:, [0],:].clone()
            pred_keypoints_3d = pred_keypoints_3d[:, joint_mapper, :]
            pred_keypoints_3d = pred_keypoints_3d - pred_pelvis 

            # Absolute error (MPJPE)
            error = torch.sqrt(((pred_keypoints_3d - gt_keypoints_3d) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
            mpjpe[step * batch_size:step * batch_size + curr_batch_size] = error

            # Reconstruction error
            r_error = reconstruction_error(pred_keypoints_3d.cpu().numpy(), gt_keypoints_3d.cpu().numpy(), reduction=None)
            recon_err[step * batch_size:step * batch_size + curr_batch_size] = r_error

        # Shape evaluation (mean per-vertex error)
        if eval_shape:
            gt_vertices = smpl_male(global_orient=gt_pose[:, :3], body_pose=gt_pose[:, 3:], betas=gt_betas).vertices
            gt_vertices_female = smpl_female(global_orient=gt_pose[:, :3], body_pose=gt_pose[:, 3:], betas=gt_betas).vertices
            gt_vertices[gender == 1, :, :] = gt_vertices_female[gender == 1, :, :]

            # Absolute per-vertex error
            shape_err = torch.sqrt(((pred_vertices - gt_vertices) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
            shape_error[step * batch_size:step * batch_size + curr_batch_size] = shape_err

            # Procrustes-aligned per-vertex error
            shape_r_error = reconstruction_error(pred_vertices.cpu().numpy(), gt_vertices.cpu().numpy(), reduction=None)
            shape_error_pa[step * batch_size:step * batch_size + curr_batch_size] = shape_r_error

        # If mask or part evaluation, render the mask and part images
        if eval_masks or eval_parts:
            mask, parts = renderer(pred_vertices, pred_camera)

        # Mask evaluation (for LSP)
        if eval_masks:
            center = batch['center'].cpu().numpy()
            scale = batch['scale'].cpu().numpy()
            # Dimensions of original image
            orig_shape = batch['orig_shape'].cpu().numpy()
            for i in range(curr_batch_size):
                # After rendering, convert the image back to the original resolution
                pred_mask = uncrop(mask[i].cpu().numpy(), center[i], scale[i], (224, 224), orig_shape[i]) > 0
                # Load gt mask
                gt_mask = cv2.imread(os.path.join(annot_path, batch['maskname'][i]), 0) > 0
                # Evaluation consistent with the original UP-3D code
                accuracy += (gt_mask == pred_mask).sum()
                pixel_count += np.prod(np.array(gt_mask.shape))
                for c in range(2):
                    cgt = gt_mask == c
                    cpred = pred_mask == c
                    tp[c] += (cgt & cpred).sum()
                    fp[c] += (~cgt & cpred).sum()
                    fn[c] += (cgt & ~cpred).sum()
                f1 = 2 * tp / (2 * tp + fp + fn)

        # Part evaluation (for LSP)
        if eval_parts:
            center = batch['center'].cpu().numpy()
            scale = batch['scale'].cpu().numpy()
            orig_shape = batch['orig_shape'].cpu().numpy()
            for i in range(curr_batch_size):
                pred_parts = uncrop(parts[i].cpu().numpy().astype(np.uint8), center[i], scale[i], (224, 224), orig_shape[i])
                # Load gt part segmentation
                gt_parts = cv2.imread(os.path.join(annot_path, batch['partname'][i]), 0)
                # Evaluation consistent with the original UP-3D code
                # 6 parts + background
                for c in range(7):
                    cgt = gt_parts == c
                    cpred = pred_parts == c
                    cpred[gt_parts == 255] = 0
                    parts_tp[c] += (cgt & cpred).sum()
                    parts_fp[c] += (~cgt & cpred).sum()
                    parts_fn[c] += (cgt & ~cpred).sum()
                gt_parts[gt_parts == 255] = 0
                pred_parts[pred_parts == 255] = 0
                parts_f1 = 2 * parts_tp / (2 * parts_tp + parts_fp + parts_fn)
                parts_accuracy += (gt_parts == pred_parts).sum()
                parts_pixel_count += np.prod(np.array(gt_parts.shape))

        # Print intermediate results during evaluation
        if step % log_freq == log_freq - 1:
            if eval_pose:
                print('MPJPE: ' + str(1000 * mpjpe[:step * batch_size].mean()))
                print('Reconstruction Error: ' + str(1000 * recon_err[:step * batch_size].mean()))
                print()
            if eval_masks:
                print('Accuracy: ', accuracy / pixel_count)
                print('F1: ', f1.mean())
                print()
            if eval_parts:
                print('Parts Accuracy: ', parts_accuracy / parts_pixel_count)
                print('Parts F1 (BG): ', parts_f1[[0,1,2,3,4,5,6]].mean())
                print()
            if eval_shape:
                print('Shape Error (Absolute): ', str(1000 * shape_error[:step * batch_size].mean()))
                print('Shape Error (PA): ', str(1000 * shape_error_pa[:step * batch_size].mean()))
                print()

    # Save reconstructions to a file for further processing
    if save_results:
        np.savez(result_file, pred_joints=pred_joints, pose=smpl_pose, betas=smpl_betas, camera=smpl_camera)
    # Print final results during evaluation
    print('*** Final Results ***')
    print()
    if eval_pose:
        print('MPJPE: ' + str(1000 * mpjpe.mean()))
        print('Reconstruction Error: ' + str(1000 * recon_err.mean()))
        print()
    if eval_masks:
        print('Accuracy: ', accuracy / pixel_count)
        print('F1: ', f1.mean())
        print()
    if eval_parts:
        print('Parts Accuracy: ', parts_accuracy / parts_pixel_count)
        print('Parts F1 (BG): ', parts_f1[[0,1,2,3,4,5,6]].mean())
        print()
    if eval_shape:
        print('Shape Error (Absolute): ', str(1000 * shape_error.mean()))
        print('Shape Error (PA): ', str(1000 * shape_error_pa.mean()))
        print()
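
`reconstruction_error` used throughout is the Procrustes-aligned error: each prediction is mapped onto the ground truth by the optimal similarity transform before distances are measured. A numpy sketch of that alignment for one sample, assuming the usual SPIN-style implementation:

def compute_similarity_transform_sketch(S1, S2):
    # Align S1 to S2 with the similarity transform (s, R, t) minimizing
    # ||s * R @ S1 + t - S2||^2. S1, S2: (3, N) point matrices.
    mu1 = S1.mean(axis=1, keepdims=True)
    mu2 = S2.mean(axis=1, keepdims=True)
    X1, X2 = S1 - mu1, S2 - mu2
    K = X1 @ X2.T
    U, _, Vh = np.linalg.svd(K)
    Z = np.eye(3)
    Z[-1, -1] = np.sign(np.linalg.det(Vh.T @ U.T))  # guard against reflections
    R = Vh.T @ Z @ U.T
    scale = np.trace(R @ K) / (X1 ** 2).sum()
    t = mu2 - scale * (R @ mu1)
    # The per-sample error is then the mean Euclidean distance between the
    # aligned prediction and S2 over all points.
    return scale * (R @ S1) + t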