Example #1
    def regress(self, img_original, body_bbox_list):
        """
            args: 
                img_original: original raw image (BGR order by using cv2.imread)
                body_bbox: bounding box around the target: (minX, minY, width, height)
            outputs:
                pred_vertices_img:
                pred_joints_vis_img:
                pred_rotmat
                pred_betas
                pred_camera
                bbox: [bbr[0], bbr[1],bbr[0]+bbr[2], bbr[1]+bbr[3]])
                bboxTopLeft:  bbox top left (redundant)
                boxScale_o2n: bbox scaling factor (redundant) 
        """
        pred_output_list = list()

        for body_bbox in body_bbox_list:
            img, norm_img, boxScale_o2n, bboxTopLeft, bbox = process_image_bbox(
                img_original, body_bbox, input_res=constants.IMG_RES)

            if img is None:
                pred_output_list.append(None)
                continue

            with torch.no_grad():
                # model forward
                pred_rotmat, pred_betas, pred_camera = self.model_regressor(
                    norm_img.to(self.device))
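                # pred_rotmat: (1, 24, 3, 3) per-joint rotation matrices
                # pred_betas:  (1, 10) SMPL shape parameters
                # pred_camera: (1, 3) weak-perspective camera (scale, transX, transY)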

                # Convert rotation matrices to angle-axis, since hand poses are always given in angle-axis
                pred_aa = rotmat3x3_to_angleaxis(pred_rotmat)
                pred_aa = pred_aa.view(pred_aa.shape[0], -1)
                smpl_output = self.smpl(betas=pred_betas,
                                        body_pose=pred_aa[:, 3:],
                                        global_orient=pred_aa[:, :3],
                                        pose2rot=True)
                pred_vertices = smpl_output.vertices
                pred_joints_3d = smpl_output.joints

                pred_vertices = pred_vertices[0].cpu().numpy()

                pred_camera = pred_camera.cpu().numpy().ravel()
                camScale = pred_camera[0]  # *1.15
                camTrans = pred_camera[1:]

                pred_output = dict()
                # Convert mesh to original image space (X,Y are aligned to image)
                # 1. SMPL -> 2D bbox
                # 2. 2D bbox -> original 2D image
                pred_vertices_bbox = convert_smpl_to_bbox(
                    pred_vertices, camScale, camTrans)
                pred_vertices_img = convert_bbox_to_oriIm(
                    pred_vertices_bbox, boxScale_o2n, bboxTopLeft,
                    img_original.shape[1], img_original.shape[0])

                # Convert joints to original image space (X,Y are aligned to image)
                pred_joints_3d = pred_joints_3d[0].cpu().numpy()  # (49, 3)
                pred_joints_vis = pred_joints_3d[:, :3]  # (49, 3)
                pred_joints_vis_bbox = convert_smpl_to_bbox(
                    pred_joints_vis, camScale, camTrans)
                pred_joints_vis_img = convert_bbox_to_oriIm(
                    pred_joints_vis_bbox, boxScale_o2n, bboxTopLeft,
                    img_original.shape[1], img_original.shape[0])

                # Output
                pred_output['img_cropped'] = img[:, :, ::-1]
                pred_output['pred_vertices_smpl'] = smpl_output.vertices[
                    0].cpu().numpy()  # SMPL vertex in original smpl space
                pred_output[
                    'pred_vertices_img'] = pred_vertices_img  # SMPL vertex in image space
                pred_output[
                    'pred_joints_img'] = pred_joints_vis_img  # SMPL joints in image space

                # gu.rotation_matrix_to_angle_axis expects (N, 3, 4) inputs, so pad
                # the 3x3 rotation matrices with an extra zero column
                pred_rotmat_tensor = torch.zeros((1, 24, 3, 4),
                                                 dtype=torch.float32)
                pred_rotmat_tensor[:, :, :, :3] = pred_rotmat.detach().cpu()
                pred_aa_tensor = gu.rotation_matrix_to_angle_axis(
                    pred_rotmat_tensor.squeeze())
                pred_output['pred_body_pose'] = pred_aa_tensor.cpu().numpy(
                ).reshape(1, 72)

                pred_output['pred_rotmat'] = pred_rotmat.detach().cpu().numpy(
                )  # (1, 24, 3, 3)
                pred_output['pred_betas'] = pred_betas.detach().cpu().numpy(
                )  # (1, 10)

                pred_output['pred_camera'] = pred_camera
                pred_output['bbox_top_left'] = bboxTopLeft
                pred_output['bbox_scale_ratio'] = boxScale_o2n
                pred_output['faces'] = self.smpl.faces

                # For SMPL-X models, additionally convert the predicted hand joints
                # into original image coordinates
                if self.use_smplx:
                    img_center = np.array(
                        (img_original.shape[1], img_original.shape[0])) * 0.5
                    # right hand
                    pred_joints = smpl_output.right_hand_joints[0].cpu().numpy(
                    )
                    pred_joints_bbox = convert_smpl_to_bbox(
                        pred_joints, camScale, camTrans)
                    pred_joints_img = convert_bbox_to_oriIm(
                        pred_joints_bbox, boxScale_o2n, bboxTopLeft,
                        img_original.shape[1], img_original.shape[0])
                    pred_output[
                        'right_hand_joints_img_coord'] = pred_joints_img
                    # left hand
                    pred_joints = smpl_output.left_hand_joints[0].cpu().numpy()
                    pred_joints_bbox = convert_smpl_to_bbox(
                        pred_joints, camScale, camTrans)
                    pred_joints_img = convert_bbox_to_oriIm(
                        pred_joints_bbox, boxScale_o2n, bboxTopLeft,
                        img_original.shape[1], img_original.shape[0])
                    pred_output['left_hand_joints_img_coord'] = pred_joints_img

                pred_output_list.append(pred_output)

        return pred_output_list
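
A minimal call sketch for the method above, assuming body_mocap is an already-constructed instance of the regressor class defining regress(). The helper name run_body_regression and the image path are hypothetical; the output keys come from the code above.

import cv2

def run_body_regression(body_mocap, image_path, body_bbox_list):
    # body_mocap: instance of the regressor class above (hypothetical setup)
    # body_bbox_list: list of (minX, minY, width, height) boxes
    img_original = cv2.imread(image_path)  # BGR order, as regress() expects
    pred_output_list = body_mocap.regress(img_original, body_bbox_list)
    for pred_output in pred_output_list:
        if pred_output is None:
            continue  # bbox that could not be processed
        print(pred_output['pred_vertices_img'].shape)  # SMPL vertices in image space
        print(pred_output['pred_joints_img'].shape)    # SMPL joints in image space
        print(pred_output['pred_body_pose'].shape)     # (1, 72) angle-axis body pose
    return pred_output_list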
Example #2
def intergration_copy_paste(pred_body_list, pred_hand_list, smplx_model,
                            image_shape):
    integral_output_list = list()
    for i in range(len(pred_body_list)):
        body_info = pred_body_list[i]
        hand_info = pred_hand_list[i]
        if body_info is None or hand_info is None:
            integral_output_list.append(None)
            continue

        # "Copy and paste" integration: keep the body regressor's pose and shape,
        # paste in the hand regressor's finger poses, and transfer each hand's
        # global wrist orientation into the body's kinematic chain.
        pred_betas = torch.from_numpy(body_info['pred_betas']).cuda()
        pred_rotmat = torch.from_numpy(body_info['pred_rotmat']).cuda()

        if hand_info['right_hand'] is not None:
            right_hand_pose = torch.from_numpy(
                hand_info['right_hand']['pred_hand_pose'][:, 3:]).cuda()
            right_hand_global_orient = torch.from_numpy(
                hand_info['right_hand']['pred_hand_pose'][:, :3]).cuda()
            right_hand_local_orient = transfer_hand_wrist(
                smplx_model, pred_rotmat[0], right_hand_global_orient,
                'right_hand', 'l2g')
            pred_rotmat[0, 21] = right_hand_local_orient  # joint 21: right wrist
        else:
            right_hand_pose = torch.from_numpy(
                np.zeros((1, 45), dtype=np.float32)).cuda()

        if hand_info['left_hand'] is not None:
            left_hand_pose = torch.from_numpy(
                hand_info['left_hand']['pred_hand_pose'][:, 3:]).cuda()
            left_hand_global_orient = torch.from_numpy(
                hand_info['left_hand']['pred_hand_pose'][:, :3]).cuda()
            left_hand_local_orient = transfer_hand_wrist(
                smplx_model, pred_rotmat[0], left_hand_global_orient,
                'left_hand', 'l2g')
            pred_rotmat[0, 20] = left_hand_local_orient  # joint 20: left wrist
        else:
            left_hand_pose = torch.from_numpy(
                np.zeros((1, 45), dtype=np.float32)).cuda()

        # smplx_output = smplx_model(
        #     betas = pred_betas,
        #     body_pose = pred_rotmat[:,1:],
        #     global_orient = pred_rotmat[:,0].unsqueeze(1),
        #     right_hand_pose = right_hand_pose,
        #     left_hand_pose= left_hand_pose,
        #     pose2rot = False)

        # Convert rotation matrices to angle-axis, since hand poses are always given in angle-axis
        pred_aa = rotmat3x3_to_angleaxis(pred_rotmat)
        pred_aa = pred_aa.view(pred_aa.shape[0], -1)
        smplx_output = smplx_model(betas=pred_betas,
                                   body_pose=pred_aa[:, 3:],
                                   global_orient=pred_aa[:, :3],
                                   right_hand_pose=right_hand_pose,
                                   left_hand_pose=left_hand_pose,
                                   pose2rot=True)

        pred_vertices = smplx_output.vertices
        pred_vertices = pred_vertices[0].detach().cpu().numpy()
        pred_joints_3d = smplx_output.joints
        pred_joints_3d = pred_joints_3d[0].detach().cpu().numpy()

        camScale = body_info['pred_camera'][0]
        camTrans = body_info['pred_camera'][1:]
        bbox_top_left = body_info['bbox_top_left']
        bbox_scale_ratio = body_info['bbox_scale_ratio']

        integral_output = dict()
        integral_output['pred_vertices_smpl'] = pred_vertices
        integral_output['faces'] = smplx_model.faces
        integral_output['bbox_scale_ratio'] = bbox_scale_ratio
        integral_output['bbox_top_left'] = bbox_top_left
        integral_output['pred_camera'] = body_info['pred_camera']

        pred_rotmat_tensor = torch.zeros((1, 24, 3, 4), dtype=torch.float32)
        pred_rotmat_tensor[:, :, :, :3] = pred_rotmat.detach().cpu()
        pred_aa_tensor = gu.rotation_matrix_to_angle_axis(
            pred_rotmat_tensor.squeeze())
        integral_output['pred_body_pose'] = pred_aa_tensor.cpu().numpy(
        ).reshape(1, 72)

        integral_output['pred_betas'] = pred_betas.detach().cpu().numpy()
        integral_output['pred_left_hand_pose'] = left_hand_pose.detach().cpu(
        ).numpy()
        integral_output['pred_right_hand_pose'] = right_hand_pose.detach().cpu(
        ).numpy()

        # convert mesh to original image space (X,Y are aligned to image)
        pred_vertices_bbox = convert_smpl_to_bbox(pred_vertices, camScale,
                                                  camTrans)
        pred_vertices_img = convert_bbox_to_oriIm(pred_vertices_bbox,
                                                  bbox_scale_ratio,
                                                  bbox_top_left,
                                                  image_shape[1],
                                                  image_shape[0])
        integral_output['pred_vertices_img'] = pred_vertices_img

        integral_output_list.append(integral_output)

    return integral_output_list
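
A minimal call sketch for the integration function above. The smplx.create() arguments and the model path are assumptions for illustration; the expected structure of pred_hand_list (left_hand/right_hand entries holding a (1, 48) pred_hand_pose) and the output keys follow the code above.

import cv2
import smplx

def run_integration(pred_body_list, pred_hand_list, image_path):
    # Hypothetical SMPL-X setup: hand PCA disabled because the function above
    # passes full 45-dim per-hand finger poses.
    smplx_model = smplx.create('/path/to/smplx_models', model_type='smplx',
                               use_pca=False, batch_size=1).cuda()
    img = cv2.imread(image_path)
    # pred_body_list: per-person outputs of the body regressor (Example #1)
    # pred_hand_list: per-person dicts with 'left_hand'/'right_hand' entries,
    #                 each holding a (1, 48) 'pred_hand_pose' (3 wrist + 45 finger values)
    integral_output_list = intergration_copy_paste(
        pred_body_list, pred_hand_list, smplx_model, img.shape)
    for out in integral_output_list:
        if out is None:
            continue  # skipped when either the body or the hand prediction is missing
        print(out['pred_vertices_img'].shape)  # SMPL-X vertices in image space
    return integral_output_list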