    def _get_frame_gt(dataset_objects: List[bpy.types.Mesh],
                      unit_scaling: float,
                      ignore_dist_thres: float,
                      destination_frame: List[str] = ["X", "-Y", "-Z"]):
        """ Returns GT pose annotations between active camera and objects.
        :param dataset_objects: Save annotations for these objects.
        :param unit_scaling: Scale factor for translations, e.g. 1000. to output poses in mm.
        :param ignore_dist_thres: Distance in meters between camera and object above which the object is ignored, mostly due to objects that escaped the physics simulation.
        :param destination_frame: Transform poses from Blender internal coordinates to OpenCV coordinates.
        :return: A list of GT camera-object pose annotations for scene_gt.json.
        """

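        # Camera-to-world transform of the active camera, converted from the Blender
        # camera frame to the OpenCV/BOP frame via `destination_frame`.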
        H_c2w_opencv = Matrix(
            WriterUtility.get_cam_attribute(
                bpy.context.scene.camera,
                'cam2world_matrix',
                local_frame_change=destination_frame))

        frame_gt = []
        for obj in dataset_objects:

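            # Model-to-world transform of the object.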
            H_m2w = Matrix(
                WriterUtility.get_common_attribute(obj, 'matrix_world'))

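            # Model-to-camera transform, split into a 3x3 rotation and a translation vector.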
            cam_H_m2c = H_c2w_opencv.inverted() @ H_m2w
            cam_R_m2c = cam_H_m2c.to_quaternion().to_matrix()
            cam_t_m2c = cam_H_m2c.to_translation()

            # ignore objects that are too far away from the camera (e.g. because they fell through the plane during the physics simulation)
            if np.linalg.norm(list(cam_t_m2c)) <= ignore_dist_thres:
                cam_t_m2c = list(cam_t_m2c * unit_scaling)
                frame_gt.append({
                    'cam_R_m2c': list(cam_R_m2c[0]) + list(cam_R_m2c[1]) + list(cam_R_m2c[2]),
                    'cam_t_m2c': cam_t_m2c,
                    'obj_id': obj["category_id"]
                })
            else:
                print(f'Ignored object {obj["category_id"]}: it is farther away from the camera than '
                      f'"ignore_dist_thres" ({ignore_dist_thres}). This usually means that the object fell '
                      'through a plane during the physics simulation or that its pose is not given in meters.')

        return frame_gt
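
    # Illustrative example (not produced by this file): a single entry of the list
    # returned by _get_frame_gt, as it later appears in scene_gt.json. The values
    # below are hypothetical; the rotation is the 3x3 matrix flattened row-wise and
    # the translation is in mm when unit_scaling=1000.
    #
    #   {
    #       "cam_R_m2c": [0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
    #       "cam_t_m2c": [12.5, -40.0, 780.0],
    #       "obj_id": 3
    #   }
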
    def _get_frame_camera(save_world2cam,
                          depth_scale=1.0,
                          unit_scaling=1000.,
                          destination_frame=["X", "-Y", "-Z"]):
        """ Returns camera parameters for the active camera.
        :param save_world2cam: If true, the world-to-camera transformations "cam_R_w2c", "cam_t_w2c" are saved in scene_camera.json.
        :param depth_scale: Multiply the uint16 output depth image with this factor to get depth in mm.
        :param unit_scaling: Scale factor for translations, e.g. 1000. to output poses in mm.
        :param destination_frame: Transform poses from Blender internal coordinates to OpenCV coordinates.
        :return: dict containing info for scene_camera.json
        """

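        # Intrinsic camera matrix K of the active camera as a nested list (rows of a 3x3 matrix).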
        cam_K = WriterUtility.get_cam_attribute(bpy.context.scene.camera,
                                                'cam_K')

        frame_camera_dict = {
            'cam_K': cam_K[0] + cam_K[1] + cam_K[2],
            'depth_scale': depth_scale
        }

        if save_world2cam:
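            # Camera-to-world transform in OpenCV coordinates; inverted below to obtain
            # the world-to-camera transform expected by the BOP format.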
            H_c2w_opencv = Matrix(
                WriterUtility.get_cam_attribute(
                    bpy.context.scene.camera,
                    'cam2world_matrix',
                    local_frame_change=destination_frame))

            H_w2c_opencv = H_c2w_opencv.inverted()
            R_w2c_opencv = H_w2c_opencv.to_quaternion().to_matrix()
            t_w2c_opencv = H_w2c_opencv.to_translation() * unit_scaling

            frame_camera_dict['cam_R_w2c'] = list(R_w2c_opencv[0]) + list(
                R_w2c_opencv[1]) + list(R_w2c_opencv[2])
            frame_camera_dict['cam_t_w2c'] = list(t_w2c_opencv)

        return frame_camera_dict
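
    # Minimal sketch (assumption, not part of this writer): how downstream code could
    # rebuild the intrinsic matrix and the world-to-camera pose from a
    # scene_camera.json entry, e.g. with numpy:
    #
    #   K = np.array(frame_camera_dict['cam_K']).reshape(3, 3)
    #   R_w2c = np.array(frame_camera_dict['cam_R_w2c']).reshape(3, 3)
    #   t_w2c = np.array(frame_camera_dict['cam_t_w2c'])  # in mm when unit_scaling=1000.
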
    def _write_camera(camera_path: str, depth_scale: float = 1.0):
        """ Writes camera.json into dataset_dir.
        :param camera_path: Path to camera.json
        :param depth_scale: Multiply the uint16 output depth image with this factor to get depth in mm.
        """

        cam_K = WriterUtility.get_cam_attribute(bpy.context.scene.camera,
                                                'cam_K')
        camera = {
            'cx': cam_K[0][2],
            'cy': cam_K[1][2],
            'depth_scale': depth_scale,
            'fx': cam_K[0][0],
            'fy': cam_K[1][1],
            'height': bpy.context.scene.render.resolution_y,
            'width': bpy.context.scene.render.resolution_x
        }

        BopWriterUtility._save_json(camera_path, camera)
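
    # Example camera.json written by _write_camera (illustrative values for a 640x480
    # render; fx, fy, cx, cy come from cam_K, depth_scale from the argument):
    #
    #   {
    #       "cx": 320.0, "cy": 240.0,
    #       "depth_scale": 1.0,
    #       "fx": 572.4, "fy": 573.6,
    #       "height": 480, "width": 640
    #   }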