def _cam2world_matrix_from_cam_extrinsics(self,
                                              config: Config) -> np.ndarray:
        """ Determines camera extrinsics by using the given config and returns them in form of a cam to world frame transformation matrix.

        :param config: The configuration object.
        :return: The 4x4 cam to world transformation matrix.
        """
        if not config.has_param("cam2world_matrix"):
            # Print a warning if local_frame_change is used with attributes other than cam2world_matrix
            if self.local_frame_change != ["X", "Y", "Z"]:
                print(
                    "Warning: The local_frame_change parameter is currently only supported when setting the cam2world_matrix attribute."
                )

            position = change_coordinate_frame_of_point(
                config.get_vector3d("location", [0, 0, 0]),
                self.world_frame_change)

            # Rotation
            rotation_format = config.get_string("rotation/format", "euler")
            value = config.get_vector3d("rotation/value", [0, 0, 0])
            # Transform to blender coord frame
            value = change_coordinate_frame_of_point(value,
                                                     self.world_frame_change)

            if rotation_format == "euler":
                # Rotation, specified as euler angles
                rotation_matrix = Euler(value, 'XYZ').to_matrix()
            elif rotation_format == "forward_vec":
                # Convert forward vector to euler angle (Assume Up = Z)
                rotation_matrix = CameraUtility.rotation_from_forward_vec(
                    value)
            elif rotation_format == "look_at":
                # Convert forward vector to euler angle (Assume Up = Z)
                rotation_matrix = CameraUtility.rotation_from_forward_vec(
                    value - position)
            else:
                raise Exception("No such rotation format:" +
                                str(rotation_format))

            if rotation_format == "look_at" or rotation_format == "forward_vec":
                inplane_rot = config.get_float("rotation/inplane_rot", 0.0)
                rotation_matrix = np.matmul(
                    rotation_matrix,
                    Euler((0.0, 0.0, inplane_rot)).to_matrix())

            cam2world_matrix = build_transformation_mat(
                position, rotation_matrix)
        else:
            cam2world_matrix = np.array(
                config.get_list("cam2world_matrix")).reshape(4, 4).astype(
                    np.float32)
            cam2world_matrix = change_source_coordinate_frame_of_transformation_matrix(
                cam2world_matrix, self.local_frame_change)
            cam2world_matrix = change_target_coordinate_frame_of_transformation_matrix(
                cam2world_matrix, self.world_frame_change)
        return cam2world_matrix

    def _set_cam_extrinsics(self, config: Config, frame: Optional[int] = None):
        """ Sets camera extrinsics according to the config.

        :param config: A configuration object with cam extrinsics.
        :param frame: Optional, the frame to set the camera pose to.
        """
        if config.has_param("frame"):
            frame = config.get_int("frame")

        cam2world_matrix = self._cam2world_matrix_from_cam_extrinsics(config)
        CameraUtility.add_camera_pose(cam2world_matrix, frame)
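
# A sketch (plain dict form, keys taken from the parsing code above; values are
# illustrative) of the extrinsics config that _cam2world_matrix_from_cam_extrinsics
# understands:
example_cam_extrinsics = {
    "location": [0, -5, 2],
    "rotation": {
        "format": "look_at",   # one of "euler", "forward_vec", "look_at"
        "value": [0, 0, 0],    # look_at target here; euler angles or a forward vector otherwise
        "inplane_rot": 0.0     # only used with "look_at" / "forward_vec"
    }
}
# Alternatively, "cam2world_matrix" may be given directly as a flat list of 16 values,
# in which case local_frame_change and world_frame_change are applied to it.
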
def stereo_global_matching(
    color_images: List[np.ndarray],
    depth_max: Optional[float] = None,
    window_size: int = 7,
    num_disparities: int = 32,
    min_disparity: int = 0,
    disparity_filter: bool = True,
    depth_completion: bool = True
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
    """ Does the stereo global matching in the following steps:
    1. Collect camera object and its state,
    2. For each frame, load left and right images and call the `sgm()` methode.
    3. Write the results to a numpy file.

    :param color_images: A list of stereo images, where each entry has the shape [2, height, width, 3].
    :param depth_max: The maximum depth value for clipping the resulting depth values. If None, the sum of the distance_start and distance_range configured for distance rendering is used.
    :param window_size: Semi-global matching kernel size. Should be an odd number.
    :param num_disparities: Semi-global matching number of disparities. Should be > 0 and divisible by 16.
    :param min_disparity: Semi-global matching minimum disparity.
    :param disparity_filter: Applies post-processing of the generated disparity map using WLS filter.
    :param depth_completion: Applies basic depth completion using image processing techniques.
    :return: Returns the computed depth and disparity images for all given frames.
    """
    # Collect the camera object and its camera data
    cam_ob = bpy.context.scene.camera
    cam = cam_ob.data

    baseline = cam.stereo.interocular_distance
    if not baseline:
        raise Exception(
            "Stereo parameters are not set. Make sure to enable RGB stereo rendering before this module."
        )

    if depth_max is None:
        depth_max = bpy.context.scene.world.mist_settings.start + bpy.context.scene.world.mist_settings.depth

    focal_length = CameraUtility.get_intrinsics_as_K_matrix()[0, 0]

    depth_frames = []
    disparity_frames = []
    for frame, color_image in enumerate(color_images):
        depth, disparity = StereoGlobalMatching._sgm(
            color_image[0], color_image[1], baseline, depth_max, focal_length,
            window_size, num_disparities, min_disparity, disparity_filter,
            depth_completion)

        depth_frames.append(depth)
        disparity_frames.append(disparity)

    return depth_frames, disparity_frames
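
# A minimal usage sketch; `rendered_stereo_pairs` (one [2, height, width, 3] array
# per frame) and `output_dir` are placeholders, not part of the API above:
#
# depth_frames, disparity_frames = stereo_global_matching(rendered_stereo_pairs)
# for i, depth in enumerate(depth_frames):
#     np.save(os.path.join(output_dir, "stereo_depth_%04d.npy" % i), depth)
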
Example #4
    @staticmethod
    def set_default_parameters():
        """ Loads and sets default parameters defined in DefaultConfig.py """
        # Set default intrinsics
        CameraUtility.set_intrinsics_from_blender_params(
            DefaultConfig.fov, DefaultConfig.resolution_x,
            DefaultConfig.resolution_y, DefaultConfig.clip_start,
            DefaultConfig.clip_end, DefaultConfig.pixel_aspect_x,
            DefaultConfig.pixel_aspect_y, DefaultConfig.shift_x,
            DefaultConfig.shift_y, DefaultConfig.lens_unit)
        CameraUtility.set_stereo_parameters(
            DefaultConfig.stereo_convergence_mode,
            DefaultConfig.stereo_convergence_distance,
            DefaultConfig.stereo_interocular_distance)

        # Init renderer
        RendererUtility._render_init()
        RendererUtility.set_samples(DefaultConfig.samples)
        addon_utils.enable("render_auto_tile_size")
        RendererUtility.toggle_auto_tile_size(True)

        # Set number of cpu cores used for rendering (1 thread is always used for coordination => 1
        # cpu thread means GPU-only rendering)
        RendererUtility.set_cpu_threads(0)
        RendererUtility.set_denoiser(DefaultConfig.denoiser)

        RendererUtility.set_simplify_subdivision_render(
            DefaultConfig.simplify_subdivision_render)

        RendererUtility.set_light_bounces(
            DefaultConfig.diffuse_bounces, DefaultConfig.glossy_bounces,
            DefaultConfig.ao_bounces_render, DefaultConfig.max_bounces,
            DefaultConfig.transmission_bounces,
            DefaultConfig.transparency_bounces, DefaultConfig.volume_bounces)

        RendererUtility.set_output_format(DefaultConfig.file_format,
                                          DefaultConfig.color_depth,
                                          DefaultConfig.enable_transparency,
                                          DefaultConfig.jpg_quality)
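
    # For reference, a sketch of the DefaultConfig attributes consumed above. Only the
    # intrinsics-related values are grounded in this file (they match the defaults listed
    # in the _set_cam_intrinsics docstring further below); treat the rest as assumptions:
    #
    # class DefaultConfig:
    #     fov = 0.691111
    #     resolution_x, resolution_y = 512, 512
    #     clip_start, clip_end = 0.1, 1000
    #     pixel_aspect_x, pixel_aspect_y = 1, 1
    #     shift_x, shift_y = 0, 0
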
Example #5
    def sample_and_validate_cam_pose(self, config: Config,
                                     existing_poses: List[np.ndarray]) -> bool:
        """ Samples a new camera pose, sets the parameters of the given camera object accordingly and validates it.

        :param config: The config object describing how to sample
        :param existing_poses: A list of already sampled valid poses.
        :return: True, if the sampled pose was valid
        """
        # Sample camera extrinsics (we do not set them yet for performance reasons)
        cam2world_matrix = self._sample_pose(config)

        if self._is_pose_valid(cam2world_matrix, existing_poses):
            # Set camera extrinsics as the pose is valid
            frame = CameraUtility.add_camera_pose(cam2world_matrix)
            # Optional callback
            self._on_new_pose_added(cam2world_matrix, frame)
            # Add to the list of added cam poses
            existing_poses.append(cam2world_matrix)
            return True
        else:
            return False
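
    # A hedged sketch of the rejection-sampling driver this method is built for; the
    # method name, `number_of_samples` and `max_tries` are assumptions, not original code:
    def _sample_cam_poses_sketch(self, config: Config, number_of_samples: int,
                                 max_tries: int = 10000) -> List[np.ndarray]:
        """ Keeps sampling camera poses until enough valid ones were found
        or the retry budget is exhausted. """
        existing_poses: List[np.ndarray] = []
        tries = 0
        while len(existing_poses) < number_of_samples and tries < max_tries:
            self.sample_and_validate_cam_pose(config, existing_poses)
            tries += 1
        return existing_poses
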
def enable_normals_output(output_dir: Optional[str] = None, file_prefix: str = "normals_",
                          output_key: str = "normals"):
    """ Enables writing normal images.

    Normal images will be written in the form of .exr files during the next rendering.

    :param output_dir: The directory to write files to, if this is None the temporary directory is used.
    :param file_prefix: The prefix to use for writing the files.
    :param output_key: The key to use for registering the normal output.
    """
    if output_dir is None:
        output_dir = Utility.get_temporary_directory()

    bpy.context.scene.render.use_compositing = True
    bpy.context.scene.use_nodes = True
    tree = bpy.context.scene.node_tree
    links = tree.links

    # Use existing render layer
    render_layer_node = Utility.get_the_one_node_with_type(tree.nodes, 'CompositorNodeRLayers')

    separate_rgba = tree.nodes.new("CompositorNodeSepRGBA")
    space_between_nodes_x = 200
    space_between_nodes_y = -300
    separate_rgba.location.x = space_between_nodes_x
    separate_rgba.location.y = space_between_nodes_y
    links.new(render_layer_node.outputs["Normal"], separate_rgba.inputs["Image"])

    combine_rgba = tree.nodes.new("CompositorNodeCombRGBA")
    combine_rgba.location.x = space_between_nodes_x * 14

    c_channels = ["R", "G", "B"]
    offset = space_between_nodes_x * 2
    multiplication_values: List[List[bpy.types.Node]] = [[], [], []]
    channel_results = {}
    for row_index, channel in enumerate(c_channels):
        # matrix multiplication
        multipliers = []
        for column in range(3):
            multiply = tree.nodes.new("CompositorNodeMath")
            multiply.operation = "MULTIPLY"
            multiply.inputs[1].default_value = 0  # setting at the end for all frames
            multiply.location.x = column * space_between_nodes_x + offset
            multiply.location.y = row_index * space_between_nodes_y
            links.new(separate_rgba.outputs[c_channels[column]], multiply.inputs[0])
            multipliers.append(multiply)
            multiplication_values[row_index].append(multiply)

        first_add = tree.nodes.new("CompositorNodeMath")
        first_add.operation = "ADD"
        first_add.location.x = space_between_nodes_x * 5 + offset
        first_add.location.y = row_index * space_between_nodes_y
        links.new(multipliers[0].outputs["Value"], first_add.inputs[0])
        links.new(multipliers[1].outputs["Value"], first_add.inputs[1])

        second_add = tree.nodes.new("CompositorNodeMath")
        second_add.operation = "ADD"
        second_add.location.x = space_between_nodes_x * 6 + offset
        second_add.location.y = row_index * space_between_nodes_y
        links.new(first_add.outputs["Value"], second_add.inputs[0])
        links.new(multipliers[2].outputs["Value"], second_add.inputs[1])

        channel_results[channel] = second_add

    # set the matrix accordingly
    rot_around_x_axis = mathutils.Matrix.Rotation(math.radians(-90.0), 4, 'X')
    for frame in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):
        used_rotation_matrix = CameraUtility.get_camera_pose(frame) @ rot_around_x_axis
        for row_index in range(3):
            for column_index in range(3):
                current_multiply = multiplication_values[row_index][column_index]
                current_multiply.inputs[1].default_value = used_rotation_matrix[column_index][row_index]
                current_multiply.inputs[1].keyframe_insert(data_path='default_value', frame=frame)
    offset = 8 * space_between_nodes_x
    for index, channel in enumerate(c_channels):
        multiply = tree.nodes.new("CompositorNodeMath")
        multiply.operation = "MULTIPLY"
        multiply.location.x = space_between_nodes_x * 2 + offset
        multiply.location.y = index * space_between_nodes_y
        links.new(channel_results[channel].outputs["Value"], multiply.inputs[0])
        if channel == "G":
            multiply.inputs[1].default_value = -0.5
        else:
            multiply.inputs[1].default_value = 0.5
        add = tree.nodes.new("CompositorNodeMath")
        add.operation = "ADD"
        add.location.x = space_between_nodes_x * 3 + offset
        add.location.y = index * space_between_nodes_y
        links.new(multiply.outputs["Value"], add.inputs[0])
        add.inputs[1].default_value = 0.5
        output_channel = channel
        if channel == "G":
            output_channel = "B"
        elif channel == "B":
            output_channel = "G"
        links.new(add.outputs["Value"], combine_rgba.inputs[output_channel])

    output_file = tree.nodes.new("CompositorNodeOutputFile")
    output_file.base_path = output_dir
    output_file.format.file_format = "OPEN_EXR"
    output_file.file_slots.values()[0].path = file_prefix
    output_file.location.x = space_between_nodes_x * 15
    links.new(combine_rgba.outputs["Image"], output_file.inputs["Image"])

    Utility.add_output_entry({
        "key": output_key,
        "path": os.path.join(output_dir, file_prefix) + "%04d" + ".exr",
        "version": "2.0.0"
    })
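
# For clarity, a NumPy sketch (an illustration, not code the compositor executes) of the
# per-pixel mapping the node graph above builds: rotate world-space normals into the
# camera frame, map [-1, 1] to [0, 1] with the G component negated, then swap G and B.
def _encode_cam_space_normals(normals_world: np.ndarray, cam2world: np.ndarray) -> np.ndarray:
    """ normals_world: [H, W, 3] world-space normals; cam2world: 4x4 camera pose matrix. """
    rot_around_x_axis = np.array(mathutils.Matrix.Rotation(math.radians(-90.0), 4, 'X'))
    rotation = (cam2world @ rot_around_x_axis)[:3, :3]
    normals_cam = normals_world @ rotation  # per pixel: rotation.T @ n, as wired in the graph
    encoded = normals_cam * np.array([0.5, -0.5, 0.5]) + 0.5
    return encoded[..., [0, 2, 1]]  # swap G and B, matching the combine node wiring
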
Example #7
def load_bop(bop_dataset_path: str, sys_paths: Union[List[str], str], temp_dir: Optional[str] = None,
             model_type: str = "", cam_type: str = "", split: str = "test", scene_id: int = -1,
             obj_ids: list = [], sample_objects: bool = False, num_of_objs_to_sample: Optional[int] = None,
             obj_instances_limit: int = -1, move_origin_to_x_y_plane: bool = False,
             source_frame: list = ["X", "-Y", "-Z"], mm2m: bool = False) -> List[MeshObject]:
    """ Loads the 3D models of any BOP dataset and allows replicating BOP scenes

    - Interfaces with the bop_toolkit, allows loading of train, val and test splits
    - Relative camera poses are loaded/computed with respect to a reference model
    - Sets real camera intrinsics

    :param bop_dataset_path: Full path to a specific bop dataset e.g. /home/user/bop/tless.
    :param sys_paths: System paths to append. Can be a string or a list of strings.
    :param temp_dir: A temp directory which is used for writing the temporary .ply file.
    :param model_type: Optionally, specify type of BOP model.  Available: [reconst, cad or eval].
    :param cam_type: Camera type. If not defined, dataset-specific default camera type is used.
    :param split: Optionally, test or val split depending on BOP dataset.
    :param scene_id: Optionally, specify BOP dataset scene to synthetically replicate. Default: -1 (no scene is replicated,
                     only BOP Objects are loaded).
    :param obj_ids: List of object ids to load. Default: [] (all objects from the given BOP dataset if scene_id is not
                    specified).
    :param sample_objects: Toggles object sampling from the specified dataset.
    :param num_of_objs_to_sample: Amount of objects to sample from the specified dataset. If this amount is bigger than the dataset
                                  actually contains, then all objects will be loaded.
    :param obj_instances_limit: Limits the amount of object copies when sampling. Default: -1 (no limit).
    :param move_origin_to_x_y_plane: Move the center of the object to its lower side. This will not work when used
                                     in combination with pose estimation tasks! It is designed for the use case where
                                     BOP objects are used as filler objects in the background.
    :param source_frame: Can be used if the given positions and rotations are specified in frames different from the blender
                        frame. Has to be a list of three strings. Example: ['X', '-Z', 'Y']: Point (1,2,3) will be transformed
                        to (1, -3, 2). Available: ['X', 'Y', 'Z', '-X', '-Y', '-Z'].
    :param mm2m: Specify whether to convert poses and models to meters.
    :return: The list of loaded mesh objects.
    """
    # Make sure sys_paths is a list
    if not isinstance(sys_paths, list):
        sys_paths = [sys_paths]

    for sys_path in sys_paths:
        if 'bop_toolkit' in sys_path:
            sys.path.append(sys_path)

    if temp_dir is None:
        temp_dir = Utility.get_temporary_directory()

    scale = 0.001 if mm2m else 1
    bop_dataset_name = os.path.basename(bop_dataset_path)
    has_external_texture = bop_dataset_name in ["ycbv", "ruapc"]
    if obj_ids or sample_objects:
        allow_duplication = True
    else:
        allow_duplication = False

    datasets_path = os.path.dirname(bop_dataset_path)
    dataset = os.path.basename(bop_dataset_path)

    print("bob: {}, dataset_path: {}".format(bop_dataset_path, datasets_path))
    print("dataset: {}".format(dataset))

    try:
        from bop_toolkit_lib import dataset_params, inout
    except ImportError as error:
        print('ERROR: Please download the bop_toolkit package and add it to sys_paths in config!')
        print('https://github.com/thodan/bop_toolkit')
        raise error

    model_p = dataset_params.get_model_params(datasets_path, dataset, model_type=model_type if model_type else None)
    cam_p = dataset_params.get_camera_params(datasets_path, dataset, cam_type=cam_type if cam_type else None)

    try:
        split_p = dataset_params.get_split_params(datasets_path, dataset, split=split)
    except ValueError:
        raise Exception("Wrong path or {} split does not exist in {}.".format(split, dataset))

    bpy.context.scene.render.resolution_x = cam_p['im_size'][0]
    bpy.context.scene.render.resolution_y = cam_p['im_size'][1]

    loaded_objects = []

    # only load all/selected objects here, use other modules for setting poses
    # e.g. camera.CameraSampler / object.ObjectPoseSampler
    if scene_id == -1:

        # TLESS exception because images are cropped
        if bop_dataset_name in ['tless']:
            cam_p['K'][0, 2] = split_p['im_size'][0] / 2
            cam_p['K'][1, 2] = split_p['im_size'][1] / 2

        # set camera intrinsics
        CameraUtility.set_intrinsics_from_K_matrix(cam_p['K'], split_p['im_size'][0], split_p['im_size'][1])

        obj_ids = obj_ids if obj_ids else model_p['obj_ids']
        # if sampling is enabled
        if sample_objects:
            loaded_ids = {}
            loaded_amount = 0
            if obj_instances_limit != -1 and len(obj_ids) * obj_instances_limit < num_of_objs_to_sample:
                raise RuntimeError("{}'s {} split contains {} objects, {} object where requested to sample with "
                                   "an instances limit of {}. Raise the limit amount or decrease the requested "
                                   "amount of objects.".format(bop_dataset_path, split, len(obj_ids),
                                                               num_of_objs_to_sample,
                                                               obj_instances_limit))
            while loaded_amount != num_of_objs_to_sample:
                random_id = choice(obj_ids)
                if random_id not in loaded_ids.keys():
                    loaded_ids.update({random_id: 0})
                # if there is no limit or if there is one, but it is not reached for this particular object
                if obj_instances_limit == -1 or loaded_ids[random_id] < obj_instances_limit:
                    cur_obj = BopLoader._load_mesh(random_id, model_p, bop_dataset_name, has_external_texture, temp_dir, allow_duplication, scale)
                    loaded_ids[random_id] += 1
                    loaded_amount += 1
                    loaded_objects.append(cur_obj)
                else:
                    print("ID {} was loaded {} times with limit of {}. Total loaded amount {} while {} are "
                          "being requested".format(random_id, loaded_ids[random_id], obj_instances_limit,
                                                   loaded_amount, num_of_objs_to_sample))
        else:
            for obj_id in obj_ids:
                cur_obj = BopLoader._load_mesh(obj_id, model_p, bop_dataset_name, has_external_texture, temp_dir, allow_duplication, scale)
                loaded_objects.append(cur_obj)

    # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
    else:
        sc_gt = inout.load_scene_gt(split_p['scene_gt_tpath'].format(**{'scene_id': scene_id}))
        sc_camera = inout.load_json(split_p['scene_camera_tpath'].format(**{'scene_id': scene_id}))
        for i, (cam_id, insts) in enumerate(sc_gt.items()):
            cam_K, cam_H_m2c_ref = BopLoader._get_ref_cam_extrinsics_intrinsics(sc_camera, cam_id, insts, scale)

            if i == 0:
                # define world = first camera
                cam_H_m2w_ref = cam_H_m2c_ref.copy()

                cur_objs = []
                # load scene objects and set their poses
                for inst in insts:
                    cur_objs.append(BopLoader._load_mesh(inst['obj_id'], model_p, bop_dataset_name, has_external_texture, temp_dir, allow_duplication, scale))
                    BopLoader.set_object_pose(cur_objs[-1], inst, scale)

            cam_H_c2w = BopLoader._compute_camera_to_world_trafo(cam_H_m2w_ref, cam_H_m2c_ref, source_frame)
            # set camera intrinsics
            CameraUtility.set_intrinsics_from_K_matrix(cam_K, split_p['im_size'][0], split_p['im_size'][1])

            # set camera extrinsics as next frame
            frame_id = CameraUtility.add_camera_pose(cam_H_c2w)

            # Add key frame for camera shift, as it changes from frame to frame in the tless replication
            cam = bpy.context.scene.camera.data
            cam.keyframe_insert(data_path='shift_x', frame=frame_id)
            cam.keyframe_insert(data_path='shift_y', frame=frame_id)

            # Copy object poses to key frame (to be sure)
            for cur_obj in cur_objs:
                BopLoader._insert_key_frames(cur_obj, frame_id)

    # Move the origin of each object to the world origin and on top of the X-Y plane.
    # This makes it easier to place the objects later on; it does not change `.location`.
    # Only useful if the BOP objects are not used in a pose estimation scenario.
    if move_origin_to_x_y_plane:
        for obj in loaded_objects:
            obj.move_origin_to_bottom_mean_point()

    return loaded_objects
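
# A minimal usage sketch (paths are placeholders): replicate scene 3 of the BOP T-LESS
# dataset, converting its millimeter models and poses to meters:
#
# loaded_objects = load_bop(
#     bop_dataset_path="/home/user/bop/tless",
#     sys_paths="/home/user/bop_toolkit",
#     scene_id=3,
#     mm2m=True)
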
    def _set_cam_intrinsics(self, cam, config):
        """ Sets camera intrinsics from a source with following priority

           1. from config function parameter if defined
           2. from custom properties of cam if set in Loader
           3. default config:
                resolution_x/y: 512
                pixel_aspect_x: 1
                clip_start:   : 0.1
                clip_end      : 1000
                fov           : 0.691111

        :param cam: The camera which contains only camera specific attributes.
        :param config: A configuration object with cam intrinsics.
        """
        if config.is_empty():
            return

        width = config.get_int("resolution_x",
                               bpy.context.scene.render.resolution_x)
        height = config.get_int("resolution_y",
                                bpy.context.scene.render.resolution_y)

        # Clipping
        clip_start = config.get_float("clip_start", cam.clip_start)
        clip_end = config.get_float("clip_end", cam.clip_end)

        if config.has_param("cam_K"):
            if config.has_param("fov"):
                print(
                    'WARNING: FOV defined in config is ignored. Mutually exclusive with cam_K'
                )
            if config.has_param("pixel_aspect_x"):
                print(
                    'WARNING: pixel_aspect_x defined in config is ignored. Mutually exclusive with cam_K'
                )

            cam_K = np.array(config.get_list("cam_K")).reshape(3, 3).astype(
                np.float32)

            CameraUtility.set_intrinsics_from_K_matrix(cam_K, width, height,
                                                       clip_start, clip_end)
        else:
            # Set FOV
            fov = config.get_float("fov", cam.angle)

            # Set Pixel Aspect Ratio
            pixel_aspect_x = config.get_float(
                "pixel_aspect_x", bpy.context.scene.render.pixel_aspect_x)
            pixel_aspect_y = config.get_float(
                "pixel_aspect_y", bpy.context.scene.render.pixel_aspect_y)

            # Set camera shift
            shift_x = config.get_float("shift_x", cam.shift_x)
            shift_y = config.get_float("shift_y", cam.shift_y)

            CameraUtility.set_intrinsics_from_blender_params(fov,
                                                             width,
                                                             height,
                                                             clip_start,
                                                             clip_end,
                                                             pixel_aspect_x,
                                                             pixel_aspect_y,
                                                             shift_x,
                                                             shift_y,
                                                             lens_unit="FOV")

        CameraUtility.set_stereo_parameters(
            config.get_string("stereo_convergence_mode",
                              cam.stereo.convergence_mode),
            config.get_float("convergence_distance",
                             cam.stereo.convergence_distance),
            config.get_float("interocular_distance",
                             cam.stereo.interocular_distance))
        if config.has_param("depth_of_field"):
            depth_of_field_config = Config(
                config.get_raw_dict("depth_of_field"))
            fstop_value = depth_of_field_config.get_float("fstop", 2.4)
            aperture_blades = depth_of_field_config.get_int(
                "aperture_blades", 0)
            aperture_ratio = depth_of_field_config.get_float(
                "aperture_ratio", 1.0)
            aperture_rotation = depth_of_field_config.get_float(
                "aperture_rotation_in_rad", 0.0)
            if depth_of_field_config.has_param(
                    "depth_of_field_dist") and depth_of_field_config.has_param(
                        "focal_object"):
                raise RuntimeError(
                    "You can only use either depth_of_field_dist or a focal_object but not both!"
                )
            if depth_of_field_config.has_param("depth_of_field_dist"):
                depth_of_field_dist = depth_of_field_config.get_float(
                    "depth_of_field_dist")
                CameraUtility.add_depth_of_field(cam, None, fstop_value,
                                                 aperture_blades,
                                                 aperture_rotation,
                                                 aperture_ratio,
                                                 depth_of_field_dist)
            elif depth_of_field_config.has_param("focal_object"):
                focal_object = depth_of_field_config.get_list("focal_object")
                if len(focal_object) != 1:
                    raise RuntimeError(
                        f"There has to be exactly one focal object, use 'random_samples: 1' or change "
                        f"the selector. Found {len(focal_object)}.")
                CameraUtility.add_depth_of_field(cam, Entity(focal_object[0]),
                                                 fstop_value, aperture_blades,
                                                 aperture_rotation,
                                                 aperture_ratio)
            else:
                raise RuntimeError(
                    "The depth_of_field dict must contain either a focal_object definition or "
                    "a depth_of_field_dist")