Code Example #1
def enable_diffuse_color_output(output_dir: Optional[str] = None, file_prefix: str = "diffuse_",
                                output_key: str = "diffuse"):
    """ Enables writing diffuse color (albedo) images.

    Diffuse color images will be written in the form of .png files during the next rendering.

    :param output_dir: The directory to write files to. If this is None, the temporary directory is used.
    :param file_prefix: The prefix to use for writing the files.
    :param output_key: The key to use for registering the diffuse color output.
    """
    if output_dir is None:
        output_dir = Utility.get_temporary_directory()

    bpy.context.scene.render.use_compositing = True
    bpy.context.scene.use_nodes = True
    tree = bpy.context.scene.node_tree
    links = tree.links

    bpy.context.view_layer.use_pass_diffuse_color = True
    render_layer_node = Utility.get_the_one_node_with_type(tree.nodes, 'CompositorNodeRLayers')
    final_output = render_layer_node.outputs["DiffCol"]

    output_file = tree.nodes.new('CompositorNodeOutputFile')
    output_file.base_path = output_dir
    output_file.format.file_format = "PNG"
    output_file.file_slots.values()[0].path = file_prefix
    links.new(final_output, output_file.inputs['Image'])

    Utility.add_output_entry({
        "key": output_key,
        "path": os.path.join(output_dir, file_prefix) + "%04d" + ".png",
        "version": "2.0.0"
    })
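A minimal usage sketch (the output path is hypothetical; assumes a scene and camera poses are already set up):

# Enable the diffuse pass; the next render() call (see Code Example #5)
# will additionally write diffuse_0000.png, diffuse_0001.png, ...
enable_diffuse_color_output(output_dir="/tmp/output")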
Code Example #2
    def build_convex_decomposition_collision_shape(
            self,
            vhacd_path: str,
            temp_dir: Optional[str] = None,
            cache_dir: str = "blenderproc_resources/decomposition_cache"):
        """ Builds a collision shape of the object by decomposing it into near convex parts using V-HACD

        :param vhacd_path: The directory in which vhacd should be installed or is already installed.
        :param temp_dir: The temp dir to use for storing the object files created by v-hacd.
        :param cache_dir: If a directory is given, convex decompositions are stored there, named after the mesh's hash.
                          If the same mesh is decomposed a second time, the result is loaded from the cache and the
                          actual decomposition is skipped.
        """
        if platform == "win32":
            raise Exception("This is currently not supported under Windows")

        if temp_dir is None:
            temp_dir = Utility.get_temporary_directory()

        # Decompose the object
        parts = convex_decomposition(self,
                                     temp_dir,
                                     resolve_path(vhacd_path),
                                     cache_dir=resolve_path(cache_dir))
        parts = [MeshObject(p) for p in parts]

        # Make the convex parts children of this object, enable their rigid body component and hide them
        for part in parts:
            part.set_parent(self)
            part.enable_rigidbody(True, "CONVEX_HULL")
            part.hide()
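A minimal usage sketch, assuming obj is a MeshObject that was loaded earlier (the V-HACD path is hypothetical):

# Decompose obj into near-convex parts and attach them as hidden rigid-body
# children; decomposing the same mesh again is served from the cache.
obj.build_convex_decomposition_collision_shape(vhacd_path="resources/vhacd")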
Code Example #3
def enable_distance_output(output_dir: Optional[str] = None, file_prefix: str = "distance_",
                           output_key: str = "distance", distance_start: float = 0.1, distance_range: float = 25.0,
                           distance_falloff: str = "LINEAR"):
    """ Enables writing distance images.

    Distance images will be written in the form of .exr files during the next rendering.

    :param output_dir: The directory to write files to. If this is None, the temporary directory is used.
    :param file_prefix: The prefix to use for writing the files.
    :param output_key: The key to use for registering the distance output.
    :param distance_start: Starting distance of the measurement, measured from the camera.
    :param distance_range: Total range over which the distance is measured. \
                           distance_end = distance_start + distance_range.
    :param distance_falloff: Type of transition used to fade distance. Available: [LINEAR, QUADRATIC, INVERSE_QUADRATIC]
    """
    if output_dir is None:
        output_dir = Utility.get_temporary_directory()

    bpy.context.scene.render.use_compositing = True
    bpy.context.scene.use_nodes = True
    GlobalStorage.add("renderer_distance_end", distance_start + distance_range)

    tree = bpy.context.scene.node_tree
    links = tree.links
    # Use existing render layer
    render_layer_node = Utility.get_the_one_node_with_type(tree.nodes, 'CompositorNodeRLayers')

    # Set mist pass limits
    bpy.context.scene.world.mist_settings.start = distance_start
    bpy.context.scene.world.mist_settings.depth = distance_range
    bpy.context.scene.world.mist_settings.falloff = distance_falloff

    bpy.context.view_layer.use_pass_mist = True  # Enable distance pass
    # Create a mapper node to map from 0-1 to SI units
    mapper_node = tree.nodes.new("CompositorNodeMapRange")
    links.new(render_layer_node.outputs["Mist"], mapper_node.inputs['Value'])
    # map the values from [0, 1] to [distance_start, distance_start + distance_range]
    mapper_node.inputs['To Min'].default_value = distance_start
    mapper_node.inputs['To Max'].default_value = distance_start + distance_range
    final_output = mapper_node.outputs['Value']

    # Build output node
    output_file = tree.nodes.new("CompositorNodeOutputFile")
    output_file.base_path = output_dir
    output_file.format.file_format = "OPEN_EXR"
    output_file.file_slots.values()[0].path = file_prefix

    # Feed the mapped Mist output of the render layer to the input of the file output node
    links.new(final_output, output_file.inputs['Image'])

    Utility.add_output_entry({
        "key": output_key,
        "path": os.path.join(output_dir, file_prefix) + "%04d" + ".exr",
        "version": "2.0.0",
        "trim_redundant_channels": True
    })
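A minimal usage sketch (defaults written out explicitly for clarity):

# Distances in [0.1, 25.1] (scene units) are mapped from the mist pass and
# written as distance_0000.exr, distance_0001.exr, ... on the next render.
enable_distance_output(distance_start=0.1, distance_range=25.0, distance_falloff="LINEAR")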
Code Example #4
    def _default_init(self):
        """
        These operations are called during the init of every module.
        """
        self._output_dir = resolve_path(
            self.config.get_string("output_dir", ""))
        os.makedirs(self._output_dir, exist_ok=True)

        self._temp_dir = Utility.get_temporary_directory()

        self._avoid_output = self.config.get_bool("avoid_output", False)
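A hypothetical sketch of the config keys this method reads in the config-based pipeline:

# "output_dir" is resolved and created if missing; "avoid_output" defaults to False.
config = {"output_dir": "examples/output", "avoid_output": False}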
Code Example #5
def render(output_dir: Optional[str] = None, file_prefix: str = "rgb_", output_key: Optional[str] = "colors",
           load_keys: Optional[Set[str]] = None, return_data: bool = True,
           keys_with_alpha_channel: Optional[Set[str]] = None) -> Dict[str, Union[np.ndarray, List[np.ndarray]]]:
    """ Render all frames.

    This will go through all frames from scene.frame_start to scene.frame_end and render each of them.

    :param output_dir: The directory to write files to. If this is None, the temporary directory is used. \
                       The temporary directory is usually in shared memory (only on Linux).
    :param file_prefix: The prefix to use for writing the images.
    :param output_key: The key to use for registering the output.
    :param load_keys: Set of output keys to load when available
    :param return_data: Whether to load and return the generated data. Kept for backwards compatibility with the config-based pipeline.
    :param keys_with_alpha_channel: A set containing all keys whose alpha channels should be loaded.
    :return: dict of lists of raw renderer output. Keys can be 'distance', 'colors', 'normals'
    """
    if output_dir is None:
        output_dir = Utility.get_temporary_directory()
    if load_keys is None:
        load_keys = {'colors', 'distance', 'depth', 'normals', 'diffuse'}
        keys_with_alpha_channel = {'colors'} if bpy.context.scene.render.film_transparent else None

    if output_key is not None:
        Utility.add_output_entry({
            "key": output_key,
            "path": os.path.join(output_dir, file_prefix) + "%04d" +
                    map_file_format_to_file_ending(bpy.context.scene.render.image_settings.file_format),
            "version": "2.0.0"
        })
        load_keys.add(output_key)

    bpy.context.scene.render.filepath = os.path.join(output_dir, file_prefix)

    # Skip if there is nothing to render
    if bpy.context.scene.frame_end != bpy.context.scene.frame_start:
        if len(get_all_blender_mesh_objects()) == 0:
            raise Exception("There are no mesh-objects to render, "
                            "please load an object before invoking the renderer.")
        # As frame_end points to the next free frame, decrease it by one, as
        # Blender will render all frames in [frame_start, frame_end]
        bpy.context.scene.frame_end -= 1
        bpy.ops.render.render(animation=True, write_still=True)
        # Revert changes
        bpy.context.scene.frame_end += 1

    return WriterUtility.load_registered_outputs(load_keys, keys_with_alpha_channel) if return_data else {}
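A minimal usage sketch (assumes objects, lights and camera poses were added beforehand; the path is hypothetical):

# Render all frames and load the registered outputs as numpy arrays.
data = render(output_dir="/tmp/output")
rgb_frames = data["colors"]  # one image per frame under the default output_key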
Code Example #6
def enable_depth_output(output_dir: Optional[str] = None, file_prefix: str = "depth_", output_key: str = "depth"):
    """ Enables writing depth images.

    Depth images will be written in the form of .exr files during the next rendering.

    :param output_dir: The directory to write files to. If this is None, the temporary directory is used.
    :param file_prefix: The prefix to use for writing the files.
    :param output_key: The key to use for registering the depth output.
    """
    if output_dir is None:
        output_dir = Utility.get_temporary_directory()

    bpy.context.scene.render.use_compositing = True
    bpy.context.scene.use_nodes = True

    tree = bpy.context.scene.node_tree
    links = tree.links
    # Use existing render layer
    render_layer_node = Utility.get_the_one_node_with_type(tree.nodes, 'CompositorNodeRLayers')

    # Enable z-buffer pass
    bpy.context.view_layer.use_pass_z = True

    # Build output node
    output_file = tree.nodes.new("CompositorNodeOutputFile")
    output_file.base_path = output_dir
    output_file.format.file_format = "OPEN_EXR"
    output_file.file_slots.values()[0].path = file_prefix

    # Feed the Z-Buffer output of the render layer to the input of the file IO layer
    links.new(render_layer_node.outputs["Depth"], output_file.inputs['Image'])

    Utility.add_output_entry({
        "key": output_key,
        "path": os.path.join(output_dir, file_prefix) + "%04d" + ".exr",
        "version": "2.0.0",
        "trim_redundant_channels": True
    })
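A minimal usage sketch:

# Register the depth pass, render, and read the result; "depth" is part of
# the default load_keys in render() (see Code Example #5).
enable_depth_output()
data = render()
depth_frames = data["depth"]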
Code Example #7
def enable_normals_output(output_dir: Optional[str] = None, file_prefix: str = "normals_",
                          output_key: str = "normals"):
    """ Enables writing normal images.

    Normal images will be written in the form of .exr files during the next rendering.

    :param output_dir: The directory to write files to, if this is None the temporary directory is used.
    :param file_prefix: The prefix to use for writing the files.
    :param output_key: The key to use for registering the normal output.
    """
    if output_dir is None:
        output_dir = Utility.get_temporary_directory()

    bpy.context.scene.render.use_compositing = True
    bpy.context.scene.use_nodes = True
    tree = bpy.context.scene.node_tree
    links = tree.links

    # Use existing render layer
    render_layer_node = Utility.get_the_one_node_with_type(tree.nodes, 'CompositorNodeRLayers')

    separate_rgba = tree.nodes.new("CompositorNodeSepRGBA")
    space_between_nodes_x = 200
    space_between_nodes_y = -300
    separate_rgba.location.x = space_between_nodes_x
    separate_rgba.location.y = space_between_nodes_y
    links.new(render_layer_node.outputs["Normal"], separate_rgba.inputs["Image"])

    combine_rgba = tree.nodes.new("CompositorNodeCombRGBA")
    combine_rgba.location.x = space_between_nodes_x * 14

    c_channels = ["R", "G", "B"]
    offset = space_between_nodes_x * 2
    multiplication_values: List[List[bpy.types.Node]] = [[], [], []]
    channel_results = {}
    for row_index, channel in enumerate(c_channels):
        # matrix multiplication
        multipliers = []
        for column in range(3):
            multiply = tree.nodes.new("CompositorNodeMath")
            multiply.operation = "MULTIPLY"
            multiply.inputs[1].default_value = 0  # setting at the end for all frames
            multiply.location.x = column * space_between_nodes_x + offset
            multiply.location.y = row_index * space_between_nodes_y
            links.new(separate_rgba.outputs[c_channels[column]], multiply.inputs[0])
            multipliers.append(multiply)
            multiplication_values[row_index].append(multiply)

        first_add = tree.nodes.new("CompositorNodeMath")
        first_add.operation = "ADD"
        first_add.location.x = space_between_nodes_x * 5 + offset
        first_add.location.y = row_index * space_between_nodes_y
        links.new(multipliers[0].outputs["Value"], first_add.inputs[0])
        links.new(multipliers[1].outputs["Value"], first_add.inputs[1])

        second_add = tree.nodes.new("CompositorNodeMath")
        second_add.operation = "ADD"
        second_add.location.x = space_between_nodes_x * 6 + offset
        second_add.location.y = row_index * space_between_nodes_y
        links.new(first_add.outputs["Value"], second_add.inputs[0])
        links.new(multipliers[2].outputs["Value"], second_add.inputs[1])

        channel_results[channel] = second_add

    # set the matrix accordingly
    rot_around_x_axis = mathutils.Matrix.Rotation(math.radians(-90.0), 4, 'X')
    for frame in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):
        used_rotation_matrix = CameraUtility.get_camera_pose(frame) @ rot_around_x_axis
        for row_index in range(3):
            for column_index in range(3):
                current_multiply = multiplication_values[row_index][column_index]
                current_multiply.inputs[1].default_value = used_rotation_matrix[column_index][row_index]
                current_multiply.inputs[1].keyframe_insert(data_path='default_value', frame=frame)
    offset = 8 * space_between_nodes_x
    for index, channel in enumerate(c_channels):
        multiply = tree.nodes.new("CompositorNodeMath")
        multiply.operation = "MULTIPLY"
        multiply.location.x = space_between_nodes_x * 2 + offset
        multiply.location.y = index * space_between_nodes_y
        links.new(channel_results[channel].outputs["Value"], multiply.inputs[0])
        if channel == "G":
            multiply.inputs[1].default_value = -0.5
        else:
            multiply.inputs[1].default_value = 0.5
        add = tree.nodes.new("CompositorNodeMath")
        add.operation = "ADD"
        add.location.x = space_between_nodes_x * 3 + offset
        add.location.y = index * space_between_nodes_y
        links.new(multiply.outputs["Value"], add.inputs[0])
        add.inputs[1].default_value = 0.5
        output_channel = channel
        if channel == "G":
            output_channel = "B"
        elif channel == "B":
            output_channel = "G"
        links.new(add.outputs["Value"], combine_rgba.inputs[output_channel])

    output_file = tree.nodes.new("CompositorNodeOutputFile")
    output_file.base_path = output_dir
    output_file.format.file_format = "OPEN_EXR"
    output_file.file_slots.values()[0].path = file_prefix
    output_file.location.x = space_between_nodes_x * 15
    links.new(combine_rgba.outputs["Image"], output_file.inputs["Image"])

    Utility.add_output_entry({
        "key": output_key,
        "path": os.path.join(output_dir, file_prefix) + "%04d" + ".exr",
        "version": "2.0.0"
    })
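A minimal usage sketch:

# Normals are rotated into camera space per frame and encoded into [0, 1]
# before being written as normals_0000.exr, normals_0001.exr, ...
enable_normals_output()
data = render()  # data["normals"] then holds one image per frame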
Code Example #8
def load_AMASS(data_path: str,
               sub_dataset_id: str,
               temp_dir: Optional[str] = None,
               body_model_gender: Optional[str] = None,
               subject_id: str = "",
               sequence_id: int = -1,
               frame_id: int = -1,
               num_betas: int = 10,
               num_dmpls: int = 8) -> List[MeshObject]:
    """
    Uses the pose parameters to generate the mesh and loads it into the scene.

    :param data_path: The path to the AMASS dataset folder inside the resources folder.
    :param sub_dataset_id: Identifier for the sub dataset from which the human pose object should be extracted.
                                Available: ['CMU', 'Transitions_mocap', 'MPI_Limits', 'SSM_synced', 'TotalCapture',
                                'Eyes_Japan_Dataset', 'MPI_mosh', 'MPI_HDM05', 'HumanEva', 'ACCAD', 'EKUT', 'SFU', 'KIT', 'H36M', 'TCD_handMocap', 'BML']
    :param temp_dir: A temp directory which is used for writing the temporary .obj file.
    :param body_model_gender: The model gender; the pose will be represented using a male, female or neutral body shape.
                                   Available: [male, female, neutral]. If None, a random one is chosen.
    :param subject_id: Type of motion from which the pose should be extracted; this is a dataset-dependent parameter.
                            If left empty, a random subject id is picked.
    :param sequence_id: Sequence id in the dataset; sequences are motions recorded to represent a certain action.
                             If set to -1, a random sequence id is selected.
    :param frame_id: Frame id in the selected motion sequence. If set to -1, a random one is picked.
    :param num_betas: Number of body parameters.
    :param num_dmpls: Number of DMPL parameters.
    :return: The list of loaded mesh objects.
    """
    if body_model_gender is None:
        body_model_gender = random.choice(["male", "female", "neutral"])
    if temp_dir is None:
        temp_dir = Utility.get_temporary_directory()

    # Install required additional packages
    SetupUtility.setup_pip([
        "git+https://github.com/abahnasy/smplx",
        "git+https://github.com/abahnasy/human_body_prior"
    ])

    # Get the currently supported mocap datasets by this loader
    taxonomy_file_path = os.path.join(data_path, "taxonomy.json")
    supported_mocap_datasets = AMASSLoader._get_supported_mocap_datasets(
        taxonomy_file_path, data_path)

    pose_body, betas = AMASSLoader._get_pose_parameters(
        supported_mocap_datasets, num_betas, sub_dataset_id, subject_id,
        sequence_id, frame_id)
    # load parametric Model
    body_model, faces = AMASSLoader._load_parametric_body_model(
        data_path, body_model_gender, num_betas, num_dmpls)
    # Generate Body representations using SMPL model
    body_repr = body_model(pose_body=pose_body, betas=betas)
    # Generate .obj file represents the selected pose
    generated_obj = AMASSLoader._write_body_mesh_to_obj_file(
        body_repr, faces, temp_dir)

    loaded_obj = load_obj(generated_obj)

    AMASSLoader._correct_materials(loaded_obj)

    # set the shading mode explicitly to smooth
    for obj in loaded_obj:
        obj.set_shading_mode("SMOOTH")

    # remove the x-axis rotation introduced by importing .obj files;
    # the object keeps the same pose as before, just that rotation_euler is now [0, 0, 0]
    for obj in loaded_obj:
        obj.persist_transformation_into_mesh(location=False,
                                             rotation=True,
                                             scale=False)

    # move the origin of the object to the world origin and on top of the X-Y plane
    # makes it easier to place them later on, this does not change the `.location`
    for obj in loaded_obj:
        obj.move_origin_to_bottom_mean_point()
    bpy.ops.object.select_all(action='DESELECT')

    return loaded_obj
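A minimal usage sketch (the dataset path is hypothetical and must contain taxonomy.json):

# Load a random CMU pose using a neutral body model.
objs = load_AMASS(data_path="resources/AMASS", sub_dataset_id="CMU",
                  body_model_gender="neutral")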
Code Example #9
def render_optical_flow(output_dir: Optional[str] = None, temp_dir: Optional[str] = None, get_forward_flow: bool = True,
                        get_backward_flow: bool = True, blender_image_coordinate_style: bool = False,
                        forward_flow_output_file_prefix: str = "forward_flow_",
                        forward_flow_output_key: str = "forward_flow",
                        backward_flow_output_file_prefix: str = "backward_flow_",
                        backward_flow_output_key: str = "backward_flow", return_data: bool = True) -> \
        Dict[str, Union[np.ndarray, List[np.ndarray]]]:
    """ Renders the optical flow (forward and backward) for all frames.

    :param output_dir: The directory to write images to.
    :param temp_dir: The directory to write intermediate data to.
    :param get_forward_flow: Whether to render forward optical flow.
    :param get_backward_flow: Whether to render backward optical flow.
    :param blender_image_coordinate_style: Whether to specify the image coordinate system at the bottom left (Blender default; True) or top left (standard convention; False).
    :param forward_flow_output_file_prefix: The file prefix that should be used when writing forward flow to a file.
    :param forward_flow_output_key: The key which should be used for storing forward optical flow values.
    :param backward_flow_output_file_prefix: The file prefix that should be used when writing backward flow to a file.
    :param backward_flow_output_key: The key which should be used for storing backward optical flow values.
    :param return_data: Whether to load and return the generated data. Kept for backwards compatibility with the config-based pipeline.
    :return: dict of lists of raw renderer outputs. Keys can be 'forward_flow', 'backward_flow'
    """
    if get_forward_flow is False and get_backward_flow is False:
        raise Exception(
            "Take the FlowRenderer Module out of the config if both forward and backward flow are set to False!"
        )

    if output_dir is None:
        output_dir = Utility.get_temporary_directory()
    if temp_dir is None:
        temp_dir = Utility.get_temporary_directory()

    with Utility.UndoAfterExecution():
        RendererUtility._render_init()
        RendererUtility.set_samples(1)
        RendererUtility.set_adaptive_sampling(0)
        RendererUtility.set_denoiser(None)
        RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

        FlowRendererUtility._output_vector_field(get_forward_flow,
                                                 get_backward_flow, output_dir)

        # only need to render once; both fwd and bwd flow will be saved
        temporary_fwd_flow_file_path = os.path.join(temp_dir, 'fwd_flow_')
        temporary_bwd_flow_file_path = os.path.join(temp_dir, 'bwd_flow_')
        RendererUtility.render(temp_dir, "bwd_flow_", None, load_keys=set())

        # After rendering: convert to optical flow or calculate hsv visualization, if desired
        for frame in range(bpy.context.scene.frame_start,
                           bpy.context.scene.frame_end):
            # temporarily save respective vector fields
            if get_forward_flow:
                file_path = temporary_fwd_flow_file_path + "%04d" % frame + ".exr"
                fwd_flow_field = load_image(file_path,
                                            num_channels=4).astype(np.float32)

                if not blender_image_coordinate_style:
                    fwd_flow_field[:, :, 1] = fwd_flow_field[:, :, 1] * -1

                fname = os.path.join(
                    output_dir,
                    forward_flow_output_file_prefix) + '%04d' % frame
                forward_flow = fwd_flow_field * -1  # invert forward flow to point at next frame
                np.save(fname + '.npy', forward_flow[:, :, :2])

            if get_backward_flow:
                file_path = temporary_bwd_flow_file_path + "%04d" % frame + ".exr"
                bwd_flow_field = load_image(file_path,
                                            num_channels=4).astype(np.float32)

                if not blender_image_coordinate_style:
                    bwd_flow_field[:, :, 1] = bwd_flow_field[:, :, 1] * -1

                fname = os.path.join(
                    output_dir,
                    backward_flow_output_file_prefix) + '%04d' % frame
                np.save(fname + '.npy', bwd_flow_field[:, :, :2])

    load_keys = set()
    # register desired outputs
    if get_forward_flow:
        Utility.register_output(output_dir, forward_flow_output_file_prefix,
                                forward_flow_output_key, '.npy', '2.0.0')
        load_keys.add(forward_flow_output_key)
    if get_backward_flow:
        Utility.register_output(output_dir, backward_flow_output_file_prefix,
                                backward_flow_output_key, '.npy', '2.0.0')
        load_keys.add(backward_flow_output_key)

    return WriterUtility.load_registered_outputs(
        load_keys) if return_data else {}
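A minimal usage sketch:

# Render only the forward flow, using the standard top-left image convention.
flow = render_optical_flow(get_backward_flow=False)
fwd = flow["forward_flow"]  # one HxWx2 array per rendered frame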
Code Example #10
def load_bop(bop_dataset_path: str, sys_paths: Union[List[str], str], temp_dir: Optional[str] = None,
             model_type: str = "", cam_type: str = "", split: str = "test", scene_id: int = -1,
             obj_ids: list = [], sample_objects: bool = False, num_of_objs_to_sample: Optional[int] = None,
             obj_instances_limit: int = -1, move_origin_to_x_y_plane: bool = False,
             source_frame: list = ["X", "-Y", "-Z"], mm2m: bool = False) -> List[MeshObject]:
    """ Loads the 3D models of any BOP dataset and allows replicating BOP scenes

    - Interfaces with the bop_toolkit, allows loading of train, val and test splits
    - Relative camera poses are loaded/computed with respect to a reference model
    - Sets real camera intrinsics

    :param bop_dataset_path: Full path to a specific bop dataset e.g. /home/user/bop/tless.
    :param sys_paths: System paths to append. Can be a string or a list of strings.
    :param temp_dir: A temp directory which is used for writing the temporary .ply file.
    :param model_type: Optionally, specify the type of BOP model. Available: [reconst, cad, eval].
    :param cam_type: Camera type. If not defined, dataset-specific default camera type is used.
    :param split: Optionally, test or val split depending on BOP dataset.
    :param scene_id: Optionally, specify BOP dataset scene to synthetically replicate. Default: -1 (no scene is replicated,
                     only BOP Objects are loaded).
    :param obj_ids: List of object ids to load. Default: [] (all objects from the given BOP dataset if scene_id is not
                    specified).
    :param sample_objects: Toggles object sampling from the specified dataset.
    :param num_of_objs_to_sample: Amount of objects to sample from the specified dataset. If this amount is bigger than the dataset
                                  actually contains, then all objects will be loaded.
    :param obj_instances_limit: Limits the amount of object copies when sampling. Default: -1 (no limit).
    :param move_origin_to_x_y_plane: Move center of the object to the lower side of the object, this will not work when used in combination with
                                     pose estimation tasks! This is designed for the use-case where BOP objects are used as filler objects in
                                     the background.
    :param source_frame: Can be used if the given positions and rotations are specified in frames different from the blender
                        frame. Has to be a list of three strings. Example: ['X', '-Z', 'Y']: Point (1,2,3) will be transformed
                        to (1, -3, 2). Available: ['X', 'Y', 'Z', '-X', '-Y', '-Z'].
    :param mm2m: Specify whether to convert poses and models to meters.
    :return: The list of loaded mesh objects.
    """
    # Make sure sys_paths is a list
    if not isinstance(sys_paths, list):
        sys_paths = [sys_paths]

    for sys_path in sys_paths:
        if 'bop_toolkit' in sys_path:
            sys.path.append(sys_path)

    if temp_dir is None:
        temp_dir = Utility.get_temporary_directory()

    scale = 0.001 if mm2m else 1
    bop_dataset_name = os.path.basename(bop_dataset_path)
    has_external_texture = bop_dataset_name in ["ycbv", "ruapc"]
    if obj_ids or sample_objects:
        allow_duplication = True
    else:
        allow_duplication = False

    datasets_path = os.path.dirname(bop_dataset_path)
    dataset = os.path.basename(bop_dataset_path)

    print("bob: {}, dataset_path: {}".format(bop_dataset_path, datasets_path))
    print("dataset: {}".format(dataset))

    try:
        from bop_toolkit_lib import dataset_params, inout
    except ImportError as error:
        print('ERROR: Please download the bop_toolkit package and add it to sys_paths in config!')
        print('https://github.com/thodan/bop_toolkit')
        raise error

    model_p = dataset_params.get_model_params(datasets_path, dataset, model_type=model_type if model_type else None)
    cam_p = dataset_params.get_camera_params(datasets_path, dataset, cam_type=cam_type if cam_type else None)

    try:
        split_p = dataset_params.get_split_params(datasets_path, dataset, split=split)
    except ValueError:
        raise Exception("Wrong path or {} split does not exist in {}.".format(split, dataset))

    bpy.context.scene.render.resolution_x = cam_p['im_size'][0]
    bpy.context.scene.render.resolution_y = cam_p['im_size'][1]

    loaded_objects = []

    # only load all/selected objects here, use other modules for setting poses
    # e.g. camera.CameraSampler / object.ObjectPoseSampler
    if scene_id == -1:

        # TLESS exception because images are cropped
        if bop_dataset_name in ['tless']:
            cam_p['K'][0, 2] = split_p['im_size'][0] / 2
            cam_p['K'][1, 2] = split_p['im_size'][1] / 2

        # set camera intrinsics
        CameraUtility.set_intrinsics_from_K_matrix(cam_p['K'], split_p['im_size'][0], split_p['im_size'][1])

        obj_ids = obj_ids if obj_ids else model_p['obj_ids']
        # if sampling is enabled
        if sample_objects:
            loaded_ids = {}
            loaded_amount = 0
            if obj_instances_limit != -1 and len(obj_ids) * obj_instances_limit < num_of_objs_to_sample:
                raise RuntimeError("{}'s {} split contains {} objects, {} object where requested to sample with "
                                   "an instances limit of {}. Raise the limit amount or decrease the requested "
                                   "amount of objects.".format(bop_dataset_path, split, len(obj_ids),
                                                               num_of_objs_to_sample,
                                                               obj_instances_limit))
            while loaded_amount != num_of_objs_to_sample:
                random_id = choice(obj_ids)
                if random_id not in loaded_ids.keys():
                    loaded_ids.update({random_id: 0})
                # if there is no limit or if there is one, but it is not reached for this particular object
                if obj_instances_limit == -1 or loaded_ids[random_id] < obj_instances_limit:
                    cur_obj = BopLoader._load_mesh(random_id, model_p, bop_dataset_name, has_external_texture, temp_dir, allow_duplication, scale)
                    loaded_ids[random_id] += 1
                    loaded_amount += 1
                    loaded_objects.append(cur_obj)
                else:
                    print("ID {} was loaded {} times with limit of {}. Total loaded amount {} while {} are "
                          "being requested".format(random_id, loaded_ids[random_id], obj_instances_limit,
                                                   loaded_amount, num_of_objs_to_sample))
        else:
            for obj_id in obj_ids:
                cur_obj = BopLoader._load_mesh(obj_id, model_p, bop_dataset_name, has_external_texture, temp_dir, allow_duplication, scale)
                loaded_objects.append(cur_obj)

    # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
    else:
        sc_gt = inout.load_scene_gt(split_p['scene_gt_tpath'].format(**{'scene_id': scene_id}))
        sc_camera = inout.load_json(split_p['scene_camera_tpath'].format(**{'scene_id': scene_id}))
        for i, (cam_id, insts) in enumerate(sc_gt.items()):
            cam_K, cam_H_m2c_ref = BopLoader._get_ref_cam_extrinsics_intrinsics(sc_camera, cam_id, insts, scale)

            if i == 0:
                # define world = first camera
                cam_H_m2w_ref = cam_H_m2c_ref.copy()

                cur_objs = []
                # load scene objects and set their poses
                for inst in insts:
                    cur_objs.append(BopLoader._load_mesh(inst['obj_id'], model_p, bop_dataset_name, has_external_texture, temp_dir, allow_duplication, scale))
                    BopLoader.set_object_pose(cur_objs[-1], inst, scale)

            cam_H_c2w = BopLoader._compute_camera_to_world_trafo(cam_H_m2w_ref, cam_H_m2c_ref, source_frame)
            # set camera intrinsics
            CameraUtility.set_intrinsics_from_K_matrix(cam_K, split_p['im_size'][0], split_p['im_size'][1])

            # set camera extrinsics as next frame
            frame_id = CameraUtility.add_camera_pose(cam_H_c2w)

            # Add key frame for camera shift, as it changes from frame to frame in the tless replication
            cam = bpy.context.scene.camera.data
            cam.keyframe_insert(data_path='shift_x', frame=frame_id)
            cam.keyframe_insert(data_path='shift_y', frame=frame_id)

            # Copy object poses to key frame (to be sure)
            for cur_obj in cur_objs:
                BopLoader._insert_key_frames(cur_obj, frame_id)

    # move the origin of the object to the world origin and on top of the X-Y plane
    # makes it easier to place them later on, this does not change the `.location`
    # This is only useful if the BOP objects are not used in a pose estimation scenario.
    if move_origin_to_x_y_plane:
        for obj in loaded_objects:
            obj.move_origin_to_bottom_mean_point()

    return loaded_objects
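A minimal usage sketch (paths are hypothetical; the bop_toolkit must be available under sys_paths):

# Replicate scene 3 of the tless test split, converting poses and models to meters.
objs = load_bop(bop_dataset_path="/home/user/bop/tless",
                sys_paths="/home/user/bop_toolkit",
                scene_id=3, mm2m=True)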
Code Example #11
def render_segmap(output_dir: Optional[str] = None, temp_dir: Optional[str] = None,
                  map_by: Union[str, List[str]] = "class",
                  default_values: Optional[Dict[str, int]] = None, file_prefix: str = "segmap_",
                  output_key: str = "segmap", segcolormap_output_file_prefix: str = "instance_attribute_map_",
                  segcolormap_output_key: str = "segcolormap", use_alpha_channel: bool = False,
                  render_colorspace_size_per_dimension: int = 2048) -> \
        Dict[str, Union[np.ndarray, List[np.ndarray]]]:
    """ Renders segmentation maps for all frames

    :param output_dir: The directory to write images to.
    :param temp_dir: The directory to write intermediate data to.
    :param map_by: The attributes to be used for color mapping.
    :param default_values: The default values used for the keys used in attributes. If None, {"class": 0} is used.
    :param file_prefix: The prefix to use for writing the images.
    :param output_key: The key to use for registering the output.
    :param segcolormap_output_file_prefix: The prefix to use for writing the segmentation-color map csv.
    :param segcolormap_output_key: The key to use for registering the segmentation-color map output.
    :param use_alpha_channel: If true, the alpha channel stored in .png textures is used.
    :param render_colorspace_size_per_dimension: As we use float16 for storing the rendering, the interval of \
                                                 integers which can be precisely stored is [-2048, 2048]. As \
                                                 blender does not allow negative values for colors, we use \
                                                 [0, 2048] ** 3 as our color space which allows ~8 billion \
                                                 different colors/objects. This should be enough.
    :return: dict of lists of segmaps and (for instance segmentation) segcolormaps
    """

    if output_dir is None:
        output_dir = Utility.get_temporary_directory()
    if temp_dir is None:
        temp_dir = Utility.get_temporary_directory()
    if default_values is None:
        default_values = {"class": 0}

    with Utility.UndoAfterExecution():
        RendererUtility._render_init()
        RendererUtility.set_samples(1)
        RendererUtility.set_adaptive_sampling(0)
        RendererUtility.set_denoiser(None)
        RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

        attributes = map_by
        if 'class' in default_values:
            default_values['cp_category_id'] = default_values['class']

        # Get objects with meshes (i.e. not lights or cameras)
        objs_with_mats = get_all_blender_mesh_objects()

        colors, num_splits_per_dimension, objects = _colorize_objects_for_instance_segmentation(
            objs_with_mats, use_alpha_channel,
            render_colorspace_size_per_dimension)

        bpy.context.scene.cycles.filter_width = 0.0

        if use_alpha_channel:
            MaterialLoaderUtility.add_alpha_channel_to_textures(
                blurry_edges=False)

        # Determine path for temporary and for final output
        temporary_segmentation_file_path = os.path.join(temp_dir, "seg_")
        final_segmentation_file_path = os.path.join(output_dir, file_prefix)

        RendererUtility.set_output_format("OPEN_EXR", 16)
        RendererUtility.render(temp_dir, "seg_", None, return_data=False)

        # Find optimal dtype of output based on max index
        for dtype in [np.uint8, np.uint16, np.uint32]:
            optimal_dtype = dtype
            if np.iinfo(optimal_dtype).max >= len(colors) - 1:
                break

        if isinstance(attributes, str):
            # only one result is requested
            result_channels = 1
            attributes = [attributes]
        elif isinstance(attributes, list):
            result_channels = len(attributes)
        else:
            raise Exception(
                "The type of this is not supported here: {}".format(
                    attributes))

        # define them for the avoid rendering case
        there_was_an_instance_rendering = False
        list_of_attributes: List[str] = []

        # Check if stereo is enabled
        if bpy.context.scene.render.use_multiview:
            suffixes = ["_L", "_R"]
        else:
            suffixes = [""]

        return_dict: Dict[str, Union[np.ndarray, List[np.ndarray]]] = {}

        save_in_csv_attributes: Dict[int, Dict[str, Any]] = {}
        # After rendering
        for frame in range(
                bpy.context.scene.frame_start,
                bpy.context.scene.frame_end):  # for each rendered frame
            save_in_csv_attributes: Dict[int, Dict[str, Any]] = {}
            for suffix in suffixes:
                file_path = temporary_segmentation_file_path + (
                    "%04d" % frame) + suffix + ".exr"
                segmentation = load_image(file_path)
                print(file_path, segmentation.shape)

                segmap = Utility.map_back_from_equally_spaced_equidistant_values(
                    segmentation, num_splits_per_dimension,
                    render_colorspace_size_per_dimension)
                segmap = segmap.astype(optimal_dtype)

                object_ids = np.unique(segmap)
                max_id = np.max(object_ids)
                if max_id >= len(objects):
                    raise Exception(
                        "There are more object colors than there are objects")
                combined_result_map = []
                there_was_an_instance_rendering = False
                list_of_attributes = []
                channels = []
                for channel_id in range(result_channels):
                    num_default_values = 0
                    resulting_map = np.zeros(
                        (segmap.shape[0], segmap.shape[1]),
                        dtype=optimal_dtype)
                    was_used = False
                    current_attribute = attributes[channel_id]
                    org_attribute = current_attribute

                    # if the class is used the category_id attribute is evaluated
                    if current_attribute == "class":
                        current_attribute = "cp_category_id"
                    # in the instance case the resulting ids are directly used
                    if current_attribute == "instance":
                        there_was_an_instance_rendering = True
                        resulting_map = segmap
                        was_used = True
                    else:
                        if current_attribute != "cp_category_id":
                            list_of_attributes.append(current_attribute)
                        # for the current attribute remove cp_ and _csv, if present
                        attribute = current_attribute
                        if attribute.startswith("cp_"):
                            attribute = attribute[len("cp_"):]
                        # check if a default value was specified
                        default_value_set = False
                        if current_attribute in default_values or attribute in default_values:
                            default_value_set = True
                            if current_attribute in default_values:
                                default_value = default_values[
                                    current_attribute]
                            elif attribute in default_values:
                                default_value = default_values[attribute]
                        # iterate over all object ids
                        for object_id in object_ids:
                            # Convert np.uint8 to int, such that the save_in_csv_attributes dict can later be serialized
                            object_id = int(object_id)
                            # get the corresponding object via the id
                            current_obj = objects[object_id]
                            # if the current obj has an attribute with that name -> get it
                            if hasattr(current_obj, attribute):
                                value = getattr(current_obj, attribute)
                            # if the current object has a custom property with that name -> get it
                            elif current_attribute.startswith(
                                    "cp_") and attribute in current_obj:
                                value = current_obj[attribute]
                            elif current_attribute.startswith("cf_"):
                                if current_attribute == "cf_basename":
                                    value = current_obj.name
                                    if "." in value:
                                        value = value[:value.rfind(".")]
                            elif default_value_set:
                                # if none of the above applies use the default value
                                value = default_value
                                num_default_values += 1
                            else:
                                # if the requested current_attribute is neither an attribute nor a
                                # custom property, and no default value was set,
                                # an exception is thrown
                                raise Exception(
                                    "The obj: {} does not have the "
                                    "attribute: {}, striped: {}. Maybe try a default "
                                    "value.".format(current_obj.name,
                                                    current_attribute,
                                                    attribute))

                            # save everything which is not instance also in the .csv
                            if isinstance(
                                    value,
                                (int, float, np.integer, np.floating)):
                                was_used = True
                                resulting_map[segmap == object_id] = value

                            if object_id in save_in_csv_attributes:
                                save_in_csv_attributes[object_id][
                                    attribute] = value
                            else:
                                save_in_csv_attributes[object_id] = {
                                    attribute: value
                                }

                    if was_used and num_default_values < len(object_ids):
                        channels.append(org_attribute)
                        combined_result_map.append(resulting_map)
                        return_dict.setdefault(
                            "{}_segmaps{}".format(org_attribute, suffix),
                            []).append(resulting_map)

                fname = final_segmentation_file_path + ("%04d" %
                                                        frame) + suffix
                # combine all resulting images to one image
                resulting_map = np.stack(combined_result_map, axis=2)
                # remove the unneeded third dimension
                if resulting_map.shape[2] == 1:
                    resulting_map = resulting_map[:, :, 0]
                # TODO: Remove unnecessary save when we give up backwards compatibility
                np.save(fname, resulting_map)

            if there_was_an_instance_rendering:
                mappings = []
                for object_id, attribute_dict in save_in_csv_attributes.items(
                ):
                    mappings.append({"idx": object_id, **attribute_dict})
                return_dict.setdefault("instance_attribute_maps",
                                       []).append(mappings)

                # write color mappings to file
                # TODO: Remove unnecessary csv file when we give up backwards compatibility
                csv_file_path = os.path.join(
                    output_dir,
                    segcolormap_output_file_prefix + ("%04d.csv" % frame))
                with open(csv_file_path, 'w', newline='') as csvfile:
                    # get from the first element the used field names
                    fieldnames = ["idx"]
                    # get all used object element keys
                    for object_element in save_in_csv_attributes.values():
                        fieldnames.extend(list(object_element.keys()))
                        break
                    for channel_name in channels:
                        fieldnames.append("channel_{}".format(channel_name))
                    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                    writer.writeheader()
                    # save for each object all values in one row
                    for obj_idx, object_element in save_in_csv_attributes.items(
                    ):
                        object_element["idx"] = obj_idx
                        for i, channel_name in enumerate(channels):
                            object_element["channel_{}".format(
                                channel_name)] = i
                        writer.writerow(object_element)
            else:
                if len(list_of_attributes) > 0:
                    raise Exception(
                        "There were attributes specified in the may_by, which could not be saved as "
                        "there was no \"instance\" may_by key used. This is true for this/these "
                        "keys: {}".format(", ".join(list_of_attributes)))
                # if there was no instance rendering no .csv file is generated!
                # delete all saved infos about .csv
                save_in_csv_attributes = {}

    Utility.register_output(output_dir, file_prefix, output_key, ".npy",
                            "2.0.0")

    if save_in_csv_attributes:
        Utility.register_output(output_dir, segcolormap_output_file_prefix,
                                segcolormap_output_key, ".csv", "2.0.0")
    return return_dict
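A minimal usage sketch:

# Render class and instance maps for all frames; the instance channel also
# produces the per-frame attribute mappings and the .csv color map.
data = render_segmap(map_by=["class", "instance"])
class_maps = data["class_segmaps"]                # one HxW array per frame
attribute_maps = data["instance_attribute_maps"]  # idx -> attribute dicts per frame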