Ejemplo n.º 1
0
    def run(self):
        """Renders the RGB pass for all frames, reverting scene changes afterwards."""
        # When no output is produced (debug case) the undo step is skipped.
        perform_undo = not self._avoid_output
        with Utility.UndoAfterExecution(perform_undo_op=perform_undo):
            self._configure_renderer(use_denoiser=True, default_denoiser="Intel")

            # Strip textures when the texture-less render mode is requested.
            if self._texture_less_mode:
                MaterialLoaderUtility.change_to_texture_less_render(self._use_alpha_channel)

            # Make the alpha channel stored in .png textures usable in the render.
            if self._use_alpha_channel:
                MaterialLoaderUtility.add_alpha_channel_to_textures(blurry_edges=True)

            # Optionally enable motion blur, with rolling shutter if requested.
            if self._use_motion_blur:
                shutter_mode = 'TOP' if self._use_rolling_shutter else "NONE"
                RendererUtility.enable_motion_blur(self._motion_blur_length,
                                                   shutter_mode,
                                                   self._rolling_shutter_length)

            transparent_bg = self.config.get_bool("transparent_background", False)
            self._render("rgb_",
                         "colors",
                         enable_transparency=transparent_bg,
                         file_format=self._image_type)
Ejemplo n.º 2
0
    def test_blender_reference_after_undo(self):
        """Verify that blender_data objects remain valid after an undo is executed."""
        with SilentMode():
            bproc.init()
            scene_path = os.path.join(test_path_manager.example_resources, "scene.obj")
            loaded_objects = bproc.loader.load_obj(scene_path)

            # Tag every object, then overwrite the tag inside an undo scope.
            for loaded_obj in loaded_objects:
                loaded_obj.set_cp("test", 0)

            with Utility.UndoAfterExecution():
                for loaded_obj in loaded_objects:
                    loaded_obj.set_cp("test", 1)

        # The undo must have reverted the custom property to its original value.
        for loaded_obj in loaded_objects:
            self.assertEqual(loaded_obj.get_cp("test"), 0)
Ejemplo n.º 3
0
    def run(self):
        """Renders optical flow for all frames, reverting scene changes afterwards."""
        with Utility.UndoAfterExecution():
            # Flow only needs the vector pass, so a single sample suffices.
            self._configure_renderer(default_samples=1)

            # In the debug/avoid-output case nothing is rendered.
            if self._avoid_output:
                return

            render_optical_flow(
                self._determine_output_dir(),
                self._temp_dir,
                self.config.get_bool('forward_flow', False),
                self.config.get_bool('backward_flow', False),
                self.config.get_bool('blender_image_coordinate_style', False),
                self.config.get_string('forward_flow_output_file_prefix', 'forward_flow_'),
                self.config.get_string("forward_flow_output_key", "forward_flow"),
                self.config.get_string('backward_flow_output_file_prefix', 'backward_flow_'),
                self.config.get_string("backward_flow_output_key", "backward_flow"),
                return_data=False)
Ejemplo n.º 4
0
    def run(self):
        """Renders segmentation maps for all frames, reverting scene changes afterwards."""
        # Attribute(s) used for color mapping plus fallback values per attribute.
        map_by = self.config.get_raw_dict("map_by", "class")
        default_values = self.config.get_raw_dict("default_values", {})

        with Utility.UndoAfterExecution():
            # Segmentation only needs flat id colors, so one sample is enough.
            self._configure_renderer(default_samples=1)

            # In the debug/avoid-output case nothing is rendered.
            if self._avoid_output:
                return

            render_segmap(
                self._determine_output_dir(),
                self._temp_dir,
                map_by,
                default_values,
                self.config.get_string("output_file_prefix", "segmap_"),
                self.config.get_string("output_key", "segmap"),
                self.config.get_string("segcolormap_output_file_prefix",
                                       "instance_attribute_map"),
                self.config.get_string("segcolormap_output_key", "segcolormap"),
                use_alpha_channel=self._use_alpha_channel)
Ejemplo n.º 5
0
def render_optical_flow(output_dir: str = None, temp_dir: str = None, get_forward_flow: bool = True,
                        get_backward_flow: bool = True, blender_image_coordinate_style: bool = False,
                        forward_flow_output_file_prefix: str = "forward_flow_",
                        forward_flow_output_key: str = "forward_flow",
                        backward_flow_output_file_prefix: str = "backward_flow_",
                        backward_flow_output_key: str = "backward_flow", return_data: bool = True) -> \
        Dict[str, Union[np.ndarray, List[np.ndarray]]]:
    """ Renders the optical flow (forward and backward) for all frames.

    :param output_dir: The directory to write images to.
    :param temp_dir: The directory to write intermediate data to.
    :param get_forward_flow: Whether to render forward optical flow.
    :param get_backward_flow: Whether to render backward optical flow.
    :param blender_image_coordinate_style: Whether to specify the image coordinate system at the bottom left (blender default; True) or top left (standard convention; False).
    :param forward_flow_output_file_prefix: The file prefix that should be used when writing forward flow to a file.
    :param forward_flow_output_key: The key which should be used for storing forward optical flow values.
    :param backward_flow_output_file_prefix: The file prefix that should be used when writing backward flow to a file.
    :param backward_flow_output_key: The key which should be used for storing backward optical flow values.
    :param return_data: Whether to load and return generated data. Backwards compatibility to config-based pipeline.
    :return: dict of lists of raw renderer outputs. Keys can be 'forward_flow', 'backward_flow'
    """
    # Rendering with both directions disabled would produce nothing -> fail fast.
    if get_forward_flow is False and get_backward_flow is False:
        raise Exception(
            "Take the FlowRenderer Module out of the config if both forward and backward flow are set to False!"
        )

    # Fall back to the temporary directory for any path the caller did not supply.
    if output_dir is None:
        output_dir = Utility.get_temporary_directory()
    if temp_dir is None:
        temp_dir = Utility.get_temporary_directory()

    # All renderer/compositor changes below are reverted when this block exits.
    with Utility.UndoAfterExecution():
        # Flow comes from the vector pass, not light transport, so a minimal
        # sampling/denoiser/bounce setup is sufficient and fast.
        RendererUtility._render_init()
        RendererUtility.set_samples(1)
        RendererUtility.set_adaptive_sampling(0)
        RendererUtility.set_denoiser(None)
        RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

        # Wire the compositor so the speed/vector fields are written out.
        FlowRendererUtility._output_vector_field(get_forward_flow,
                                                 get_backward_flow, output_dir)

        # only need to render once; both fwd and bwd flow will be saved
        # NOTE(review): the render call uses the 'bwd_flow_' prefix while the fwd
        # files are read from 'fwd_flow_' — presumably the compositor node set up
        # above writes both prefixes independently; confirm in FlowRendererUtility.
        temporary_fwd_flow_file_path = os.path.join(temp_dir, 'fwd_flow_')
        temporary_bwd_flow_file_path = os.path.join(temp_dir, 'bwd_flow_')
        RendererUtility.render(temp_dir, "bwd_flow_", None, load_keys=set())

        # After rendering: convert to optical flow or calculate hsv visualization, if desired
        # NOTE(review): frame_end is excluded by range() — assumes the project sets
        # frame_end one past the last frame; confirm against the camera setup.
        for frame in range(bpy.context.scene.frame_start,
                           bpy.context.scene.frame_end):
            # temporarily save respective vector fields
            if get_forward_flow:
                file_path = temporary_fwd_flow_file_path + "%04d" % frame + ".exr"
                fwd_flow_field = load_image(file_path,
                                            num_channels=4).astype(np.float32)

                # Flip the y component to move from Blender's bottom-left image
                # convention to the standard top-left convention.
                if not blender_image_coordinate_style:
                    fwd_flow_field[:, :, 1] = fwd_flow_field[:, :, 1] * -1

                fname = os.path.join(
                    output_dir,
                    forward_flow_output_file_prefix) + '%04d' % frame
                forward_flow = fwd_flow_field * -1  # invert forward flow to point at next frame
                # Only the first two channels (x, y displacement) are stored.
                np.save(fname + '.npy', forward_flow[:, :, :2])

            if get_backward_flow:
                file_path = temporary_bwd_flow_file_path + "%04d" % frame + ".exr"
                bwd_flow_field = load_image(file_path,
                                            num_channels=4).astype(np.float32)

                # Same y-flip as above for the backward field.
                if not blender_image_coordinate_style:
                    bwd_flow_field[:, :, 1] = bwd_flow_field[:, :, 1] * -1

                fname = os.path.join(
                    output_dir,
                    backward_flow_output_file_prefix) + '%04d' % frame
                np.save(fname + '.npy', bwd_flow_field[:, :, :2])

    load_keys = set()
    # register desired outputs
    if get_forward_flow:
        Utility.register_output(output_dir, forward_flow_output_file_prefix,
                                forward_flow_output_key, '.npy', '2.0.0')
        load_keys.add(forward_flow_output_key)
    if get_backward_flow:
        Utility.register_output(output_dir, backward_flow_output_file_prefix,
                                backward_flow_output_key, '.npy', '2.0.0')
        load_keys.add(backward_flow_output_key)

    # Load the just-written .npy files back only when the caller wants data.
    return WriterUtility.load_registered_outputs(
        load_keys) if return_data else {}
Ejemplo n.º 6
0
def simulate_physics_and_fix_final_poses(
        min_simulation_time: float = 4.0,
        max_simulation_time: float = 40.0,
        check_object_interval: float = 2.0,
        object_stopped_location_threshold: float = 0.01,
        object_stopped_rotation_threshold: float = 0.1,
        substeps_per_frame: int = 10,
        solver_iters: int = 10):
    """ Simulates the current scene and in the end fixes the final poses of all active objects.

    The simulation is run for at least `min_simulation_time` seconds and at a maximum `max_simulation_time` seconds.
    Every `check_object_interval` seconds, it is checked if the maximum object movement in the last second is below a given threshold.
    If that is the case, the simulation is stopped.

    After performing the simulation, the simulation cache is removed, the rigid body components are disabled and the pose of the active objects is set to their final pose in the simulation.

    :param min_simulation_time: The minimum number of seconds to simulate.
    :param max_simulation_time: The maximum number of seconds to simulate.
    :param check_object_interval: The interval in seconds at which all objects should be checked if they are still moving. If all objects
                                  have stopped moving, then the simulation will be stopped.
    :param object_stopped_location_threshold: The maximum difference per second and per coordinate in the location vector that is allowed, such
                                              that an object is still recognized as 'stopped moving'.
    :param object_stopped_rotation_threshold: The maximum difference per second and per coordinate in the rotation Euler vector that is allowed, such
                                              that an object is still recognized as 'stopped moving'.
    :param substeps_per_frame: Number of simulation steps taken per frame.
    :param solver_iters: Number of constraint solver iterations made per simulation step.
    """
    # Undo changes made in the simulation like origin adjustment and persisting the object's scale
    with Utility.UndoAfterExecution():
        # Run simulation and remember poses before and after
        obj_poses_before_sim = PhysicsSimulation._get_pose()
        origin_shifts = simulate_physics(min_simulation_time,
                                         max_simulation_time,
                                         check_object_interval,
                                         object_stopped_location_threshold,
                                         object_stopped_rotation_threshold,
                                         substeps_per_frame, solver_iters)
        obj_poses_after_sim = PhysicsSimulation._get_pose()

        # Make sure to remove the simulation cache as we are only interested in the final poses
        # NOTE(review): passing a context-override dict to bpy.ops is deprecated in
        # Blender >= 3.2 — confirm the Blender version this project targets.
        bpy.ops.ptcache.free_bake(
            {"point_cache": bpy.context.scene.rigidbody_world.point_cache})

    # Fix the pose of all objects to their pose at the end of the simulation (also revert origin shift)
    for obj in get_all_mesh_objects():
        if obj.has_rigidbody_enabled():
            # Skip objects that have parents with compound rigid body component
            # (their pose is controlled by the parent's compound shape).
            has_compound_parent = obj.get_parent() is not None and isinstance(obj.get_parent(), MeshObject) \
                                  and obj.get_parent().get_rigidbody() is not None \
                                  and obj.get_parent().get_rigidbody().collision_shape == "COMPOUND"
            if obj.get_rigidbody(
            ).type == "ACTIVE" and not has_compound_parent:
                # compute relative object rotation before and after simulation
                R_obj_before_sim = mathutils.Euler(obj_poses_before_sim[
                    obj.get_name()]['rotation']).to_matrix()
                R_obj_after = mathutils.Euler(obj_poses_after_sim[
                    obj.get_name()]['rotation']).to_matrix()
                R_obj_rel = R_obj_before_sim @ R_obj_after.transposed()
                # Apply relative rotation to origin shift, so the shift (recorded in
                # the pre-simulation orientation) is expressed in the final orientation.
                origin_shift = R_obj_rel.transposed() @ mathutils.Vector(
                    origin_shifts[obj.get_name()])

                # Fix pose of object to the one it had at the end of the simulation
                obj.set_location(
                    obj_poses_after_sim[obj.get_name()]['location'] -
                    origin_shift)
                obj.set_rotation_euler(
                    obj_poses_after_sim[obj.get_name()]['rotation'])

    # Deactivate the simulation so it does not influence object positions
    bpy.context.scene.rigidbody_world.enabled = False
    bpy.context.view_layer.update()
def render_segmap(output_dir: Optional[str] = None, temp_dir: Optional[str] = None,
                  map_by: Union[str, List[str]] = "class",
                  default_values: Optional[Dict[str, int]] = None, file_prefix: str = "segmap_",
                  output_key: str = "segmap", segcolormap_output_file_prefix: str = "instance_attribute_map_",
                  segcolormap_output_key: str = "segcolormap", use_alpha_channel: bool = False,
                  render_colorspace_size_per_dimension: int = 2048) -> \
        Dict[str, Union[np.ndarray, List[np.ndarray]]]:
    """ Renders segmentation maps for all frames

    :param output_dir: The directory to write images to.
    :param temp_dir: The directory to write intermediate data to.
    :param map_by: The attributes to be used for color mapping.
    :param default_values: The default values used for the keys used in attributes; defaults to {"class": 0} if None.
    :param file_prefix: The prefix to use for writing the images.
    :param output_key: The key to use for registering the output.
    :param segcolormap_output_file_prefix: The prefix to use for writing the segmentation-color map csv.
    :param segcolormap_output_key: The key to use for registering the segmentation-color map output.
    :param use_alpha_channel: If true, the alpha channel stored in .png textures is used.
    :param render_colorspace_size_per_dimension: As we use float16 for storing the rendering, the interval of \
                                                 integers which can be precisely stored is [-2048, 2048]. As \
                                                 blender does not allow negative values for colors, we use \
                                                 [0, 2048] ** 3 as our color space which allows ~8 billion \
                                                 different colors/objects. This should be enough.
    :return: dict of lists of segmaps and (for instance segmentation) segcolormaps
    """

    # Fall back to the temporary directory for any path the caller did not supply.
    if output_dir is None:
        output_dir = Utility.get_temporary_directory()
    if temp_dir is None:
        temp_dir = Utility.get_temporary_directory()
    if default_values is None:
        default_values = {"class": 0}

    # All renderer/material changes below are reverted when this block exits.
    with Utility.UndoAfterExecution():
        # Id colors are flat, so a minimal sampling/denoiser/bounce setup suffices.
        RendererUtility._render_init()
        RendererUtility.set_samples(1)
        RendererUtility.set_adaptive_sampling(0)
        RendererUtility.set_denoiser(None)
        RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

        attributes = map_by
        # 'class' is resolved via the 'category_id' custom property below, so its
        # default must also be available under the 'cp_category_id' key.
        if 'class' in default_values:
            default_values['cp_category_id'] = default_values['class']

        # Get objects with meshes (i.e. not lights or cameras)
        objs_with_mats = get_all_blender_mesh_objects()

        # Assign each object a unique, equally spaced color inside the colorspace.
        colors, num_splits_per_dimension, objects = _colorize_objects_for_instance_segmentation(
            objs_with_mats, use_alpha_channel,
            render_colorspace_size_per_dimension)

        # Disable the pixel filter — presumably to keep neighboring id colors from
        # being blended at object edges; confirm against the colorspace decoding.
        bpy.context.scene.cycles.filter_width = 0.0

        if use_alpha_channel:
            MaterialLoaderUtility.add_alpha_channel_to_textures(
                blurry_edges=False)

        # Determine path for temporary and for final output
        temporary_segmentation_file_path = os.path.join(temp_dir, "seg_")
        final_segmentation_file_path = os.path.join(output_dir, file_prefix)

        # float16 EXR keeps integer id colors exact within the chosen colorspace.
        RendererUtility.set_output_format("OPEN_EXR", 16)
        RendererUtility.render(temp_dir, "seg_", None, return_data=False)

        # Find optimal dtype of output based on max index
        # (smallest unsigned integer type that can hold the largest color index).
        for dtype in [np.uint8, np.uint16, np.uint32]:
            optimal_dtype = dtype
            if np.iinfo(optimal_dtype).max >= len(colors) - 1:
                break

        # Normalize map_by to a list; one result channel per requested attribute.
        if isinstance(attributes, str):
            # only one result is requested
            result_channels = 1
            attributes = [attributes]
        elif isinstance(attributes, list):
            result_channels = len(attributes)
        else:
            raise Exception(
                "The type of this is not supported here: {}".format(
                    attributes))

        # define them for the avoid rendering case
        there_was_an_instance_rendering = False
        list_of_attributes: List[str] = []

        # Check if stereo is enabled
        if bpy.context.scene.render.use_multiview:
            suffixes = ["_L", "_R"]
        else:
            suffixes = [""]

        # Collected per-attribute segmaps (and csv mappings) returned to the caller.
        return_dict: Dict[str, Union[np.ndarray, List[np.ndarray]]] = {}

        # NOTE(review): this initialization is immediately repeated inside the frame
        # loop below — the outer one only matters if the frame range is empty.
        save_in_csv_attributes: Dict[int, Dict[str, Any]] = {}
        # After rendering
        for frame in range(
                bpy.context.scene.frame_start,
                bpy.context.scene.frame_end):  # for each rendered frame
            save_in_csv_attributes: Dict[int, Dict[str, Any]] = {}
            for suffix in suffixes:
                file_path = temporary_segmentation_file_path + (
                    "%04d" % frame) + suffix + ".exr"
                segmentation = load_image(file_path)
                # NOTE(review): debug print left in — consider logging instead.
                print(file_path, segmentation.shape)

                # Decode the rendered colors back into per-pixel object indices.
                segmap = Utility.map_back_from_equally_spaced_equidistant_values(
                    segmentation, num_splits_per_dimension,
                    render_colorspace_size_per_dimension)
                segmap = segmap.astype(optimal_dtype)

                object_ids = np.unique(segmap)
                max_id = np.max(object_ids)
                if max_id >= len(objects):
                    raise Exception(
                        "There are more object colors than there are objects")
                combined_result_map = []
                there_was_an_instance_rendering = False
                list_of_attributes = []
                channels = []
                # Build one id map per requested attribute.
                for channel_id in range(result_channels):
                    num_default_values = 0
                    resulting_map = np.zeros(
                        (segmap.shape[0], segmap.shape[1]),
                        dtype=optimal_dtype)
                    was_used = False
                    current_attribute = attributes[channel_id]
                    org_attribute = current_attribute

                    # if the class is used the category_id attribute is evaluated
                    if current_attribute == "class":
                        current_attribute = "cp_category_id"
                    # in the instance case the resulting ids are directly used
                    if current_attribute == "instance":
                        there_was_an_instance_rendering = True
                        resulting_map = segmap
                        was_used = True
                    else:
                        if current_attribute != "cp_category_id":
                            list_of_attributes.append(current_attribute)
                        # for the current attribute remove cp_ and _csv, if present
                        attribute = current_attribute
                        if attribute.startswith("cp_"):
                            attribute = attribute[len("cp_"):]
                        # check if a default value was specified
                        default_value_set = False
                        if current_attribute in default_values or attribute in default_values:
                            default_value_set = True
                            if current_attribute in default_values:
                                default_value = default_values[
                                    current_attribute]
                            elif attribute in default_values:
                                default_value = default_values[attribute]
                        # iterate over all object ids
                        for object_id in object_ids:
                            # Convert np.uint8 to int, such that the save_in_csv_attributes dict can later be serialized
                            object_id = int(object_id)
                            # get the corresponding object via the id
                            current_obj = objects[object_id]
                            # if the current obj has a attribute with that name -> get it
                            if hasattr(current_obj, attribute):
                                value = getattr(current_obj, attribute)
                            # if the current object has a custom property with that name -> get it
                            elif current_attribute.startswith(
                                    "cp_") and attribute in current_obj:
                                value = current_obj[attribute]
                            elif current_attribute.startswith("cf_"):
                                # "custom function" attributes; only cf_basename is handled.
                                if current_attribute == "cf_basename":
                                    value = current_obj.name
                                    if "." in value:
                                        value = value[:value.rfind(".")]
                            elif default_value_set:
                                # if none of the above applies use the default value
                                value = default_value
                                num_default_values += 1
                            else:
                                # if the requested current_attribute is not a custom property or a attribute
                                # or there is a default value stored
                                # it throws an exception
                                raise Exception(
                                    "The obj: {} does not have the "
                                    "attribute: {}, striped: {}. Maybe try a default "
                                    "value.".format(current_obj.name,
                                                    current_attribute,
                                                    attribute))

                            # save everything which is not instance also in the .csv
                            # (only numeric values can be written into the id map).
                            if isinstance(
                                    value,
                                (int, float, np.integer, np.floating)):
                                was_used = True
                                resulting_map[segmap == object_id] = value

                            if object_id in save_in_csv_attributes:
                                save_in_csv_attributes[object_id][
                                    attribute] = value
                            else:
                                save_in_csv_attributes[object_id] = {
                                    attribute: value
                                }

                    # Keep the channel only if at least one real (non-default) value was written.
                    if was_used and num_default_values < len(object_ids):
                        channels.append(org_attribute)
                        combined_result_map.append(resulting_map)
                        return_dict.setdefault(
                            "{}_segmaps{}".format(org_attribute, suffix),
                            []).append(resulting_map)

                fname = final_segmentation_file_path + ("%04d" %
                                                        frame) + suffix
                # combine all resulting images to one image
                resulting_map = np.stack(combined_result_map, axis=2)
                # remove the unneeded third dimension
                if resulting_map.shape[2] == 1:
                    resulting_map = resulting_map[:, :, 0]
                # TODO: Remove unnecessary save when we give up backwards compatibility
                np.save(fname, resulting_map)

            if there_was_an_instance_rendering:
                mappings = []
                for object_id, attribute_dict in save_in_csv_attributes.items(
                ):
                    mappings.append({"idx": object_id, **attribute_dict})
                return_dict.setdefault("instance_attribute_maps",
                                       []).append(mappings)

                # write color mappings to file
                # TODO: Remove unnecessary csv file when we give up backwards compatibility
                csv_file_path = os.path.join(
                    output_dir,
                    segcolormap_output_file_prefix + ("%04d.csv" % frame))
                with open(csv_file_path, 'w', newline='') as csvfile:
                    # get from the first element the used field names
                    fieldnames = ["idx"]
                    # get all used object element keys
                    for object_element in save_in_csv_attributes.values():
                        fieldnames.extend(list(object_element.keys()))
                        break
                    for channel_name in channels:
                        fieldnames.append("channel_{}".format(channel_name))
                    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                    writer.writeheader()
                    # save for each object all values in one row
                    for obj_idx, object_element in save_in_csv_attributes.items(
                    ):
                        object_element["idx"] = obj_idx
                        for i, channel_name in enumerate(channels):
                            object_element["channel_{}".format(
                                channel_name)] = i
                        writer.writerow(object_element)
            else:
                # NOTE(review): 'may_by' in the message below is likely a typo for
                # 'map_by' (runtime string left unchanged in this review pass).
                if len(list_of_attributes) > 0:
                    raise Exception(
                        "There were attributes specified in the may_by, which could not be saved as "
                        "there was no \"instance\" may_by key used. This is true for this/these "
                        "keys: {}".format(", ".join(list_of_attributes)))
                # if there was no instance rendering no .csv file is generated!
                # delete all saved infos about .csv
                save_in_csv_attributes = {}

    Utility.register_output(output_dir, file_prefix, output_key, ".npy",
                            "2.0.0")

    # Register the csv only when the (last frame's) instance rendering produced one.
    if save_in_csv_attributes:
        Utility.register_output(output_dir, segcolormap_output_file_prefix,
                                segcolormap_output_key, ".csv", "2.0.0")
    return return_dict