Example #1
    def render(output_dir: Union[str, None] = None, file_prefix: str = "rgb_", output_key: str = "colors",
               load_keys: Union[Set[str], None] = None, return_data: bool = True) -> Dict[str, List[np.ndarray]]:
        """ Render all frames.

        This will go through all frames from scene.frame_start to scene.frame_end and render each of them.

        :param output_dir: The directory to write files to. If this is None, the temporary directory is used. \
                           On Linux, the temporary directory usually resides in shared memory.
        :param file_prefix: The prefix to use for writing the images.
        :param output_key: The key to use for registering the output.
        :param load_keys: Set of output keys to load when available
        :param return_data: Whether to load and return generated data. Backwards compatibility to config-based pipeline.
        :return: dict of lists of raw renderer output. Keys can be 'distance', 'colors', 'normals'
        """
        if output_dir is None:
            output_dir = Utility.get_temporary_directory()
        if load_keys is None:
            load_keys = {'colors', 'distance', 'normals'}

        if output_key is not None:
            Utility.add_output_entry({
                "key": output_key,
                "path": os.path.join(output_dir, file_prefix) + "%04d" +
                        RendererUtility.map_file_format_to_file_ending(bpy.context.scene.render.image_settings.file_format),
                "version": "2.0.0"
            })
            load_keys.add(output_key)

        bpy.context.scene.render.filepath = os.path.join(output_dir, file_prefix)

        # Skip if there is nothing to render
        if bpy.context.scene.frame_end != bpy.context.scene.frame_start:
            if len(get_all_blender_mesh_objects()) == 0:
                raise Exception("There are no mesh-objects to render, "
                                "please load an object before invoking the renderer.")
            # As frame_end is pointing to the next free frame, decrease it by one, as
            # blender will render all frames in [frame_start, frame_end]
            bpy.context.scene.frame_end -= 1
            bpy.ops.render.render(animation=True, write_still=True)
            # Revert changes
            bpy.context.scene.frame_end += 1
        
        return WriterUtility.load_registered_outputs(load_keys) if return_data else {}
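
A minimal usage sketch for the function above (an illustration, not part of the source): RendererUtility is assumed to be the class holding this render() method, as the calls in the later examples suggest, the output directory is a placeholder, and the code is assumed to run inside a Blender/BlenderProc session where objects, lights and camera poses have already been set up.

    # Hypothetical driver code; only the render() signature comes from the snippet above.
    data = RendererUtility.render(
        output_dir="/tmp/rgb_output",   # None would fall back to the temporary directory
        file_prefix="rgb_",
        output_key="colors",
        load_keys={"colors"},           # only load the color images back into memory
        return_data=True,
    )
    # data["colors"] holds one np.ndarray per rendered frame
    for frame_idx, img in enumerate(data["colors"]):
        print(frame_idx, img.shape, img.dtype)
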
Example #2
    def render(output_dir: str,
               temp_dir: str,
               get_forward_flow: bool,
               get_backward_flow: bool,
               blender_image_coordinate_style: bool = False,
               forward_flow_output_file_prefix: str = "forward_flow_",
               forward_flow_output_key: str = "forward_flow",
               backward_flow_output_file_prefix: str = "backward_flow_",
               backward_flow_output_key: str = "backward_flow",
               return_data: bool = True) -> Dict[str, List[np.ndarray]]:
        """ Renders the optical flow (forward and backward) for all frames.

        :param output_dir: The directory to write images to.
        :param temp_dir: The directory to write intermediate data to.
        :param get_forward_flow: Whether to render forward optical flow.
        :param get_backward_flow: Whether to render backward optical flow.
        :param blender_image_coordinate_style: If True, the image coordinate origin is at the bottom left (Blender default); if False, at the top left (standard convention).
        :param forward_flow_output_file_prefix: The file prefix that should be used when writing forward flow to a file.
        :param forward_flow_output_key: The key which should be used for storing forward optical flow values.
        :param backward_flow_output_file_prefix: The file prefix that should be used when writing backward flow to a file.
        :param backward_flow_output_key: The key which should be used for storing backward optical flow values.
        :param return_data: Whether to load and return generated data. Backwards compatibility to config-based pipeline.
        :return: dict of lists of raw renderer outputs. Keys can be 'forward_flow', 'backward_flow'
        """
        if get_forward_flow is False and get_backward_flow is False:
            raise Exception(
                "Take the FlowRenderer Module out of the config if both forward and backward flow are set to False!"
            )

        with Utility.UndoAfterExecution():
            RendererUtility.init()
            RendererUtility.set_samples(1)
            RendererUtility.set_adaptive_sampling(0)
            RendererUtility.set_denoiser(None)
            RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

            FlowRendererUtility._output_vector_field(get_forward_flow,
                                                     get_backward_flow,
                                                     output_dir)

            # only need to render once; both fwd and bwd flow will be saved
            temporary_fwd_flow_file_path = os.path.join(temp_dir, 'fwd_flow_')
            temporary_bwd_flow_file_path = os.path.join(temp_dir, 'bwd_flow_')
            RendererUtility.render(temp_dir, "bwd_flow_", None)

            # After rendering: load the temporary vector fields, adjust the coordinate convention if requested and save them as .npy
            for frame in range(bpy.context.scene.frame_start,
                               bpy.context.scene.frame_end):
                # temporarily save respective vector fields
                if get_forward_flow:
                    file_path = temporary_fwd_flow_file_path + "%04d" % frame + ".exr"
                    fwd_flow_field = load_image(
                        file_path, num_channels=4).astype(np.float32)

                    if not blender_image_coordinate_style:
                        fwd_flow_field[:, :, 1] = fwd_flow_field[:, :, 1] * -1

                    fname = os.path.join(
                        output_dir,
                        forward_flow_output_file_prefix) + '%04d' % frame
                    forward_flow = fwd_flow_field * -1  # invert forward flow to point at next frame
                    np.save(fname + '.npy', forward_flow[:, :, :2])

                if get_backward_flow:
                    file_path = temporary_bwd_flow_file_path + "%04d" % frame + ".exr"
                    bwd_flow_field = load_image(
                        file_path, num_channels=4).astype(np.float32)

                    if not blender_image_coordinate_style:
                        bwd_flow_field[:, :, 1] = bwd_flow_field[:, :, 1] * -1

                    fname = os.path.join(
                        output_dir,
                        backward_flow_output_file_prefix) + '%04d' % frame
                    np.save(fname + '.npy', bwd_flow_field[:, :, :2])

        load_keys = set()
        # register desired outputs
        if get_forward_flow:
            Utility.register_output(output_dir,
                                    forward_flow_output_file_prefix,
                                    forward_flow_output_key, '.npy', '2.0.0')
            load_keys.add(forward_flow_output_key)
        if get_backward_flow:
            Utility.register_output(output_dir,
                                    backward_flow_output_file_prefix,
                                    backward_flow_output_key, '.npy', '2.0.0')
            load_keys.add(backward_flow_output_key)

        return WriterUtility.load_registered_outputs(
            load_keys) if return_data else {}
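
A similar hedged usage sketch for the optical-flow renderer above; FlowRendererUtility is assumed to be the class holding this method, and the directories are placeholders.

    import numpy as np

    # Hypothetical driver code: render forward flow only, with the origin at the top left.
    flow_data = FlowRendererUtility.render(
        output_dir="/tmp/flow_output",
        temp_dir="/tmp/flow_temp",
        get_forward_flow=True,
        get_backward_flow=False,
        blender_image_coordinate_style=False,  # top-left origin (standard convention)
        return_data=True,
    )
    # each entry is an (H, W, 2) array of per-pixel displacements towards the next frame
    for flow in flow_data["forward_flow"]:
        print(flow.shape, float(np.abs(flow).max()))
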
    def render(output_dir: str, temp_dir: str, used_attributes: Union[str, List[str]],
               used_default_values: Union[Dict[str, str], None] = None, file_prefix: str = "segmap_",
               output_key: str = "segmap", segcolormap_output_file_prefix: str = "class_inst_col_map",
               segcolormap_output_key: str = "segcolormap", use_alpha_channel: bool = False,
               render_colorspace_size_per_dimension: int = 2048, return_data: bool = True) -> Dict[str, List[np.ndarray]]:
        """ Renders segmentation maps for all frames

        :param output_dir: The directory to write images to.
        :param temp_dir: The directory to write intermediate data to.
        :param used_attributes: The attributes to be used for color mapping.
        :param used_default_values: The default values used for the keys used in used_attributes.
        :param file_prefix: The prefix to use for writing the images.
        :param output_key: The key to use for registering the output.
        :param segcolormap_output_file_prefix: The prefix to use for writing the segmentation-color map csv.
        :param segcolormap_output_key: The key to use for registering the segmentation-color map output.
        :param use_alpha_channel: If true, the alpha channel stored in .png textures is used.
        :param render_colorspace_size_per_dimension: As we use float16 for storing the rendering, the interval of \
                                                     integers which can be precisely stored is [-2048, 2048]. As \
                                                     blender does not allow negative values for colors, we use \
                                                     [0, 2048] ** 3 as our color space which allows ~8 billion \
                                                     different colors/objects. This should be enough.
        :param return_data: Whether to load and return generated data. Backwards compatibility to config-based pipeline.
        :return: dict of lists of segmaps and (for instance segmentation) segcolormaps
        """
        with Utility.UndoAfterExecution():
            RendererUtility.init()
            RendererUtility.set_samples(1)
            RendererUtility.set_adaptive_sampling(0)
            RendererUtility.set_denoiser(None)
            RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

            # Get objects with meshes (i.e. not lights or cameras)
            objs_with_mats = get_all_blender_mesh_objects()

            colors, num_splits_per_dimension, used_objects = \
                SegMapRendererUtility._colorize_objects_for_instance_segmentation(objs_with_mats,
                                                                                  use_alpha_channel,
                                                                                  render_colorspace_size_per_dimension)

            bpy.context.scene.cycles.filter_width = 0.0

            if use_alpha_channel:
                MaterialLoaderUtility.add_alpha_channel_to_textures(blurry_edges=False)

            # Determine path for temporary and for final output
            temporary_segmentation_file_path = os.path.join(temp_dir, "seg_")
            final_segmentation_file_path = os.path.join(output_dir, file_prefix)

            RendererUtility.set_output_format("OPEN_EXR", 16)
            RendererUtility.render(temp_dir, "seg_", None)

            # Find optimal dtype of output based on max index
            for dtype in [np.uint8, np.uint16, np.uint32]:
                optimal_dtype = dtype
                if np.iinfo(optimal_dtype).max >= len(colors) - 1:
                    break
            if used_default_values is None:
                used_default_values = {}
            elif 'class' in used_default_values:
                used_default_values['cp_category_id'] = used_default_values['class']

            if isinstance(used_attributes, str):
                # only one result is requested
                result_channels = 1
                used_attributes = [used_attributes]
            elif isinstance(used_attributes, list):
                result_channels = len(used_attributes)
            else:
                raise Exception("The type of this is not supported here: {}".format(used_attributes))

            save_in_csv_attributes = {}
            # defaults in case the frame loop below never runs
            there_was_an_instance_rendering = False
            list_of_used_attributes = []

            # Check if stereo is enabled
            if bpy.context.scene.render.use_multiview:
                suffixes = ["_L", "_R"]
            else:
                suffixes = [""]

            # After rendering
            for frame in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):  # for each rendered frame
                for suffix in suffixes:
                    file_path = temporary_segmentation_file_path + ("%04d" % frame) + suffix + ".exr"
                    segmentation = load_image(file_path)
                    print(file_path, segmentation.shape)

                    segmap = Utility.map_back_from_equally_spaced_equidistant_values(segmentation,
                                                                                     num_splits_per_dimension,
                                                                                     render_colorspace_size_per_dimension)
                    segmap = segmap.astype(optimal_dtype)

                    used_object_ids = np.unique(segmap)
                    max_id = np.max(used_object_ids)
                    if max_id >= len(used_objects):
                        raise Exception("There are more object colors than there are objects")
                    combined_result_map = []
                    there_was_an_instance_rendering = False
                    list_of_used_attributes = []
                    used_channels = []
                    for channel_id in range(result_channels):
                        resulting_map = np.empty((segmap.shape[0], segmap.shape[1]))
                        was_used = False
                        current_attribute = used_attributes[channel_id]
                        org_attribute = current_attribute

                        # if "class" is requested, the category_id custom property is evaluated instead
                        if current_attribute == "class":
                            current_attribute = "cp_category_id"
                        # in the instance case the resulting ids are directly used
                        if current_attribute == "instance":
                            there_was_an_instance_rendering = True
                            resulting_map = segmap
                            was_used = True
                            # a non default value was also used
                            non_default_value_was_used = True
                        else:
                            if current_attribute != "cp_category_id":
                                list_of_used_attributes.append(current_attribute)
                            # strip the cp_ prefix from the current attribute, if present
                            used_attribute = current_attribute
                            if used_attribute.startswith("cp_"):
                                used_attribute = used_attribute[len("cp_"):]
                            # check if a default value was specified
                            default_value_set = False
                            if current_attribute in used_default_values or used_attribute in used_default_values:
                                default_value_set = True
                                if current_attribute in used_default_values:
                                    default_value = used_default_values[current_attribute]
                                elif used_attribute in used_default_values:
                                    default_value = used_default_values[used_attribute]
                            last_state_save_in_csv = None
                            # track whether a non-default value was used, so channels holding only default values are skipped
                            non_default_value_was_used = False
                            # iterate over all object ids
                            for object_id in used_object_ids:
                                is_default_value = False
                                # get the corresponding object via the id
                                current_obj = used_objects[object_id]
                                # if the current obj has an attribute with that name -> get it
                                if hasattr(current_obj, used_attribute):
                                    used_value = getattr(current_obj, used_attribute)
                                # if the current object has a custom property with that name -> get it
                                elif current_attribute.startswith("cp_") and used_attribute in current_obj:
                                    used_value = current_obj[used_attribute]
                                elif current_attribute.startswith("cf_"):
                                    if current_attribute == "cf_basename":
                                        used_value = current_obj.name
                                        if "." in used_value:
                                            used_value = used_value[:used_value.rfind(".")]
                                elif default_value_set:
                                    # if none of the above applies use the default value
                                    used_value = default_value
                                    is_default_value = True
                                else:
                                    # the requested attribute is neither an attribute nor a custom
                                    # property of the object, and no default value was provided,
                                    # so raise an exception
                                    raise Exception("The obj: {} does not have the "
                                                    "attribute: {}, stripped: {}. Maybe try a default "
                                                    "value.".format(current_obj.name, current_attribute, used_attribute))

                                # check if the value should be saved as an image or in the csv file
                                save_in_csv = False
                                try:
                                    resulting_map[segmap == object_id] = used_value
                                    was_used = True
                                    if not is_default_value:
                                        non_default_value_was_used = True
                                    # save everything which is not instance also in the .csv
                                    if current_attribute != "instance":
                                        save_in_csv = True
                                except ValueError:
                                    save_in_csv = True

                                if last_state_save_in_csv is not None and last_state_save_in_csv != save_in_csv:
                                    raise Exception("During creating the mapping, the saving to an image or a csv file "
                                                    "switched, this might indicated that the used default value, does "
                                                    "not have the same type as the returned value, "
                                                    "for: {}".format(current_attribute))
                                last_state_save_in_csv = save_in_csv
                                if save_in_csv:
                                    if object_id in save_in_csv_attributes:
                                        save_in_csv_attributes[object_id][used_attribute] = used_value
                                    else:
                                        save_in_csv_attributes[object_id] = {used_attribute: used_value}
                        if was_used and non_default_value_was_used:
                            used_channels.append(org_attribute)
                            combined_result_map.append(resulting_map)

                    fname = final_segmentation_file_path + ("%04d" % frame) + suffix
                    # combine all resulting images to one image
                    resulting_map = np.stack(combined_result_map, axis=2)
                    # remove the unneeded third dimension
                    if resulting_map.shape[2] == 1:
                        resulting_map = resulting_map[:, :, 0]
                    np.save(fname, resulting_map)

            if not there_was_an_instance_rendering:
                if len(list_of_used_attributes) > 0:
                    raise Exception("There were attributes specified in the may_by, which could not be saved as "
                                    "there was no \"instance\" may_by key used. This is true for this/these "
                                    "keys: {}".format(", ".join(list_of_used_attributes)))
                # if there was no instance rendering, no .csv file is generated,
                # so discard all values collected for the .csv
                save_in_csv_attributes = {}

            # write color mappings to file
            if save_in_csv_attributes:
                csv_file_path = os.path.join(output_dir, segcolormap_output_file_prefix + ".csv")
                with open(csv_file_path, 'w', newline='') as csvfile:
                    # build the csv header: an index column plus the attribute keys
                    fieldnames = ["idx"]
                    # get all used object element keys
                    for object_element in save_in_csv_attributes.values():
                        fieldnames.extend(list(object_element.keys()))
                        break
                    for channel_name in used_channels:
                        fieldnames.append("channel_{}".format(channel_name))
                    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                    writer.writeheader()
                    # save for each object all values in one row
                    for obj_idx, object_element in save_in_csv_attributes.items():
                        object_element["idx"] = obj_idx
                        for i, channel_name in enumerate(used_channels):
                            object_element["channel_{}".format(channel_name)] = i
                        writer.writerow(object_element)

        Utility.register_output(output_dir, file_prefix, output_key, ".npy", "2.0.0")
        load_keys = {output_key}
        if save_in_csv_attributes:
            Utility.register_output(output_dir,
                                    segcolormap_output_file_prefix,
                                    segcolormap_output_key,
                                    ".csv",
                                    "2.0.0",
                                    unique_for_camposes=False)
            load_keys.add(segcolormap_output_key)
        
        return WriterUtility.load_registered_outputs(load_keys) if return_data else {}
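
A hedged usage sketch for the segmentation renderer above; SegMapRendererUtility is assumed to be the class holding this method, and the directories as well as the category_id custom property on the loaded objects are assumptions for illustration.

    import numpy as np

    # Hypothetical driver code: render instance and class segmentation for all frames.
    seg_data = SegMapRendererUtility.render(
        output_dir="/tmp/seg_output",
        temp_dir="/tmp/seg_temp",
        used_attributes=["instance", "class"],
        used_default_values={"class": 0},  # fallback category id for objects without one
        return_data=True,
    )
    # seg_data["segmap"]: one array per frame with the requested channels stacked along
    # the last axis; seg_data["segcolormap"] (if present) is the csv mapping of instance
    # ids to attribute values.
    for segmap in seg_data["segmap"]:
        instance_channel = segmap[..., 0] if segmap.ndim == 3 else segmap
        print(segmap.shape, np.unique(instance_channel).size, "instances")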