def write_attributes_to_file(self, item_writer, items, default_file_prefix, default_output_key, default_attributes, version="1.0.0"):
    """ Writes the state of the given items to a file with the configured prefix.

    This method also registers the corresponding output.

    :param item_writer: The item writer object to use. Type: object.
    :param items: The list of items. Type: list.
    :param default_file_prefix: The default file name prefix to use. Type: string.
    :param default_output_key: The default output key to use. Type: string.
    :param default_attributes: The default attributes to write, if no attributes are specified in the config. Type: list.
    :param version: The version to use when registering the output. Type: string.
    """
    # Respect the global switch that disables all output production.
    if self._avoid_output:
        print("Avoid output is on, no output produced!")
        return

    # The config may override every caller-supplied default.
    file_prefix = self.config.get_string("output_file_prefix", default_file_prefix)
    attributes_to_write = self.config.get_list("attributes_to_write", default_attributes)
    output_key = self.config.get_string("output_key", default_output_key)

    # Write the item states next to the other outputs, then register the result.
    destination_prefix = os.path.join(self._determine_output_dir(), file_prefix)
    item_writer.write_items_to_file(destination_prefix, items, attributes_to_write,
                                    world_frame_change=self.destination_frame)
    Utility.register_output(self._determine_output_dir(), file_prefix, output_key, ".npy", version)
def run(self):
    """ Does the stereo global matching in the following steps:

    1. Collect camera object and its state,
    2. For each frame, load left and right images and call the `sgm()` method.
    3. Write the results to a numpy file.

    :raises RuntimeError: If no distance rendering was executed before, i.e. `renderer_distance_end` is unset.
    """
    if self._avoid_output:
        print("Avoid output is on, no output produced!")
        return

    # The maximum render distance is needed to bound the depth computed from disparity.
    if GlobalStorage.is_in_storage("renderer_distance_end"):
        depth_max = GlobalStorage.get("renderer_distance_end")
    else:
        raise RuntimeError(
            "A distance rendering has to be executed before this module is executed, "
            "else the `renderer_distance_end` is not set!")

    rgb_output_path = Utility.find_registered_output_by_key(
        self.rgb_output_key)["path"]

    frame_start = bpy.context.scene.frame_start
    frame_end = bpy.context.scene.frame_end

    # Build the "<base>_L.<ext>" / "<base>_R.<ext>" path templates once (loop-invariant).
    # os.path.splitext only splits off the final extension, so directory components
    # containing dots (e.g. "./output") cannot corrupt the paths, unlike a plain split(".").
    base_path, extension = os.path.splitext(rgb_output_path)
    path_l = "{}_L{}".format(base_path, extension)
    path_r = "{}_R{}".format(base_path, extension)

    # Load the stereo pair for every frame; frame_end is exclusive here (range semantics).
    color_images = []
    for frame in range(frame_start, frame_end):
        imgL = load_image(path_l % frame)
        imgR = load_image(path_r % frame)
        color_images.append(np.stack((imgL, imgR), 0))

    depth, disparity = stereo_global_matching(
        color_images=color_images,
        depth_max=depth_max,
        window_size=self.config.get_int("window_size", 7),
        num_disparities=self.config.get_int("num_disparities", 32),
        min_disparity=self.config.get_int("min_disparity", 0),
        disparity_filter=self.config.get_bool("disparity_filter", True),
        depth_completion=self.config.get_bool("depth_completion", True))

    # Read the flag once instead of once per frame.
    output_disparity = self.config.get_bool("output_disparity", False)
    for frame in range(frame_start, frame_end):
        # `depth`/`disparity` are 0-indexed over the rendered frames, while `frame` is the
        # absolute blender frame number — subtract frame_start so the right result is saved
        # even when the scene does not start at frame 0.
        np.save(
            os.path.join(self._determine_output_dir(), "stereo-depth_%04d") %
            frame, depth[frame - frame_start])

        if output_disparity:
            np.save(
                os.path.join(self._determine_output_dir(), "disparity_%04d") %
                frame, disparity[frame - frame_start])

    Utility.register_output(self._determine_output_dir(), "stereo-depth_",
                            "stereo-depth", ".npy", "1.0.0")
    if output_disparity:
        Utility.register_output(self._determine_output_dir(), "disparity_",
                                "disparity", ".npy", "1.0.0")
def render_optical_flow(output_dir: Optional[str] = None, temp_dir: Optional[str] = None,
                        get_forward_flow: bool = True,
                        get_backward_flow: bool = True,
                        blender_image_coordinate_style: bool = False,
                        forward_flow_output_file_prefix: str = "forward_flow_",
                        forward_flow_output_key: str = "forward_flow",
                        backward_flow_output_file_prefix: str = "backward_flow_",
                        backward_flow_output_key: str = "backward_flow",
                        return_data: bool = True) -> \
        Dict[str, Union[np.ndarray, List[np.ndarray]]]:
    """ Renders the optical flow (forward and backward) for all frames.

    :param output_dir: The directory to write images to. Defaults to the temporary directory if None.
    :param temp_dir: The directory to write intermediate data to. Defaults to the temporary directory if None.
    :param get_forward_flow: Whether to render forward optical flow.
    :param get_backward_flow: Whether to render backward optical flow.
    :param blender_image_coordinate_style: Whether to specify the image coordinate system at the bottom left
                                           (blender default; True) or top left (standard convention; False).
    :param forward_flow_output_file_prefix: The file prefix that should be used when writing forward flow to a file.
    :param forward_flow_output_key: The key which should be used for storing forward optical flow values.
    :param backward_flow_output_file_prefix: The file prefix that should be used when writing backward flow to a file.
    :param backward_flow_output_key: The key which should be used for storing backward optical flow values.
    :param return_data: Whether to load and return generated data. Backwards compatibility to config-based pipeline.
    :return: dict of lists of raw renderer outputs. Keys can be 'forward_flow', 'backward_flow'
    """
    # Rendering neither direction would be a pure no-op, so treat it as a configuration error.
    if get_forward_flow is False and get_backward_flow is False:
        raise Exception(
            "Take the FlowRenderer Module out of the config if both forward and backward flow are set to False!"
        )

    if output_dir is None:
        output_dir = Utility.get_temporary_directory()
    if temp_dir is None:
        temp_dir = Utility.get_temporary_directory()

    # All renderer-settings changes below are reverted when this context exits.
    with Utility.UndoAfterExecution():
        RendererUtility._render_init()
        # The vector pass does not need sampling quality: a single sample, no adaptive
        # sampling, no denoising and minimal light bounces keep the render cheap.
        RendererUtility.set_samples(1)
        RendererUtility.set_adaptive_sampling(0)
        RendererUtility.set_denoiser(None)
        RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

        FlowRendererUtility._output_vector_field(get_forward_flow, get_backward_flow, output_dir)

        # only need to render once; both fwd and bwd flow will be saved
        temporary_fwd_flow_file_path = os.path.join(temp_dir, 'fwd_flow_')
        temporary_bwd_flow_file_path = os.path.join(temp_dir, 'bwd_flow_')
        RendererUtility.render(temp_dir, "bwd_flow_", None, load_keys=set())

        # After rendering: convert to optical flow or calculate hsv visualization, if desired
        # NOTE(review): range() excludes bpy.context.scene.frame_end — presumably the project's
        # frame convention treats frame_end as exclusive; confirm against the renderer utilities.
        for frame in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):
            # temporarily save respective vector fields
            if get_forward_flow:
                file_path = temporary_fwd_flow_file_path + "%04d" % frame + ".exr"
                fwd_flow_field = load_image(file_path, num_channels=4).astype(np.float32)

                if not blender_image_coordinate_style:
                    # Flip the y component to move the origin from bottom-left to top-left.
                    fwd_flow_field[:, :, 1] = fwd_flow_field[:, :, 1] * -1

                fname = os.path.join(
                    output_dir, forward_flow_output_file_prefix) + '%04d' % frame
                forward_flow = fwd_flow_field * -1  # invert forward flow to point at next frame
                # Only the first two channels (x/y displacement) are stored.
                np.save(fname + '.npy', forward_flow[:, :, :2])

            if get_backward_flow:
                file_path = temporary_bwd_flow_file_path + "%04d" % frame + ".exr"
                bwd_flow_field = load_image(file_path, num_channels=4).astype(np.float32)

                if not blender_image_coordinate_style:
                    # Flip the y component to move the origin from bottom-left to top-left.
                    bwd_flow_field[:, :, 1] = bwd_flow_field[:, :, 1] * -1

                fname = os.path.join(
                    output_dir, backward_flow_output_file_prefix) + '%04d' % frame
                np.save(fname + '.npy', bwd_flow_field[:, :, :2])

    load_keys = set()
    # register desired outputs
    if get_forward_flow:
        Utility.register_output(output_dir, forward_flow_output_file_prefix,
                                forward_flow_output_key, '.npy', '2.0.0')
        load_keys.add(forward_flow_output_key)
    if get_backward_flow:
        Utility.register_output(output_dir, backward_flow_output_file_prefix,
                                backward_flow_output_key, '.npy', '2.0.0')
        load_keys.add(backward_flow_output_key)

    # Load and return the registered outputs unless the caller only wants files on disk.
    return WriterUtility.load_registered_outputs(
        load_keys) if return_data else {}
def render_segmap(output_dir: Optional[str] = None, temp_dir: Optional[str] = None,
                  map_by: Union[str, List[str]] = "class",
                  default_values: Optional[Dict[str, int]] = None,
                  file_prefix: str = "segmap_",
                  output_key: str = "segmap",
                  segcolormap_output_file_prefix: str = "instance_attribute_map_",
                  segcolormap_output_key: str = "segcolormap",
                  use_alpha_channel: bool = False,
                  render_colorspace_size_per_dimension: int = 2048) -> \
        Dict[str, Union[np.ndarray, List[np.ndarray]]]:
    """ Renders segmentation maps for all frames

    :param output_dir: The directory to write images to. Defaults to the temporary directory if None.
    :param temp_dir: The directory to write intermediate data to. Defaults to the temporary directory if None.
    :param map_by: The attributes to be used for color mapping.
    :param default_values: The default values used for the keys used in attributes, if None is {"class": 0}.
    :param file_prefix: The prefix to use for writing the images.
    :param output_key: The key to use for registering the output.
    :param segcolormap_output_file_prefix: The prefix to use for writing the segmentation-color map csv.
    :param segcolormap_output_key: The key to use for registering the segmentation-color map output.
    :param use_alpha_channel: If true, the alpha channel stored in .png textures is used.
    :param render_colorspace_size_per_dimension: As we use float16 for storing the rendering, the interval of \
                                                 integers which can be precisely stored is [-2048, 2048]. As \
                                                 blender does not allow negative values for colors, we use \
                                                 [0, 2048] ** 3 as our color space which allows ~8 billion \
                                                 different colors/objects. This should be enough.
    :return: dict of lists of segmaps and (for instance segmentation) segcolormaps
    """
    if output_dir is None:
        output_dir = Utility.get_temporary_directory()
    if temp_dir is None:
        temp_dir = Utility.get_temporary_directory()
    if default_values is None:
        default_values = {"class": 0}

    # All renderer/material changes below are reverted when this context exits.
    with Utility.UndoAfterExecution():
        RendererUtility._render_init()
        # Color-coded ids must be rendered exactly: one sample, no adaptive sampling,
        # no denoising, minimal light bounces.
        RendererUtility.set_samples(1)
        RendererUtility.set_adaptive_sampling(0)
        RendererUtility.set_denoiser(None)
        RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

        attributes = map_by
        # "class" is evaluated via the custom property "category_id" (cp_ prefix) below,
        # so its default value has to be mirrored under that key.
        if 'class' in default_values:
            default_values['cp_category_id'] = default_values['class']

        # Get objects with meshes (i.e. not lights or cameras)
        objs_with_mats = get_all_blender_mesh_objects()

        # Assign each object a unique color so the rendered image encodes object ids.
        colors, num_splits_per_dimension, objects = _colorize_objects_for_instance_segmentation(
            objs_with_mats, use_alpha_channel, render_colorspace_size_per_dimension)

        # Disable pixel filtering so object colors are not blended at edges.
        bpy.context.scene.cycles.filter_width = 0.0

        if use_alpha_channel:
            MaterialLoaderUtility.add_alpha_channel_to_textures(
                blurry_edges=False)

        # Determine path for temporary and for final output
        temporary_segmentation_file_path = os.path.join(temp_dir, "seg_")
        final_segmentation_file_path = os.path.join(output_dir, file_prefix)

        # float16 EXR keeps the encoded id colors exact within the chosen color space.
        RendererUtility.set_output_format("OPEN_EXR", 16)
        RendererUtility.render(temp_dir, "seg_", None, return_data=False)

        # Find optimal dtype of output based on max index
        # NOTE(review): if len(colors) - 1 exceeds the uint32 maximum, the loop silently
        # falls through with optimal_dtype = np.uint32 — presumably never reached in practice.
        for dtype in [np.uint8, np.uint16, np.uint32]:
            optimal_dtype = dtype
            if np.iinfo(optimal_dtype).max >= len(colors) - 1:
                break

        if isinstance(attributes, str):
            # only one result is requested
            result_channels = 1
            attributes = [attributes]
        elif isinstance(attributes, list):
            result_channels = len(attributes)
        else:
            raise Exception(
                "The type of this is not supported here: {}".format(
                    attributes))

        # define them for the avoid rendering case
        there_was_an_instance_rendering = False
        list_of_attributes: List[str] = []

        # Check if stereo is enabled
        if bpy.context.scene.render.use_multiview:
            suffixes = ["_L", "_R"]
        else:
            suffixes = [""]

        return_dict: Dict[str, Union[np.ndarray, List[np.ndarray]]] = {}

        # Maps object id -> {attribute name: value}; re-initialized per frame below.
        save_in_csv_attributes: Dict[int, Dict[str, Any]] = {}
        # After rendering
        for frame in range(
                bpy.context.scene.frame_start,
                bpy.context.scene.frame_end):  # for each rendered frame
            save_in_csv_attributes: Dict[int, Dict[str, Any]] = {}
            for suffix in suffixes:
                file_path = temporary_segmentation_file_path + (
                    "%04d" % frame) + suffix + ".exr"
                segmentation = load_image(file_path)
                print(file_path, segmentation.shape)

                # Decode the rendered colors back into integer object ids.
                segmap = Utility.map_back_from_equally_spaced_equidistant_values(
                    segmentation, num_splits_per_dimension,
                    render_colorspace_size_per_dimension)
                segmap = segmap.astype(optimal_dtype)

                object_ids = np.unique(segmap)
                max_id = np.max(object_ids)
                if max_id >= len(objects):
                    raise Exception(
                        "There are more object colors than there are objects")
                combined_result_map = []
                there_was_an_instance_rendering = False
                list_of_attributes = []
                channels = []
                # Build one output channel per requested attribute.
                for channel_id in range(result_channels):
                    num_default_values = 0
                    resulting_map = np.zeros(
                        (segmap.shape[0], segmap.shape[1]),
                        dtype=optimal_dtype)
                    was_used = False
                    current_attribute = attributes[channel_id]
                    org_attribute = current_attribute

                    # if the class is used the category_id attribute is evaluated
                    if current_attribute == "class":
                        current_attribute = "cp_category_id"
                    # in the instance case the resulting ids are directly used
                    if current_attribute == "instance":
                        there_was_an_instance_rendering = True
                        resulting_map = segmap
                        was_used = True
                    else:
                        if current_attribute != "cp_category_id":
                            list_of_attributes.append(current_attribute)
                        # for the current attribute remove cp_ and _csv, if present
                        attribute = current_attribute
                        if attribute.startswith("cp_"):
                            attribute = attribute[len("cp_"):]
                        # check if a default value was specified
                        default_value_set = False
                        if current_attribute in default_values or attribute in default_values:
                            default_value_set = True
                            if current_attribute in default_values:
                                default_value = default_values[
                                    current_attribute]
                            elif attribute in default_values:
                                default_value = default_values[attribute]
                        # iterate over all object ids
                        for object_id in object_ids:
                            # Convert np.uint8 to int, such that the save_in_csv_attributes dict can later be serialized
                            object_id = int(object_id)
                            # get the corresponding object via the id
                            current_obj = objects[object_id]
                            # if the current obj has a attribute with that name -> get it
                            if hasattr(current_obj, attribute):
                                value = getattr(current_obj, attribute)
                            # if the current object has a custom property with that name -> get it
                            elif current_attribute.startswith(
                                    "cp_") and attribute in current_obj:
                                value = current_obj[attribute]
                            elif current_attribute.startswith("cf_"):
                                # NOTE(review): only "cf_basename" is handled — any other cf_
                                # attribute leaves `value` unbound from this branch and would
                                # raise a NameError below; confirm other cf_ keys cannot occur.
                                if current_attribute == "cf_basename":
                                    value = current_obj.name
                                    # Strip blender's ".001"-style duplicate suffix.
                                    if "." in value:
                                        value = value[:value.rfind(".")]
                            elif default_value_set:
                                # if none of the above applies use the default value
                                value = default_value
                                num_default_values += 1
                            else:
                                # if the requested current_attribute is not a custom property or a attribute
                                # or there is a default value stored
                                # it throws an exception
                                raise Exception(
                                    "The obj: {} does not have the "
                                    "attribute: {}, striped: {}. Maybe try a default "
                                    "value.".format(current_obj.name,
                                                    current_attribute,
                                                    attribute))

                            # save everything which is not instance also in the .csv
                            if isinstance(
                                    value,
                                    (int, float, np.integer, np.floating)):
                                was_used = True
                                resulting_map[segmap == object_id] = value

                            if object_id in save_in_csv_attributes:
                                save_in_csv_attributes[object_id][
                                    attribute] = value
                            else:
                                save_in_csv_attributes[object_id] = {
                                    attribute: value
                                }
                    # Only keep channels that produced data beyond pure defaults.
                    if was_used and num_default_values < len(object_ids):
                        channels.append(org_attribute)
                        combined_result_map.append(resulting_map)
                        return_dict.setdefault(
                            "{}_segmaps{}".format(org_attribute, suffix),
                            []).append(resulting_map)

                fname = final_segmentation_file_path + ("%04d" % frame) + suffix
                # combine all resulting images to one image
                resulting_map = np.stack(combined_result_map, axis=2)
                # remove the unneeded third dimension
                if resulting_map.shape[2] == 1:
                    resulting_map = resulting_map[:, :, 0]
                # TODO: Remove unnecessary save when we give up backwards compatibility
                np.save(fname, resulting_map)

            if there_was_an_instance_rendering:
                mappings = []
                for object_id, attribute_dict in save_in_csv_attributes.items():
                    mappings.append({"idx": object_id, **attribute_dict})
                return_dict.setdefault("instance_attribute_maps",
                                       []).append(mappings)

                # write color mappings to file
                # TODO: Remove unnecessary csv file when we give up backwards compatibility
                csv_file_path = os.path.join(
                    output_dir,
                    segcolormap_output_file_prefix + ("%04d.csv" % frame))
                with open(csv_file_path, 'w', newline='') as csvfile:
                    # get from the first element the used field names
                    fieldnames = ["idx"]
                    # get all used object element keys
                    for object_element in save_in_csv_attributes.values():
                        fieldnames.extend(list(object_element.keys()))
                        break
                    for channel_name in channels:
                        fieldnames.append("channel_{}".format(channel_name))
                    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                    writer.writeheader()
                    # save for each object all values in one row
                    for obj_idx, object_element in save_in_csv_attributes.items():
                        object_element["idx"] = obj_idx
                        # NOTE(review): the channel columns store the channel's index i,
                        # not a per-object value — presumably intentional as a channel-order
                        # record; confirm against consumers of this csv.
                        for i, channel_name in enumerate(channels):
                            object_element["channel_{}".format(
                                channel_name)] = i
                        writer.writerow(object_element)
            else:
                if len(list_of_attributes) > 0:
                    raise Exception(
                        "There were attributes specified in the may_by, which could not be saved as "
                        "there was no \"instance\" may_by key used. This is true for this/these "
                        "keys: {}".format(", ".join(list_of_attributes)))
                # if there was no instance rendering no .csv file is generated!
                # delete all saved infos about .csv
                save_in_csv_attributes = {}

    Utility.register_output(output_dir, file_prefix, output_key, ".npy",
                            "2.0.0")

    # The csv output only exists if an instance rendering happened (dict left non-empty above).
    if save_in_csv_attributes:
        Utility.register_output(output_dir, segcolormap_output_file_prefix,
                                segcolormap_output_key, ".csv", "2.0.0")

    return return_dict