def load_output_file(file_path: str, write_alpha_channel: bool = False) -> np.ndarray:
    """ Tries to read in the file with the given path into a numpy array.

    :param file_path: The file path. Type: string.
    :param write_alpha_channel: Whether to load the alpha channel as well. Type: bool. Default: False.
    :return: A numpy array containing the data of the file.
    """
    if not os.path.exists(file_path):
        raise Exception("File not found: " + file_path)

    file_ending = file_path[file_path.rfind(".") + 1:].lower()

    if file_ending in ["exr", "png", "jpg"]:
        # num_channels is 4 if the alpha channel should be loaded as well
        return load_image(file_path, num_channels=3 + (1 if write_alpha_channel else 0))
    elif file_ending in ["npy", "npz"]:
        return np.load(file_path)
    elif file_ending == "csv":
        return WriterUtility._load_csv(file_path)
    else:
        raise NotImplementedError("File with ending " + file_ending + " cannot be loaded.")
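# Usage sketch for load_output_file (the file names below are hypothetical and assumed
# to come from earlier render/write steps; not part of the original module). Note that
# for ".npz" archives np.load returns an NpzFile rather than a plain ndarray, so
# callers may need to index into the result:
#
#     rgba = load_output_file("output/rgb_0000.png", write_alpha_channel=True)  # (H, W, 4) array
#     depth = load_output_file("output/depth_0000.npy")                         # raw numpy array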
def _load_and_postprocess(self, file_path, key):
    """ Loads an image and post-processes it.

    :param file_path: Image path. Type: string.
    :param key: The image's key with regard to the hdf5 file. Type: string.
    :return: The post-processed image that was loaded using the file path.
    """
    data = load_image(Utility.resolve_path(file_path))
    data = self._apply_postprocessing(key, data)

    print("Key: " + key + " - shape: " + str(data.shape) + " - dtype: " + str(data.dtype) +
          " - path: " + file_path)

    return data
def _load_file(self, file_path):
    """ Tries to read in the file with the given path into a numpy array.

    :param file_path: The file path. Type: string.
    :return: A numpy array containing the data of the file.
    """
    if not os.path.exists(file_path):
        raise Exception("File not found: " + file_path)

    file_ending = file_path[file_path.rfind(".") + 1:].lower()

    if file_ending in ["exr", "png", "jpg"]:
        # num_channels is 4 if write_alpha_channel is true in the config
        return load_image(file_path, num_channels=3 + self.config.get_bool("write_alpha_channel", False))
    elif file_ending in ["npy", "npz"]:
        return self._load_npy(file_path)
    elif file_ending == "csv":
        return self._load_csv(file_path)
    else:
        raise NotImplementedError("File with ending " + file_ending + " cannot be loaded.")
def _load_file(self, file_path):
    """ Tries to read in the file with the given path into a numpy array.

    :param file_path: The file path. Type: string.
    :return: A numpy array containing the data of the file.
    """
    if not os.path.exists(file_path):
        raise Exception("File not found: " + file_path)

    file_ending = file_path[file_path.rfind(".") + 1:].lower()

    if file_ending in ["exr", "png", "jpg"]:
        return load_image(file_path)
    elif file_ending in ["npy", "npz"]:
        return self._load_npy(file_path)
    elif file_ending == "csv":
        return self._load_csv(file_path)
    else:
        raise NotImplementedError("File with ending " + file_ending + " cannot be loaded.")
def run(self):
    """ Does the stereo global matching in the following steps:

    1. Collect the camera object and its state.
    2. For each frame, load the left and right images and call the `sgm()` method.
    3. Write the results to a numpy file.
    """
    if self._avoid_output:
        print("Avoid output is on, no output produced!")
        return

    if GlobalStorage.is_in_storage("renderer_distance_end"):
        self.depth_max = GlobalStorage.get("renderer_distance_end")
    else:
        raise RuntimeError("A distance rendering has to be executed before this module is executed, "
                           "else the `renderer_distance_end` is not set!")

    self.rgb_output_path = Utility.find_registered_output_by_key(self.rgb_output_key)["path"]

    # Collect camera and camera object
    cam_ob = bpy.context.scene.camera
    cam = cam_ob.data

    self.width = bpy.context.scene.render.resolution_x
    self.height = bpy.context.scene.render.resolution_y
    print('Resolution: {}, {}'.format(self.width, self.height))

    self.baseline = cam.stereo.interocular_distance
    if not self.baseline:
        raise Exception("Stereo parameters are not set. Make sure to enable RGB stereo rendering before this module.")

    if self.config.get_bool("infer_focal_length_from_fov", False):
        fov = cam.angle_x if cam.angle_x else cam.angle
        if not fov:
            raise Exception("Could not obtain field of view angle")
        self.focal_length = float((1.0 / tan(fov / 2.0)) * (float(self.width) / 2.0))
    else:
        self.focal_length = self.config.get_float("focal_length", 0.0)
        if self.focal_length == 0.0:
            raise Exception("Focal length set to 0. This is either intentional or because no value was set by "
                            "the user. Either way, this needs to be corrected by setting a value > 0 or "
                            "enabling 'infer_focal_length_from_fov'.")

    for frame in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):
        path_split = self.rgb_output_path.split(".")
        path_l = "{}_L.{}".format(path_split[0], path_split[1])
        path_r = "{}_R.{}".format(path_split[0], path_split[1])

        imgL = load_image(path_l % frame)
        imgR = load_image(path_r % frame)

        depth, disparity = self.sgm(imgL, imgR)

        np.save(os.path.join(self.output_dir, "stereo-depth_%04d") % frame, depth)
        if self.config.get_bool("output_disparity", False):
            np.save(os.path.join(self.output_dir, "disparity_%04d") % frame, disparity)

    Utility.register_output(self._determine_output_dir(), "stereo-depth_", "stereo-depth", ".npy", "1.0.0")
    if self.config.get_bool("output_disparity", False):
        Utility.register_output(self._determine_output_dir(), "disparity_", "disparity", ".npy", "1.0.0")
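# Standalone sketch of the pinhole relation used above (an illustration, not part of
# the module): with the full horizontal field of view `fov` in radians, the focal
# length in pixels is (width / 2) / tan(fov / 2).
from math import atan, tan

def focal_length_from_fov(fov: float, width: int) -> float:
    """ Converts a horizontal field of view angle [rad] to a focal length in pixels. """
    return (float(width) / 2.0) / tan(fov / 2.0)

# e.g. a 90 degree FOV at 640 px width gives a 320 px focal length:
assert abs(focal_length_from_fov(2.0 * atan(1.0), 640) - 320.0) < 1e-9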
def run(self):
    # Determine whether to get optical flow or scene flow - get scene flow per default
    get_forward_flow = self.config.get_bool('forward_flow', False)
    get_backward_flow = self.config.get_bool('backward_flow', False)

    if get_forward_flow is False and get_backward_flow is False:
        raise Exception("Take the FlowRenderer Module out of the config if both forward and backward flow are "
                        "set to False!")

    with Utility.UndoAfterExecution():
        self._configure_renderer(default_samples=self.config.get_int("samples", 1))
        self._output_vector_field()

        # Only need to render once; both fwd and bwd flow will be saved
        temporary_fwd_flow_file_path = os.path.join(self._temp_dir, 'fwd_flow_')
        temporary_bwd_flow_file_path = os.path.join(self._temp_dir, 'bwd_flow_')
        self._render("bwd_flow_", custom_file_path=temporary_bwd_flow_file_path)

        # After rendering: convert to optical flow or calculate hsv visualization, if desired
        if not self._avoid_rendering:
            for frame in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):
                # Load the temporarily saved vector fields
                if get_forward_flow:
                    file_path = temporary_fwd_flow_file_path + "%04d" % frame + ".exr"
                    fwd_flow_field = load_image(file_path, num_channels=4).astype(np.float32)

                    if not self.config.get_bool('blender_image_coordinate_style', False):
                        fwd_flow_field[:, :, 1] = fwd_flow_field[:, :, 1] * -1

                    fname = os.path.join(self._determine_output_dir(),
                                         self.config.get_string('forward_flow_output_file_prefix',
                                                                'forward_flow_')) + '%04d' % frame
                    forward_flow = fwd_flow_field * -1  # invert forward flow to point at next frame
                    np.save(fname + '.npy', forward_flow[:, :, :2])

                if get_backward_flow:
                    file_path = temporary_bwd_flow_file_path + "%04d" % frame + ".exr"
                    bwd_flow_field = load_image(file_path, num_channels=4).astype(np.float32)

                    if not self.config.get_bool('blender_image_coordinate_style', False):
                        bwd_flow_field[:, :, 1] = bwd_flow_field[:, :, 1] * -1

                    fname = os.path.join(self._determine_output_dir(),
                                         self.config.get_string('backward_flow_output_file_prefix',
                                                                'backward_flow_')) + '%04d' % frame
                    np.save(fname + '.npy', bwd_flow_field[:, :, :2])

    # Register desired outputs
    if get_forward_flow:
        self._register_output(default_prefix=self.config.get_string('forward_flow_output_file_prefix',
                                                                    'forward_flow_'),
                              default_key=self.config.get_string("forward_flow_output_key", "forward_flow"),
                              suffix='.npy', version='2.0.0')
    if get_backward_flow:
        self._register_output(default_prefix=self.config.get_string('backward_flow_output_file_prefix',
                                                                    'backward_flow_'),
                              default_key=self.config.get_string("backward_flow_output_key", "backward_flow"),
                              suffix='.npy', version='2.0.0')
def render(output_dir, temp_dir, get_forward_flow, get_backward_flow, blender_image_coordinate_style=False,
           forward_flow_output_file_prefix="forward_flow_", forward_flow_output_key="forward_flow",
           backward_flow_output_file_prefix="backward_flow_", backward_flow_output_key="backward_flow"):
    """ Renders the optical flow (forward and backward) for all frames.

    :param output_dir: The directory to write images to.
    :param temp_dir: The directory to write intermediate data to.
    :param get_forward_flow: Whether to render forward optical flow.
    :param get_backward_flow: Whether to render backward optical flow.
    :param blender_image_coordinate_style: Whether to specify the image coordinate system at the bottom left
                                           (blender default; True) or top left (standard convention; False).
    :param forward_flow_output_file_prefix: The file prefix that should be used when writing forward flow to a file.
    :param forward_flow_output_key: The key which should be used for storing forward optical flow values.
    :param backward_flow_output_file_prefix: The file prefix that should be used when writing backward flow to a file.
    :param backward_flow_output_key: The key which should be used for storing backward optical flow values.
    """
    if get_forward_flow is False and get_backward_flow is False:
        raise Exception("Take the FlowRenderer Module out of the config if both forward and backward flow are "
                        "set to False!")

    with Utility.UndoAfterExecution():
        RendererUtility.init()
        RendererUtility.set_samples(1)
        RendererUtility.set_adaptive_sampling(0)
        RendererUtility.set_denoiser(None)
        RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

        FlowRendererUtility._output_vector_field(get_forward_flow, get_backward_flow, output_dir)

        # Only need to render once; both fwd and bwd flow will be saved
        temporary_fwd_flow_file_path = os.path.join(temp_dir, 'fwd_flow_')
        temporary_bwd_flow_file_path = os.path.join(temp_dir, 'bwd_flow_')
        RendererUtility.render(temp_dir, "bwd_flow_", None)

        # After rendering: convert to optical flow or calculate hsv visualization, if desired
        for frame in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):
            # Load the temporarily saved vector fields
            if get_forward_flow:
                file_path = temporary_fwd_flow_file_path + "%04d" % frame + ".exr"
                fwd_flow_field = load_image(file_path, num_channels=4).astype(np.float32)

                if not blender_image_coordinate_style:
                    fwd_flow_field[:, :, 1] = fwd_flow_field[:, :, 1] * -1

                fname = os.path.join(output_dir, forward_flow_output_file_prefix) + '%04d' % frame
                forward_flow = fwd_flow_field * -1  # invert forward flow to point at next frame
                np.save(fname + '.npy', forward_flow[:, :, :2])

            if get_backward_flow:
                file_path = temporary_bwd_flow_file_path + "%04d" % frame + ".exr"
                bwd_flow_field = load_image(file_path, num_channels=4).astype(np.float32)

                if not blender_image_coordinate_style:
                    bwd_flow_field[:, :, 1] = bwd_flow_field[:, :, 1] * -1

                fname = os.path.join(output_dir, backward_flow_output_file_prefix) + '%04d' % frame
                np.save(fname + '.npy', bwd_flow_field[:, :, :2])

    # Register desired outputs
    if get_forward_flow:
        Utility.register_output(output_dir, forward_flow_output_file_prefix, forward_flow_output_key,
                                '.npy', '2.0.0')
    if get_backward_flow:
        Utility.register_output(output_dir, backward_flow_output_file_prefix, backward_flow_output_key,
                                '.npy', '2.0.0')
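# Minimal sketch of the flow post-processing above (an illustration under the same
# assumptions: `raw` is the (H, W, 4) vector pass loaded from the temporary EXR).
# The y component is flipped to go from Blender's bottom-left image origin to the
# standard top-left convention, and the forward pass is negated so it points from
# the current frame to the next one.
import numpy as np

def to_standard_flow(raw: np.ndarray, forward: bool) -> np.ndarray:
    flow = raw.astype(np.float32).copy()
    flow[:, :, 1] *= -1  # bottom-left -> top-left image coordinates
    if forward:
        flow *= -1  # invert the forward flow so it points at the next frame
    return flow[:, :, :2]  # keep only the (dx, dy) channels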
def run(self):
    with Utility.UndoAfterExecution():
        self._configure_renderer(default_samples=1)

        # Get objects with meshes (i.e. not lights or cameras)
        objs_with_mats = get_all_mesh_objects()

        colors, num_splits_per_dimension, used_objects = \
            self._colorize_objects_for_instance_segmentation(objs_with_mats)

        bpy.context.scene.render.image_settings.file_format = "OPEN_EXR"
        bpy.context.scene.render.image_settings.color_depth = "16"
        bpy.context.view_layer.cycles.use_denoising = False
        bpy.context.scene.cycles.filter_width = 0.0

        if self._use_alpha_channel:
            self.add_alpha_channel_to_textures(blurry_edges=False)

        # Determine path for temporary and for final output
        temporary_segmentation_file_path = os.path.join(self._temp_dir, "seg_")
        final_segmentation_file_path = os.path.join(self._determine_output_dir(),
                                                    self.config.get_string("output_file_prefix", "segmap_"))

        # Render the temporary output
        self._render("seg_", custom_file_path=temporary_segmentation_file_path)

        # Find optimal dtype of output based on max index
        for dtype in [np.uint8, np.uint16, np.uint32]:
            optimal_dtype = dtype
            if np.iinfo(optimal_dtype).max >= len(colors) - 1:
                break

        # Get the type of mappings which should be performed
        used_attributes = self.config.get_raw_dict("map_by", "class")

        used_default_values = self.config.get_raw_dict("default_values", {})
        if 'class' in used_default_values:
            used_default_values['cp_category_id'] = used_default_values['class']

        if isinstance(used_attributes, str):
            # Only one result is requested
            result_channels = 1
            used_attributes = [used_attributes]
        elif isinstance(used_attributes, list):
            result_channels = len(used_attributes)
        else:
            raise Exception("The type of this is not supported here: {}".format(used_attributes))

        save_in_csv_attributes = {}
        # Define these for the avoid rendering case
        there_was_an_instance_rendering = False
        list_of_used_attributes = []

        # After rendering
        if not self._avoid_rendering:
            for frame in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):
                # For each rendered frame
                file_path = temporary_segmentation_file_path + "%04d" % frame + ".exr"
                segmentation = load_image(file_path)

                segmap = Utility.map_back_from_equally_spaced_equidistant_values(
                    segmentation, num_splits_per_dimension, self.render_colorspace_size_per_dimension)
                segmap = segmap.astype(optimal_dtype)

                used_object_ids = np.unique(segmap)
                max_id = np.max(used_object_ids)
                if max_id >= len(used_objects):
                    raise Exception("There are more object colors than there are objects")

                combined_result_map = []
                there_was_an_instance_rendering = False
                list_of_used_attributes = []
                used_channels = []
                for channel_id in range(result_channels):
                    resulting_map = np.empty((segmap.shape[0], segmap.shape[1]))
                    was_used = False
                    current_attribute = used_attributes[channel_id]
                    org_attribute = current_attribute

                    # If "class" is used, the category_id attribute is evaluated
                    if current_attribute == "class":
                        current_attribute = "cp_category_id"

                    # In the instance case the resulting ids are used directly
                    if current_attribute == "instance":
                        there_was_an_instance_rendering = True
                        resulting_map = segmap
                        was_used = True
                        # A non-default value was also used
                        non_default_value_was_used = True
                    else:
                        if current_attribute != "cp_category_id":
                            list_of_used_attributes.append(current_attribute)
                        # For the current attribute, remove the cp_ prefix, if present
                        used_attribute = current_attribute
                        if used_attribute.startswith("cp_"):
                            used_attribute = used_attribute[len("cp_"):]

                        # Check if a default value was specified
                        default_value_set = False
                        if current_attribute in used_default_values or used_attribute in used_default_values:
                            default_value_set = True
                            if current_attribute in used_default_values:
                                default_value = used_default_values[current_attribute]
                            elif used_attribute in used_default_values:
                                default_value = used_default_values[used_attribute]

                        last_state_save_in_csv = None
                        # This avoids that for certain attributes only the default value is written
                        non_default_value_was_used = False
                        # Iterate over all object ids
                        for object_id in used_object_ids:
                            is_default_value = False
                            # Get the corresponding object via the id
                            current_obj = used_objects[object_id]
                            # If the current obj has an attribute with that name -> get it
                            if hasattr(current_obj, used_attribute):
                                used_value = getattr(current_obj, used_attribute)
                            # If the current object has a custom property with that name -> get it
                            elif current_attribute.startswith("cp_") and used_attribute in current_obj:
                                used_value = current_obj[used_attribute]
                            elif current_attribute.startswith("cf_"):
                                if current_attribute == "cf_basename":
                                    used_value = current_obj.name
                                    if "." in used_value:
                                        used_value = used_value[:used_value.rfind(".")]
                            elif default_value_set:
                                # If none of the above applies, use the default value
                                used_value = default_value
                                is_default_value = True
                            else:
                                # If the requested current_attribute is neither an attribute nor a custom
                                # property and there is no default value stored, an exception is thrown
                                raise Exception("The obj: {} does not have the attribute: {}, stripped: {}. "
                                                "Maybe try a default value.".format(current_obj.name,
                                                                                    current_attribute,
                                                                                    used_attribute))

                            # Check if the value should be saved as an image or in the csv file
                            save_in_csv = False
                            try:
                                resulting_map[segmap == object_id] = used_value
                                was_used = True
                                if not is_default_value:
                                    non_default_value_was_used = True
                                # Save everything which is not instance also in the .csv
                                if current_attribute != "instance":
                                    save_in_csv = True
                            except ValueError:
                                save_in_csv = True

                            if last_state_save_in_csv is not None and last_state_save_in_csv != save_in_csv:
                                raise Exception("During the creation of the mapping, the saving to an image or a "
                                                "csv file switched. This might indicate that the used default "
                                                "value does not have the same type as the returned value, "
                                                "for: {}".format(current_attribute))
                            last_state_save_in_csv = save_in_csv

                            if save_in_csv:
                                if object_id in save_in_csv_attributes:
                                    save_in_csv_attributes[object_id][used_attribute] = used_value
                                else:
                                    save_in_csv_attributes[object_id] = {used_attribute: used_value}

                    if was_used and non_default_value_was_used:
                        used_channels.append(org_attribute)
                        combined_result_map.append(resulting_map)

                fname = final_segmentation_file_path + "%04d" % frame
                # Combine all resulting images to one image
                resulting_map = np.stack(combined_result_map, axis=2)
                # Remove the unneeded third dimension
                if resulting_map.shape[2] == 1:
                    resulting_map = resulting_map[:, :, 0]
                np.save(fname, resulting_map)

        if not there_was_an_instance_rendering:
            if len(list_of_used_attributes) > 0:
                raise Exception("There were attributes specified in the map_by which could not be saved as "
                                "there was no \"instance\" map_by key used. This is true for this/these "
                                "keys: {}".format(", ".join(list_of_used_attributes)))
            # If there was no instance rendering, no .csv file is generated!
            # Delete all saved infos about the .csv
            save_in_csv_attributes = {}

    # Write color mappings to file
    if save_in_csv_attributes and not self._avoid_rendering:
        csv_file_path = os.path.join(self._determine_output_dir(),
                                     self.config.get_string("segcolormap_output_file_prefix",
                                                            "class_inst_col_map") + ".csv")
        with open(csv_file_path, 'w', newline='') as csvfile:
            # Get the used field names from the first element
            fieldnames = ["idx"]
            # Get all used object element keys
            for object_element in save_in_csv_attributes.values():
                fieldnames.extend(list(object_element.keys()))
                break
            for channel_name in used_channels:
                fieldnames.append("channel_{}".format(channel_name))
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            # Save all values for each object in one row
            for obj_idx, object_element in save_in_csv_attributes.items():
                object_element["idx"] = obj_idx
                for i, channel_name in enumerate(used_channels):
                    object_element["channel_{}".format(channel_name)] = i
                writer.writerow(object_element)

    self._register_output("segmap_", "segmap", ".npy", "2.0.0")
    if save_in_csv_attributes:
        self._register_output("class_inst_col_map", "segcolormap", ".csv", "2.0.0",
                              unique_for_camposes=False,
                              output_key_parameter_name="segcolormap_output_key",
                              output_file_prefix_parameter_name="segcolormap_output_file_prefix")
def render(output_dir, temp_dir, used_attributes, used_default_values=None, file_prefix="segmap_",
           output_key="segmap", segcolormap_output_file_prefix="class_inst_col_map",
           segcolormap_output_key="segcolormap", use_alpha_channel=False,
           render_colorspace_size_per_dimension=2048):
    """ Renders segmentation maps for all frames.

    :param output_dir: The directory to write images to.
    :param temp_dir: The directory to write intermediate data to.
    :param used_attributes: The attributes to be used for color mapping.
    :param used_default_values: The default values used for the keys used in used_attributes.
    :param file_prefix: The prefix to use for writing the images.
    :param output_key: The key to use for registering the output.
    :param segcolormap_output_file_prefix: The prefix to use for writing the segmentation-color map csv.
    :param segcolormap_output_key: The key to use for registering the segmentation-color map output.
    :param use_alpha_channel: If true, the alpha channel stored in .png textures is used.
    :param render_colorspace_size_per_dimension: As we use float16 for storing the rendering, the interval of
                                                 integers which can be precisely stored is [-2048, 2048]. As
                                                 blender does not allow negative values for colors, we use
                                                 [0, 2048] ** 3 as our color space, which allows ~8 billion
                                                 different colors/objects. This should be enough.
    """
    # Avoid a mutable default argument, as the dict is modified below
    if used_default_values is None:
        used_default_values = {}

    with Utility.UndoAfterExecution():
        RendererUtility.init()
        RendererUtility.set_samples(1)
        RendererUtility.set_adaptive_sampling(0)
        RendererUtility.set_denoiser(None)
        RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

        # Get objects with meshes (i.e. not lights or cameras)
        objs_with_mats = get_all_blender_mesh_objects()

        colors, num_splits_per_dimension, used_objects = \
            SegMapRendererUtility._colorize_objects_for_instance_segmentation(
                objs_with_mats, use_alpha_channel, render_colorspace_size_per_dimension)

        bpy.context.scene.cycles.filter_width = 0.0

        if use_alpha_channel:
            MaterialLoaderUtility.add_alpha_channel_to_textures(blurry_edges=False)

        # Determine path for temporary and for final output
        temporary_segmentation_file_path = os.path.join(temp_dir, "seg_")
        final_segmentation_file_path = os.path.join(output_dir, file_prefix)

        RendererUtility.set_output_format("OPEN_EXR", 16)
        RendererUtility.render(temp_dir, "seg_", None)

        # Find optimal dtype of output based on max index
        for dtype in [np.uint8, np.uint16, np.uint32]:
            optimal_dtype = dtype
            if np.iinfo(optimal_dtype).max >= len(colors) - 1:
                break

        if 'class' in used_default_values:
            used_default_values['cp_category_id'] = used_default_values['class']

        if isinstance(used_attributes, str):
            # Only one result is requested
            result_channels = 1
            used_attributes = [used_attributes]
        elif isinstance(used_attributes, list):
            result_channels = len(used_attributes)
        else:
            raise Exception("The type of this is not supported here: {}".format(used_attributes))

        save_in_csv_attributes = {}
        # Define these for the avoid rendering case
        there_was_an_instance_rendering = False
        list_of_used_attributes = []

        # Check if stereo is enabled
        if bpy.context.scene.render.use_multiview:
            suffixes = ["_L", "_R"]
        else:
            suffixes = [""]

        # After rendering
        for frame in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):
            # For each rendered frame
            for suffix in suffixes:
                file_path = temporary_segmentation_file_path + ("%04d" % frame) + suffix + ".exr"
                segmentation = load_image(file_path)
                print(file_path, segmentation.shape)

                segmap = Utility.map_back_from_equally_spaced_equidistant_values(
                    segmentation, num_splits_per_dimension, render_colorspace_size_per_dimension)
                segmap = segmap.astype(optimal_dtype)

                used_object_ids = np.unique(segmap)
                max_id = np.max(used_object_ids)
                if max_id >= len(used_objects):
                    raise Exception("There are more object colors than there are objects")

                combined_result_map = []
                there_was_an_instance_rendering = False
                list_of_used_attributes = []
                used_channels = []
                for channel_id in range(result_channels):
                    resulting_map = np.empty((segmap.shape[0], segmap.shape[1]))
                    was_used = False
                    current_attribute = used_attributes[channel_id]
                    org_attribute = current_attribute

                    # If "class" is used, the category_id attribute is evaluated
                    if current_attribute == "class":
                        current_attribute = "cp_category_id"

                    # In the instance case the resulting ids are used directly
                    if current_attribute == "instance":
                        there_was_an_instance_rendering = True
                        resulting_map = segmap
                        was_used = True
                        # A non-default value was also used
                        non_default_value_was_used = True
                    else:
                        if current_attribute != "cp_category_id":
                            list_of_used_attributes.append(current_attribute)
                        # For the current attribute, remove the cp_ prefix, if present
                        used_attribute = current_attribute
                        if used_attribute.startswith("cp_"):
                            used_attribute = used_attribute[len("cp_"):]

                        # Check if a default value was specified
                        default_value_set = False
                        if current_attribute in used_default_values or used_attribute in used_default_values:
                            default_value_set = True
                            if current_attribute in used_default_values:
                                default_value = used_default_values[current_attribute]
                            elif used_attribute in used_default_values:
                                default_value = used_default_values[used_attribute]

                        last_state_save_in_csv = None
                        # This avoids that for certain attributes only the default value is written
                        non_default_value_was_used = False
                        # Iterate over all object ids
                        for object_id in used_object_ids:
                            is_default_value = False
                            # Get the corresponding object via the id
                            current_obj = used_objects[object_id]
                            # If the current obj has an attribute with that name -> get it
                            if hasattr(current_obj, used_attribute):
                                used_value = getattr(current_obj, used_attribute)
                            # If the current object has a custom property with that name -> get it
                            elif current_attribute.startswith("cp_") and used_attribute in current_obj:
                                used_value = current_obj[used_attribute]
                            elif current_attribute.startswith("cf_"):
                                if current_attribute == "cf_basename":
                                    used_value = current_obj.name
                                    if "." in used_value:
                                        used_value = used_value[:used_value.rfind(".")]
                            elif default_value_set:
                                # If none of the above applies, use the default value
                                used_value = default_value
                                is_default_value = True
                            else:
                                # If the requested current_attribute is neither an attribute nor a custom
                                # property and there is no default value stored, an exception is thrown
                                raise Exception("The obj: {} does not have the attribute: {}, stripped: {}. "
                                                "Maybe try a default value.".format(current_obj.name,
                                                                                    current_attribute,
                                                                                    used_attribute))

                            # Check if the value should be saved as an image or in the csv file
                            save_in_csv = False
                            try:
                                resulting_map[segmap == object_id] = used_value
                                was_used = True
                                if not is_default_value:
                                    non_default_value_was_used = True
                                # Save everything which is not instance also in the .csv
                                if current_attribute != "instance":
                                    save_in_csv = True
                            except ValueError:
                                save_in_csv = True

                            if last_state_save_in_csv is not None and last_state_save_in_csv != save_in_csv:
                                raise Exception("During the creation of the mapping, the saving to an image or a "
                                                "csv file switched. This might indicate that the used default "
                                                "value does not have the same type as the returned value, "
                                                "for: {}".format(current_attribute))
                            last_state_save_in_csv = save_in_csv

                            if save_in_csv:
                                if object_id in save_in_csv_attributes:
                                    save_in_csv_attributes[object_id][used_attribute] = used_value
                                else:
                                    save_in_csv_attributes[object_id] = {used_attribute: used_value}

                    if was_used and non_default_value_was_used:
                        used_channels.append(org_attribute)
                        combined_result_map.append(resulting_map)

                fname = final_segmentation_file_path + ("%04d" % frame) + suffix
                # Combine all resulting images to one image
                resulting_map = np.stack(combined_result_map, axis=2)
                # Remove the unneeded third dimension
                if resulting_map.shape[2] == 1:
                    resulting_map = resulting_map[:, :, 0]
                np.save(fname, resulting_map)

        if not there_was_an_instance_rendering:
            if len(list_of_used_attributes) > 0:
                raise Exception("There were attributes specified in the map_by which could not be saved as "
                                "there was no \"instance\" map_by key used. This is true for this/these "
                                "keys: {}".format(", ".join(list_of_used_attributes)))
            # If there was no instance rendering, no .csv file is generated!
            # Delete all saved infos about the .csv
            save_in_csv_attributes = {}

    # Write color mappings to file
    if save_in_csv_attributes:
        csv_file_path = os.path.join(output_dir, segcolormap_output_file_prefix + ".csv")
        with open(csv_file_path, 'w', newline='') as csvfile:
            # Get the used field names from the first element
            fieldnames = ["idx"]
            # Get all used object element keys
            for object_element in save_in_csv_attributes.values():
                fieldnames.extend(list(object_element.keys()))
                break
            for channel_name in used_channels:
                fieldnames.append("channel_{}".format(channel_name))
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            # Save all values for each object in one row
            for obj_idx, object_element in save_in_csv_attributes.items():
                object_element["idx"] = obj_idx
                for i, channel_name in enumerate(used_channels):
                    object_element["channel_{}".format(channel_name)] = i
                writer.writerow(object_element)

    Utility.register_output(output_dir, file_prefix, output_key, ".npy", "2.0.0")
    if save_in_csv_attributes:
        Utility.register_output(output_dir, segcolormap_output_file_prefix, segcolormap_output_key,
                                ".csv", "2.0.0", unique_for_camposes=False)
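# Rough sketch of what Utility.map_back_from_equally_spaced_equidistant_values has to
# undo (an assumption about the encoding, not the actual implementation): each object
# id is written as a base-`num_splits` number whose digits are spread evenly over
# [0, space_size] across the R, G and B channels.
import numpy as np

def decode_segmap(rendered: np.ndarray, num_splits: int, space_size: int) -> np.ndarray:
    step = space_size / max(num_splits - 1, 1)  # distance between neighbouring code values
    digits = np.round(rendered[:, :, :3] / step).astype(np.int64)
    # Recombine the three digits into a single object id per pixel
    return digits[:, :, 0] * num_splits ** 2 + digits[:, :, 1] * num_splits + digits[:, :, 2]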
def run(self):
    with Utility.UndoAfterExecution():
        self._configure_renderer(default_samples=1)

        # Get the current method for color mapping, instance or class
        method = self.config.get_string("map_by", "class")

        # Get objects with materials (i.e. not lights or cameras)
        objs_with_mats = [obj for obj in bpy.context.scene.objects if hasattr(obj.data, 'materials')]

        if method.lower() == "class":
            colors, num_splits_per_dimension, color_map = \
                self._colorize_objects_for_semantic_segmentation(objs_with_mats)
        elif method.lower() == "instance":
            colors, num_splits_per_dimension, color_map = \
                self._colorize_objects_for_instance_segmentation(objs_with_mats)
        else:
            raise Exception("Invalid mapping method: {}, possible values for map_by are: "
                            "class, instance".format(method))

        bpy.context.scene.render.image_settings.file_format = "OPEN_EXR"
        bpy.context.scene.render.image_settings.color_depth = "16"
        bpy.context.view_layer.cycles.use_denoising = False
        bpy.context.scene.cycles.filter_width = 0.0

        if self._use_alpha_channel:
            self.add_alpha_channel_to_textures(blurry_edges=False)

        # Determine path for temporary and for final output
        temporary_segmentation_file_path = os.path.join(self._temp_dir, "seg_")
        final_segmentation_file_path = os.path.join(self._determine_output_dir(),
                                                    self.config.get_string("output_file_prefix", "segmap_"))

        # Render the temporary output
        self._render("seg_", custom_file_path=temporary_segmentation_file_path)

        # Find optimal dtype of output based on max index
        for dtype in [np.uint8, np.uint16, np.uint32]:
            optimal_dtype = dtype
            if np.iinfo(optimal_dtype).max >= len(colors) - 1:
                break

        # After rendering
        for frame in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):
            # For each rendered frame
            file_path = temporary_segmentation_file_path + "%04d" % frame + ".exr"
            segmentation = load_image(file_path)

            segmap = Utility.map_back_from_equally_spaced_equidistant_values(
                segmentation, num_splits_per_dimension, self.render_colorspace_size_per_dimension)
            segmap = segmap.astype(optimal_dtype)

            fname = final_segmentation_file_path + "%04d" % frame
            np.save(fname, segmap)

    # Write color mappings to file
    if color_map is not None:
        with open(os.path.join(self._determine_output_dir(),
                               self.config.get_string("segcolormap_output_file_prefix",
                                                      "class_inst_col_map") + ".csv"),
                  'w', newline='') as csvfile:
            fieldnames = list(color_map[0].keys())
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            for mapping in color_map:
                writer.writerow(mapping)

    self._register_output("segmap_", "segmap", ".npy", "1.0.0")
    if color_map is not None:
        self._register_output("class_inst_col_map", "segcolormap", ".csv", "1.0.0",
                              unique_for_camposes=False,
                              output_key_parameter_name="segcolormap_output_key",
                              output_file_prefix_parameter_name="segcolormap_output_file_prefix")
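# Sketch of how the color mapping written above can be read back (assumes the default
# "class_inst_col_map.csv" file name and the header produced by csv.DictWriter; the
# helper name is hypothetical, not part of the module):
import csv

def load_segcolormap(csv_path: str) -> list:
    """ Returns the segmentation-color mapping as a list of per-object dicts. """
    with open(csv_path, newline='') as csvfile:
        return list(csv.DictReader(csvfile))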
def _write_frames(self):
    """ Writes images, GT annotations and camera info. """

    # Paths to the already existing chunk folders (such folders may exist
    # when appending to an existing dataset).
    chunk_dirs = sorted(glob.glob(os.path.join(self.chunks_dir, '*')))
    chunk_dirs = [d for d in chunk_dirs if os.path.isdir(d)]

    # Get IDs of the last already existing chunk and frame.
    curr_chunk_id = 0
    curr_frame_id = 0
    if len(chunk_dirs):
        last_chunk_dir = sorted(chunk_dirs)[-1]

        last_chunk_gt_fpath = os.path.join(last_chunk_dir, 'scene_gt.json')
        chunk_gt = load_json(last_chunk_gt_fpath, keys_to_int=True)

        # Last chunk and frame IDs.
        last_chunk_id = int(os.path.basename(last_chunk_dir))
        last_frame_id = int(sorted(chunk_gt.keys())[-1])

        # Current chunk and frame IDs.
        curr_chunk_id = last_chunk_id
        curr_frame_id = last_frame_id + 1
        if curr_frame_id % self.frames_per_chunk == 0:
            curr_chunk_id += 1
            curr_frame_id = 0

    # Initialize structures for the GT annotations and camera info.
    chunk_gt = {}
    chunk_camera = {}
    if curr_frame_id != 0:
        # Load GT and camera info of the chunk we are appending to.
        chunk_gt = load_json(self.chunk_gt_tpath.format(chunk_id=curr_chunk_id), keys_to_int=True)
        chunk_camera = load_json(self.chunk_camera_tpath.format(chunk_id=curr_chunk_id), keys_to_int=True)

    # Go through all frames.
    num_new_frames = bpy.context.scene.frame_end - bpy.context.scene.frame_start
    for frame_id in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):
        # Activate frame.
        bpy.context.scene.frame_set(frame_id)

        # Reset data structures and prepare folders for a new chunk.
        if curr_frame_id == 0:
            chunk_gt = {}
            chunk_camera = {}
            os.makedirs(os.path.dirname(self.rgb_tpath.format(chunk_id=curr_chunk_id, im_id=0)))
            os.makedirs(os.path.dirname(self.depth_tpath.format(chunk_id=curr_chunk_id, im_id=0)))

        # Get GT annotations and camera info for the current frame.
        chunk_gt[curr_frame_id] = self._get_frame_gt()
        chunk_camera[curr_frame_id] = self._get_frame_camera()

        # Copy the resulting RGB image.
        rgb_output = self._find_registered_output_by_key("colors")
        if rgb_output is None:
            raise Exception("RGB image has not been rendered.")
        rgb_fpath = self.rgb_tpath.format(chunk_id=curr_chunk_id, im_id=curr_frame_id)
        shutil.copyfile(rgb_output['path'] % frame_id, rgb_fpath)

        # Load the resulting depth image.
        depth_output = self._find_registered_output_by_key("depth")
        if depth_output is None:
            raise Exception("Depth image has not been rendered.")
        depth = load_image(depth_output['path'] % frame_id, num_channels=1)
        depth = depth.squeeze(axis=2)

        # Scale the depth to retain a higher precision (the depth is saved
        # as a 16-bit PNG image with range 0-65535).
        depth_mm = 1000.0 * depth  # [m] -> [mm]
        depth_mm_scaled = depth_mm / float(self.depth_scale)

        # Save the scaled depth image.
        depth_fpath = self.depth_tpath.format(chunk_id=curr_chunk_id, im_id=curr_frame_id)
        save_depth(depth_fpath, depth_mm_scaled)

        # Save the chunk info if we are at the end of a chunk or at the last new frame.
        if ((curr_frame_id + 1) % self.frames_per_chunk == 0) or \
                (frame_id == num_new_frames - 1):

            # Save GT annotations.
            save_json(self.chunk_gt_tpath.format(chunk_id=curr_chunk_id), chunk_gt)

            # Save camera info.
            save_json(self.chunk_camera_tpath.format(chunk_id=curr_chunk_id), chunk_camera)

            # Update IDs.
            curr_chunk_id += 1
            curr_frame_id = 0
        else:
            curr_frame_id += 1

    return
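# Sketch of the inverse of the depth scaling above (assumes save_depth stores the
# values as 16-bit integers and that `depth_scale` matches the value used when
# writing; the helper name is hypothetical):
import numpy as np

def png_depth_to_meters(depth_png: np.ndarray, depth_scale: float) -> np.ndarray:
    depth_mm = depth_png.astype(np.float32) * depth_scale  # undo the precision scaling
    return depth_mm / 1000.0  # [mm] -> [m]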
def run(self):
    if self._avoid_rendering:
        print("Avoid rendering is on, no output produced!")
        return

    self.rgb_output_path = self._find_registered_output_by_key(self.rgb_output_key)["path"]

    # Collect camera and camera object
    cam_ob = bpy.context.scene.camera
    cam = cam_ob.data

    # The two branches below are exhaustive, so the original unreachable
    # "Resolution missing" exception has been dropped.
    if 'loaded_resolution' in cam:
        self.width, self.height = cam['loaded_resolution']
    else:
        self.width = self.config.get_int("resolution_x", 512)
        self.height = self.config.get_int("resolution_y", 512)
        bpy.context.scene.render.pixel_aspect_x = self.config.get_float("pixel_aspect_x", 1)

    print('Resolution: {}, {}'.format(bpy.context.scene.render.resolution_x,
                                      bpy.context.scene.render.resolution_y))

    self.baseline = cam.stereo.interocular_distance
    if not self.baseline:
        raise Exception("Stereo parameters are not set. Make sure to enable RGB stereo rendering before this module.")

    if self.config.get_bool("infer_focal_length_from_fov", False):
        fov = cam.angle_x if cam.angle_x else cam.angle
        if not fov:
            raise Exception("Could not obtain field of view angle")
        self.focal_length = float((1.0 / tan(fov / 2.0)) * (float(self.width) / 2.0))
    else:
        self.focal_length = self.config.get_float("focal_length", 0.0)
        if self.focal_length == 0.0:
            raise Exception("Focal length set to 0. This is either intentional or because no value was set by "
                            "the user. Either way, this needs to be corrected by setting a value > 0 or "
                            "enabling 'infer_focal_length_from_fov'.")

    for frame in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):
        path_split = self.rgb_output_path.split(".")
        path_l = "{}_L.{}".format(path_split[0], path_split[1])
        path_r = "{}_R.{}".format(path_split[0], path_split[1])

        imgL = load_image(path_l % frame)
        imgR = load_image(path_r % frame)

        depth, disparity = self.sgm(imgL, imgR)

        np.save(os.path.join(self.output_dir, "stereo-depth_%04d") % frame, depth)
        if self.config.get_bool("output_disparity", False):
            np.save(os.path.join(self.output_dir, "disparity_%04d") % frame, disparity)

    self._register_output("stereo-depth_", "stereo-depth", ".npy", "1.0.0")
    if self.config.get_bool("output_disparity", False):
        self._register_output("disparity_", "disparity", ".npy", "1.0.0")