def run(self):
    """Render the RGB color pass and register it as the "colors" output.

    Configures the Cycles renderer with the Intel denoiser, forces 8-bit
    RGB output in the configured image format, optionally switches to
    texture-less rendering and/or alpha-mapped textures, then renders with
    the "rgb_" prefix and registers the result.

    :raises Exception: If ``self._image_type`` is neither 'PNG' nor 'JPEG'.
    """
    # Map each supported image type to the suffix used for registration.
    suffix_per_type = {'PNG': ".png", 'JPEG': ".jpg"}
    # Fail fast: the original raised only AFTER rendering, wasting a full
    # render on an unsupported image type. Validate before any work is done.
    if self._image_type not in suffix_per_type:
        raise Exception("Unknown Image Type " + self._image_type)

    # if the rendering is not performed -> it is probably the debug case.
    do_undo = not self._avoid_rendering
    with Utility.UndoAfterExecution(perform_undo_op=do_undo):
        self._configure_renderer(use_denoiser=True, default_denoiser="Intel")

        # In case a previous renderer changed these settings
        bpy.context.scene.render.image_settings.color_mode = "RGB"
        bpy.context.scene.render.image_settings.file_format = self._image_type
        bpy.context.scene.render.image_settings.color_depth = "8"
        # only influences jpg quality
        bpy.context.scene.render.image_settings.quality = 95

        # check if texture less render mode is active
        if self._texture_less_mode:
            self.change_to_texture_less_render()

        if self._use_alpha_channel:
            self.add_alpha_channel_to_textures(blurry_edges=True)

        self._render("rgb_")
        self._register_output("rgb_", "colors",
                              suffix_per_type[self._image_type], "1.0.0")
def run(self):
    """Render surface normals for every frame and register them as "normals".

    Swaps every material in the scene for an emission-based normal material,
    renders to 32-bit OpenEXR, and registers the output.
    """
    with Utility.UndoAfterExecution():
        self._configure_renderer()

        normal_material = self._create_normal_material()

        # render normals
        # A pure emission shader needs no sampling or denoising.
        bpy.context.scene.cycles.samples = 1  # this gives the best result for emission shader
        bpy.context.view_layer.cycles.use_denoising = False

        # Assign the normal material to every material slot; objects with a
        # material list but no slots get it appended instead.
        for scene_obj in bpy.context.scene.objects:
            if scene_obj.material_slots:
                for slot_idx, slot in enumerate(scene_obj.material_slots):
                    if self._use_alpha_channel:
                        scene_obj.data.materials[slot_idx] = self.add_alpha_texture_node(
                            slot.material, normal_material)
                    else:
                        scene_obj.data.materials[slot_idx] = normal_material
            elif hasattr(scene_obj.data, 'materials'):
                scene_obj.data.materials.append(normal_material)

        # Set the color channel depth of the output to 32bit
        bpy.context.scene.render.image_settings.file_format = "OPEN_EXR"
        bpy.context.scene.render.image_settings.color_depth = "32"

        if self._use_alpha_channel:
            self.add_alpha_channel_to_textures(blurry_edges=False)

        self._render("normal_")
        self._register_output("normal_", "normals", ".exr", "2.0.0")
def run(self):
    """Render the color pass via RendererUtility, with optional texture-less
    mode, alpha textures, and motion blur / rolling shutter."""
    # if the rendering is not performed -> it is probably the debug case.
    with Utility.UndoAfterExecution(perform_undo_op=not self._avoid_output):
        self._configure_renderer(use_denoiser=True, default_denoiser="Intel")

        # check if texture less render mode is active
        if self._texture_less_mode:
            MaterialLoaderUtility.change_to_texture_less_render(
                self._use_alpha_channel)

        if self._use_alpha_channel:
            MaterialLoaderUtility.add_alpha_channel_to_textures(
                blurry_edges=True)

        # motion blur
        if self._use_motion_blur:
            shutter_type = 'TOP' if self._use_rolling_shutter else "NONE"
            RendererUtility.enable_motion_blur(self._motion_blur_length,
                                               shutter_type,
                                               self._rolling_shutter_length)

        transparent = self.config.get_bool("transparent_background", False)
        self._render("rgb_", "colors",
                     enable_transparency=transparent,
                     file_format=self._image_type)
def run(self):
    """Configure the renderer and delegate segmentation-map rendering to
    SegMapRendererUtility."""
    # get the type of mappings which should be performed
    attribute_mapping = self.config.get_raw_dict("map_by", "class")
    default_values = self.config.get_raw_dict("default_values", {})
    if 'class' in default_values:
        # "class" is an alias for the cp_category_id custom property.
        default_values['cp_category_id'] = default_values['class']

    with Utility.UndoAfterExecution():
        self._configure_renderer(default_samples=1)
        if self._avoid_output:
            return
        SegMapRendererUtility.render(
            self._determine_output_dir(),
            self._temp_dir,
            attribute_mapping,
            default_values,
            self.config.get_string("output_file_prefix", "segmap_"),
            self.config.get_string("output_key", "segmap"),
            self.config.get_string("segcolormap_output_file_prefix",
                                   "class_inst_col_map"),
            self.config.get_string("segcolormap_output_key", "segcolormap"),
            use_alpha_channel=self._use_alpha_channel,
            return_data=False)
def run(self):
    """Render the RGB color pass with optional transparency, motion blur and
    rolling shutter, then register it as the "colors" output.

    :raises UserWarning: If rolling shutter is requested without motion blur
        being enabled, or without a positive motion blur length.
    :raises Exception: If ``self._image_type`` is neither 'PNG' nor 'JPEG'.
    """
    # if the rendering is not performed -> it is probably the debug case.
    do_undo = not self._avoid_rendering
    with Utility.UndoAfterExecution(perform_undo_op=do_undo):
        self._configure_renderer(use_denoiser=True, default_denoiser="Intel")

        # In case a previous renderer changed these settings
        # Store as RGB by default unless the user specifies store_alpha as true in yaml
        transparent_background = self.config.get_bool(
            "transparent_background", False)
        bpy.context.scene.render.image_settings.color_mode = \
            "RGBA" if transparent_background else "RGB"
        # set the background as transparent if transparent_background is true in yaml
        bpy.context.scene.render.film_transparent = transparent_background
        bpy.context.scene.render.image_settings.file_format = self._image_type
        bpy.context.scene.render.image_settings.color_depth = "8"
        # only influences jpg quality
        bpy.context.scene.render.image_settings.quality = 95

        # check if texture less render mode is active
        if self._texture_less_mode:
            self.change_to_texture_less_render()

        if self._use_alpha_channel:
            self.add_alpha_channel_to_textures(blurry_edges=True)

        # motion blur
        if self._use_motion_blur:
            bpy.context.scene.render.use_motion_blur = True
            bpy.context.scene.render.motion_blur_shutter = self._motion_blur_length

        # rolling shutter
        if self._use_rolling_shutter:
            # Validate BEFORE touching the scene: the original code applied
            # the rolling shutter settings first and raised afterwards,
            # leaving the scene in a half-configured state.
            if not self._use_motion_blur:
                raise UserWarning(
                    "Cannot enable rolling shutter because motion blur is not enabled, "
                    "see setting use_motion_blur in renderer.RgbRenderer module."
                )
            if self._motion_blur_length <= 0:
                raise UserWarning(
                    "Cannot enable rolling shutter because no motion blur length is specified, "
                    "see setting motion_blur_length in renderer.RgbRenderer module."
                )
            bpy.context.scene.cycles.rolling_shutter_type = 'TOP'
            bpy.context.scene.cycles.rolling_shutter_duration = self._rolling_shutter_length

        self._render("rgb_")

        if self._image_type == 'PNG':
            self._register_output("rgb_", "colors", ".png", "1.0.0")
        elif self._image_type == 'JPEG':
            self._register_output("rgb_", "colors", ".jpg", "1.0.0")
        else:
            raise Exception("Unknown Image Type " + self._image_type)
def run(self):
    """Render an 8-bit RGB PNG pass and register it as the "colors" output."""
    with Utility.UndoAfterExecution():
        self._configure_renderer()

        # In case a previous renderer changed these settings
        image_settings = bpy.context.scene.render.image_settings
        image_settings.color_mode = "RGB"
        image_settings.file_format = "PNG"
        image_settings.color_depth = "8"

        self._render("rgb_")
        self._register_output("rgb_", "colors", ".png", "1.0.0")
def run(self):
    """Render an 8-bit RGB PNG pass (Intel denoiser) and register it as
    the "colors" output."""
    with Utility.UndoAfterExecution():
        self._configure_renderer(default_denoiser="Intel")

        # In case a previous renderer changed these settings
        image_settings = bpy.context.scene.render.image_settings
        image_settings.color_mode = "RGB"
        image_settings.file_format = "PNG"
        image_settings.color_depth = "8"

        # check if texture less render mode is active
        if self._texture_less_mode:
            self.change_to_texture_less_render()
        if self._use_alpha_channel:
            self.add_alpha_channel_to_textures(blurry_edges=True)

        self._render("rgb_")
        self._register_output("rgb_", "colors", ".png", "1.0.0")
def run(self):
    """Configure the renderer and delegate optical-flow rendering to
    FlowRendererUtility."""
    with Utility.UndoAfterExecution():
        self._configure_renderer(default_samples=1)
        if self._avoid_rendering:
            return
        get_bool = self.config.get_bool
        get_string = self.config.get_string
        FlowRendererUtility.render(
            self._determine_output_dir(),
            self._temp_dir,
            get_bool('forward_flow', False),
            get_bool('backward_flow', False),
            get_bool('blender_image_coordinate_style', False),
            get_string('forward_flow_output_file_prefix', 'forward_flow_'),
            get_string("forward_flow_output_key", "forward_flow"),
            get_string('backward_flow_output_file_prefix', 'backward_flow_'),
            get_string("backward_flow_output_key", "backward_flow"))
def run(self):
    """Render a high-quality JPEG color pass and register it as "colors"."""
    # if the rendering is not performed -> it is probably the debug case.
    with Utility.UndoAfterExecution(perform_undo_op=not self._avoid_rendering):
        self._configure_renderer(default_denoiser="Intel", default_samples=64)

        # In case a previous renderer changed these settings
        image_settings = bpy.context.scene.render.image_settings
        image_settings.color_mode = "RGB"
        image_settings.file_format = "JPEG"
        image_settings.color_depth = "8"
        image_settings.quality = 98

        # check if texture less render mode is active
        if self._texture_less_mode:
            self.change_to_texture_less_render()
        if self._use_alpha_channel:
            self.add_alpha_channel_to_textures(blurry_edges=True)

        self._render("")
        self._register_output("", "colors", ".jpg", "1.0.0")
def run(self):
    """Render forward and/or backward optical flow and register the outputs.

    Renders the scene's motion vector pass once (both directions are written
    by the compositor setup from ``_output_vector_field``), then converts the
    temporary .exr vector fields per frame into .npy optical flow files.

    :raises Exception: If neither forward_flow nor backward_flow is enabled.
    """
    # determine whether to get optical flow or scene flow - get scene flow per default
    get_forward_flow = self.config.get_bool('forward_flow', False)
    get_backward_flow = self.config.get_bool('backward_flow', False)
    if get_forward_flow is False and get_backward_flow is False:
        raise Exception(
            "Take the FlowRenderer Module out of the config if both forward and backward flow are set to False!"
        )
    with Utility.UndoAfterExecution():
        self._configure_renderer(
            default_samples=self.config.get_int("samples", 1))
        self._output_vector_field()
        # only need to render once; both fwd and bwd flow will be saved
        temporary_fwd_flow_file_path = os.path.join(
            self._temp_dir, 'fwd_flow_')
        temporary_bwd_flow_file_path = os.path.join(
            self._temp_dir, 'bwd_flow_')
        # NOTE(review): rendering uses the bwd prefix, but the fwd files are
        # presumably written by the compositor file-output nodes as well —
        # confirm against _output_vector_field.
        self._render("bwd_flow_",
                     custom_file_path=temporary_bwd_flow_file_path)
        # After rendering: convert to optical flow or calculate hsv visualization, if desired
        if not self._avoid_rendering:
            # frame_end is exclusive here (matches the other renderers).
            for frame in range(bpy.context.scene.frame_start,
                               bpy.context.scene.frame_end):
                # temporarily save respective vector fields
                if get_forward_flow:
                    file_path = temporary_fwd_flow_file_path + "%04d" % frame + ".exr"
                    fwd_flow_field = load_image(
                        file_path, num_channels=4).astype(np.float32)
                    # Flip the y component unless blender's bottom-left image
                    # coordinate convention is requested.
                    if not self.config.get_bool(
                            'blender_image_coordinate_style', False):
                        fwd_flow_field[:, :, 1] = fwd_flow_field[:, :, 1] * -1
                    fname = os.path.join(
                        self._determine_output_dir(),
                        self.config.get_string(
                            'forward_flow_output_file_prefix',
                            'forward_flow_')) + '%04d' % frame
                    forward_flow = fwd_flow_field * -1  # invert forward flow to point at next frame
                    # Only the first two channels (x/y displacement) are kept.
                    np.save(fname + '.npy', forward_flow[:, :, :2])
                if get_backward_flow:
                    file_path = temporary_bwd_flow_file_path + "%04d" % frame + ".exr"
                    bwd_flow_field = load_image(
                        file_path, num_channels=4).astype(np.float32)
                    if not self.config.get_bool(
                            'blender_image_coordinate_style', False):
                        bwd_flow_field[:, :, 1] = bwd_flow_field[:, :, 1] * -1
                    fname = os.path.join(
                        self._determine_output_dir(),
                        self.config.get_string(
                            'backward_flow_output_file_prefix',
                            'backward_flow_')) + '%04d' % frame
                    np.save(fname + '.npy', bwd_flow_field[:, :, :2])
        # register desired outputs
        if get_forward_flow:
            self._register_output(default_prefix=self.config.get_string(
                'forward_flow_output_file_prefix', 'forward_flow_'),
                                  default_key=self.config.get_string(
                                      "forward_flow_output_key",
                                      "forward_flow"),
                                  suffix='.npy',
                                  version='2.0.0')
        if get_backward_flow:
            self._register_output(default_prefix=self.config.get_string(
                'backward_flow_output_file_prefix', 'backward_flow_'),
                                  default_key=self.config.get_string(
                                      "backward_flow_output_key",
                                      "backward_flow"),
                                  suffix='.npy',
                                  version='2.0.0')
def render(output_dir, temp_dir, get_forward_flow, get_backward_flow,
           blender_image_coordinate_style=False,
           forward_flow_output_file_prefix="forward_flow_",
           forward_flow_output_key="forward_flow",
           backward_flow_output_file_prefix="backward_flow_",
           backward_flow_output_key="backward_flow"):
    """ Renders the optical flow (forward and backward) for all frames.

    :param output_dir: The directory to write images to.
    :param temp_dir: The directory to write intermediate data to.
    :param get_forward_flow: Whether to render forward optical flow.
    :param get_backward_flow: Whether to render backward optical flow.
    :param blender_image_coordinate_style: Whether to specify the image coordinate system at the bottom left (blender default; True) or top left (standard convention; False).
    :param forward_flow_output_file_prefix: The file prefix that should be used when writing forward flow to a file.
    :param forward_flow_output_key: The key which should be used for storing forward optical flow values.
    :param backward_flow_output_file_prefix: The file prefix that should be used when writing backward flow to a file.
    :param backward_flow_output_key: The key which should be used for storing backward optical flow values.
    """
    if get_forward_flow is False and get_backward_flow is False:
        raise Exception(
            "Take the FlowRenderer Module out of the config if both forward and backward flow are set to False!"
        )
    with Utility.UndoAfterExecution():
        # Minimal render settings: one sample, no denoising/adaptive sampling,
        # reduced light bounces — the vector pass does not need quality shading.
        RendererUtility.init()
        RendererUtility.set_samples(1)
        RendererUtility.set_adaptive_sampling(0)
        RendererUtility.set_denoiser(None)
        RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

        FlowRendererUtility._output_vector_field(get_forward_flow,
                                                 get_backward_flow, output_dir)

        # only need to render once; both fwd and bwd flow will be saved
        temporary_fwd_flow_file_path = os.path.join(temp_dir, 'fwd_flow_')
        temporary_bwd_flow_file_path = os.path.join(temp_dir, 'bwd_flow_')
        RendererUtility.render(temp_dir, "bwd_flow_", None)

        # After rendering: convert to optical flow or calculate hsv visualization, if desired
        # NOTE: frame_end is treated as exclusive here.
        for frame in range(bpy.context.scene.frame_start,
                           bpy.context.scene.frame_end):
            # temporarily save respective vector fields
            if get_forward_flow:
                file_path = temporary_fwd_flow_file_path + "%04d" % frame + ".exr"
                fwd_flow_field = load_image(
                    file_path, num_channels=4).astype(np.float32)
                # Flip the y component for top-left image coordinates.
                if not blender_image_coordinate_style:
                    fwd_flow_field[:, :, 1] = fwd_flow_field[:, :, 1] * -1
                fname = os.path.join(
                    output_dir, forward_flow_output_file_prefix) + '%04d' % frame
                forward_flow = fwd_flow_field * -1  # invert forward flow to point at next frame
                # Keep only the x/y displacement channels.
                np.save(fname + '.npy', forward_flow[:, :, :2])
            if get_backward_flow:
                file_path = temporary_bwd_flow_file_path + "%04d" % frame + ".exr"
                bwd_flow_field = load_image(
                    file_path, num_channels=4).astype(np.float32)
                if not blender_image_coordinate_style:
                    bwd_flow_field[:, :, 1] = bwd_flow_field[:, :, 1] * -1
                fname = os.path.join(
                    output_dir, backward_flow_output_file_prefix) + '%04d' % frame
                np.save(fname + '.npy', bwd_flow_field[:, :, :2])
    # register desired outputs
    if get_forward_flow:
        Utility.register_output(output_dir, forward_flow_output_file_prefix,
                                forward_flow_output_key, '.npy', '2.0.0')
    if get_backward_flow:
        Utility.register_output(output_dir, backward_flow_output_file_prefix,
                                backward_flow_output_key, '.npy', '2.0.0')
def run(self):
    """Render segmentation maps for all frames and register the outputs.

    Each object is rendered with a unique flat color, the rendered color image
    is mapped back to object indices, and each requested attribute ("map_by")
    becomes one channel of the resulting .npy map. Attribute values that
    cannot be stored in an image are written to a color-mapping .csv instead.
    """
    with Utility.UndoAfterExecution():
        self._configure_renderer(default_samples=1)

        # Get objects with meshes (i.e. not lights or cameras)
        objs_with_mats = get_all_mesh_objects()
        colors, num_splits_per_dimension, used_objects = self._colorize_objects_for_instance_segmentation(
            objs_with_mats)

        # Render to EXR so the encoded object colors survive exactly;
        # disable denoising/filtering, which would mix neighbouring colors.
        bpy.context.scene.render.image_settings.file_format = "OPEN_EXR"
        bpy.context.scene.render.image_settings.color_depth = "16"
        bpy.context.view_layer.cycles.use_denoising = False
        bpy.context.scene.cycles.filter_width = 0.0

        if self._use_alpha_channel:
            self.add_alpha_channel_to_textures(blurry_edges=False)

        # Determine path for temporary and for final output
        temporary_segmentation_file_path = os.path.join(self._temp_dir, "seg_")
        final_segmentation_file_path = os.path.join(
            self._determine_output_dir(),
            self.config.get_string("output_file_prefix", "segmap_"))

        # Render the temporary output
        self._render("seg_", custom_file_path=temporary_segmentation_file_path)

        # Find optimal dtype of output based on max index
        for dtype in [np.uint8, np.uint16, np.uint32]:
            optimal_dtype = dtype
            if np.iinfo(optimal_dtype).max >= len(colors) - 1:
                break

        # get the type of mappings which should be performed
        used_attributes = self.config.get_raw_dict("map_by", "class")
        used_default_values = self.config.get_raw_dict("default_values", {})
        if 'class' in used_default_values:
            # "class" is an alias for the cp_category_id custom property.
            used_default_values['cp_category_id'] = used_default_values['class']
        if isinstance(used_attributes, str):
            # only one result is requested
            result_channels = 1
            used_attributes = [used_attributes]
        elif isinstance(used_attributes, list):
            result_channels = len(used_attributes)
        else:
            raise Exception("The type of this is not supported here: {}".format(used_attributes))

        save_in_csv_attributes = {}
        # define them for the avoid rendering case
        there_was_an_instance_rendering = False
        list_of_used_attributes = []

        # After rendering
        if not self._avoid_rendering:
            # NOTE: frame_end is treated as exclusive here.
            for frame in range(bpy.context.scene.frame_start,
                               bpy.context.scene.frame_end):
                # for each rendered frame
                file_path = temporary_segmentation_file_path + "%04d" % frame + ".exr"
                segmentation = load_image(file_path)
                # Decode the rendered colors back into object indices.
                segmap = Utility.map_back_from_equally_spaced_equidistant_values(
                    segmentation, num_splits_per_dimension,
                    self.render_colorspace_size_per_dimension)
                segmap = segmap.astype(optimal_dtype)

                used_object_ids = np.unique(segmap)
                max_id = np.max(used_object_ids)
                if max_id >= len(used_objects):
                    raise Exception("There are more object colors than there are objects")
                combined_result_map = []
                there_was_an_instance_rendering = False
                list_of_used_attributes = []
                used_channels = []
                # One output channel per requested attribute.
                for channel_id in range(result_channels):
                    resulting_map = np.empty((segmap.shape[0], segmap.shape[1]))
                    was_used = False
                    current_attribute = used_attributes[channel_id]
                    org_attribute = current_attribute

                    # if the class is used the category_id attribute is evaluated
                    if current_attribute == "class":
                        current_attribute = "cp_category_id"
                    # in the instance case the resulting ids are directly used
                    if current_attribute == "instance":
                        there_was_an_instance_rendering = True
                        resulting_map = segmap
                        was_used = True
                        # a non default value was also used
                        non_default_value_was_used = True
                    else:
                        if current_attribute != "cp_category_id":
                            list_of_used_attributes.append(current_attribute)
                        # for the current attribute remove cp_ and _csv, if present
                        used_attribute = current_attribute
                        if used_attribute.startswith("cp_"):
                            used_attribute = used_attribute[len("cp_"):]
                        # check if a default value was specified
                        default_value_set = False
                        if current_attribute in used_default_values or used_attribute in used_default_values:
                            default_value_set = True
                            if current_attribute in used_default_values:
                                default_value = used_default_values[current_attribute]
                            elif used_attribute in used_default_values:
                                default_value = used_default_values[used_attribute]
                        last_state_save_in_csv = None
                        # this avoids that for certain attributes only the default value is written
                        non_default_value_was_used = False
                        # iterate over all object ids
                        for object_id in used_object_ids:
                            is_default_value = False
                            # get the corresponding object via the id
                            current_obj = used_objects[object_id]
                            # if the current obj has a attribute with that name -> get it
                            if hasattr(current_obj, used_attribute):
                                used_value = getattr(current_obj, used_attribute)
                            # if the current object has a custom property with that name -> get it
                            elif current_attribute.startswith("cp_") and used_attribute in current_obj:
                                used_value = current_obj[used_attribute]
                            elif current_attribute.startswith("cf_"):
                                # "custom function" attributes; only cf_basename is handled here.
                                if current_attribute == "cf_basename":
                                    used_value = current_obj.name
                                    if "." in used_value:
                                        used_value = used_value[:used_value.rfind(".")]
                            elif default_value_set:
                                # if none of the above applies use the default value
                                used_value = default_value
                                is_default_value = True
                            else:
                                # if the requested current_attribute is not a custom property or a attribute
                                # or there is a default value stored
                                # it throws an exception
                                raise Exception("The obj: {} does not have the "
                                                "attribute: {}, striped: {}. Maybe try a default "
                                                "value.".format(current_obj.name, current_attribute, used_attribute))

                            # check if the value should be saved as an image or in the csv file
                            save_in_csv = False
                            try:
                                # Non-numeric values raise ValueError here and
                                # are diverted to the csv instead.
                                resulting_map[segmap == object_id] = used_value
                                was_used = True
                                if not is_default_value:
                                    non_default_value_was_used = True
                                # save everything which is not instance also in the .csv
                                if current_attribute != "instance":
                                    save_in_csv = True
                            except ValueError:
                                save_in_csv = True
                            if last_state_save_in_csv is not None and last_state_save_in_csv != save_in_csv:
                                raise Exception("During creating the mapping, the saving to an image or a csv file "
                                                "switched, this might indicated that the used default value, does "
                                                "not have the same type as the returned value, "
                                                "for: {}".format(current_attribute))
                            last_state_save_in_csv = save_in_csv
                            if save_in_csv:
                                if object_id in save_in_csv_attributes:
                                    save_in_csv_attributes[object_id][used_attribute] = used_value
                                else:
                                    save_in_csv_attributes[object_id] = {used_attribute: used_value}
                    if was_used and non_default_value_was_used:
                        used_channels.append(org_attribute)
                        combined_result_map.append(resulting_map)

                fname = final_segmentation_file_path + "%04d" % frame
                # combine all resulting images to one image
                resulting_map = np.stack(combined_result_map, axis=2)
                # remove the unneeded third dimension
                if resulting_map.shape[2] == 1:
                    resulting_map = resulting_map[:, :, 0]
                np.save(fname, resulting_map)

        if not there_was_an_instance_rendering:
            if len(list_of_used_attributes) > 0:
                raise Exception("There were attributes specified in the may_by, which could not be saved as "
                                "there was no \"instance\" may_by key used. This is true for this/these "
                                "keys: {}".format(", ".join(list_of_used_attributes)))
            # if there was no instance rendering no .csv file is generated!
            # delete all saved infos about .csv
            save_in_csv_attributes = {}

        # write color mappings to file
        if save_in_csv_attributes and not self._avoid_rendering:
            csv_file_path = os.path.join(
                self._determine_output_dir(),
                self.config.get_string("segcolormap_output_file_prefix",
                                       "class_inst_col_map") + ".csv")
            with open(csv_file_path, 'w', newline='') as csvfile:
                # get from the first element the used field names
                fieldnames = ["idx"]
                # get all used object element keys
                for object_element in save_in_csv_attributes.values():
                    fieldnames.extend(list(object_element.keys()))
                    break
                for channel_name in used_channels:
                    fieldnames.append("channel_{}".format(channel_name))
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                # save for each object all values in one row
                for obj_idx, object_element in save_in_csv_attributes.items():
                    object_element["idx"] = obj_idx
                    for i, channel_name in enumerate(used_channels):
                        object_element["channel_{}".format(channel_name)] = i
                    writer.writerow(object_element)

        self._register_output("segmap_", "segmap", ".npy", "2.0.0")
        if save_in_csv_attributes:
            self._register_output("class_inst_col_map", "segcolormap", ".csv",
                                  "2.0.0", unique_for_camposes=False,
                                  output_key_parameter_name="segcolormap_output_key",
                                  output_file_prefix_parameter_name="segcolormap_output_file_prefix")
def render(output_dir, temp_dir, used_attributes, used_default_values=None,
           file_prefix="segmap_", output_key="segmap",
           segcolormap_output_file_prefix="class_inst_col_map",
           segcolormap_output_key="segcolormap", use_alpha_channel=False,
           render_colorspace_size_per_dimension=2048):
    """ Renders segmentation maps for all frames.

    :param output_dir: The directory to write images to.
    :param temp_dir: The directory to write intermediate data to.
    :param used_attributes: The attributes to be used for color mapping.
    :param used_default_values: The default values used for the keys used in used_attributes. Defaults to an empty dict.
    :param file_prefix: The prefix to use for writing the images.
    :param output_key: The key to use for registering the output.
    :param segcolormap_output_file_prefix: The prefix to use for writing the segmation-color map csv.
    :param segcolormap_output_key: The key to use for registering the segmation-color map output.
    :param use_alpha_channel: If true, the alpha channel stored in .png textures is used.
    :param render_colorspace_size_per_dimension: As we use float16 for storing the rendering, the interval of
        integers which can be precisely stored is [-2048, 2048]. As blender does not allow negative values for
        colors, we use [0, 2048] ** 3 as our color space which allows ~8 billion different colors/objects.
        This should be enough.
    """
    # Fix for the old mutable default argument ({}): copy the dict so that
    # neither the (formerly shared) default nor a caller-supplied dict is
    # mutated by the 'class' -> 'cp_category_id' aliasing below.
    used_default_values = dict(used_default_values) if used_default_values else {}
    # NOTE(review): a caller in this file passes return_data=... which this
    # signature does not accept — looks like a version mismatch; confirm.
    with Utility.UndoAfterExecution():
        # Minimal render settings: the object colors must be reproduced
        # exactly, so sampling/denoising/filtering are all disabled.
        RendererUtility.init()
        RendererUtility.set_samples(1)
        RendererUtility.set_adaptive_sampling(0)
        RendererUtility.set_denoiser(None)
        RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

        # Get objects with meshes (i.e. not lights or cameras)
        objs_with_mats = get_all_blender_mesh_objects()
        colors, num_splits_per_dimension, used_objects = SegMapRendererUtility._colorize_objects_for_instance_segmentation(
            objs_with_mats, use_alpha_channel,
            render_colorspace_size_per_dimension)

        bpy.context.scene.cycles.filter_width = 0.0

        if use_alpha_channel:
            MaterialLoaderUtility.add_alpha_channel_to_textures(
                blurry_edges=False)

        # Determine path for temporary and for final output
        temporary_segmentation_file_path = os.path.join(temp_dir, "seg_")
        final_segmentation_file_path = os.path.join(output_dir, file_prefix)

        RendererUtility.set_output_format("OPEN_EXR", 16)
        RendererUtility.render(temp_dir, "seg_", None)

        # Find optimal dtype of output based on max index
        for dtype in [np.uint8, np.uint16, np.uint32]:
            optimal_dtype = dtype
            if np.iinfo(optimal_dtype).max >= len(colors) - 1:
                break

        if 'class' in used_default_values:
            # "class" is an alias for the cp_category_id custom property.
            used_default_values['cp_category_id'] = used_default_values['class']

        if isinstance(used_attributes, str):
            # only one result is requested
            result_channels = 1
            used_attributes = [used_attributes]
        elif isinstance(used_attributes, list):
            result_channels = len(used_attributes)
        else:
            raise Exception(
                "The type of this is not supported here: {}".format(
                    used_attributes))

        save_in_csv_attributes = {}
        # define them for the avoid rendering case
        there_was_an_instance_rendering = False
        list_of_used_attributes = []

        # Check if stereo is enabled
        if bpy.context.scene.render.use_multiview:
            suffixes = ["_L", "_R"]
        else:
            suffixes = [""]

        # After rendering (frame_end is treated as exclusive here)
        for frame in range(bpy.context.scene.frame_start,
                           bpy.context.scene.frame_end):
            # for each rendered frame
            for suffix in suffixes:
                file_path = temporary_segmentation_file_path + (
                    "%04d" % frame) + suffix + ".exr"
                segmentation = load_image(file_path)
                # (removed leftover debug print of the file path / shape)
                # Decode the rendered colors back into object indices.
                segmap = Utility.map_back_from_equally_spaced_equidistant_values(
                    segmentation, num_splits_per_dimension,
                    render_colorspace_size_per_dimension)
                segmap = segmap.astype(optimal_dtype)

                used_object_ids = np.unique(segmap)
                max_id = np.max(used_object_ids)
                if max_id >= len(used_objects):
                    raise Exception(
                        "There are more object colors than there are objects")
                combined_result_map = []
                there_was_an_instance_rendering = False
                list_of_used_attributes = []
                used_channels = []
                # One output channel per requested attribute.
                for channel_id in range(result_channels):
                    resulting_map = np.empty((segmap.shape[0], segmap.shape[1]))
                    was_used = False
                    current_attribute = used_attributes[channel_id]
                    org_attribute = current_attribute

                    # if the class is used the category_id attribute is evaluated
                    if current_attribute == "class":
                        current_attribute = "cp_category_id"
                    # in the instance case the resulting ids are directly used
                    if current_attribute == "instance":
                        there_was_an_instance_rendering = True
                        resulting_map = segmap
                        was_used = True
                        # a non default value was also used
                        non_default_value_was_used = True
                    else:
                        if current_attribute != "cp_category_id":
                            list_of_used_attributes.append(current_attribute)
                        # for the current attribute remove cp_ and _csv, if present
                        used_attribute = current_attribute
                        if used_attribute.startswith("cp_"):
                            used_attribute = used_attribute[len("cp_"):]
                        # check if a default value was specified
                        default_value_set = False
                        if current_attribute in used_default_values or used_attribute in used_default_values:
                            default_value_set = True
                            if current_attribute in used_default_values:
                                default_value = used_default_values[current_attribute]
                            elif used_attribute in used_default_values:
                                default_value = used_default_values[used_attribute]
                        last_state_save_in_csv = None
                        # this avoids that for certain attributes only the default value is written
                        non_default_value_was_used = False
                        # iterate over all object ids
                        for object_id in used_object_ids:
                            is_default_value = False
                            # get the corresponding object via the id
                            current_obj = used_objects[object_id]
                            # if the current obj has a attribute with that name -> get it
                            if hasattr(current_obj, used_attribute):
                                used_value = getattr(current_obj, used_attribute)
                            # if the current object has a custom property with that name -> get it
                            elif current_attribute.startswith("cp_") and used_attribute in current_obj:
                                used_value = current_obj[used_attribute]
                            elif current_attribute.startswith("cf_"):
                                # "custom function" attributes; only cf_basename is handled here.
                                if current_attribute == "cf_basename":
                                    used_value = current_obj.name
                                    if "." in used_value:
                                        used_value = used_value[:used_value.rfind(".")]
                            elif default_value_set:
                                # if none of the above applies use the default value
                                used_value = default_value
                                is_default_value = True
                            else:
                                # if the requested current_attribute is not a custom property or a attribute
                                # or there is a default value stored
                                # it throws an exception
                                raise Exception(
                                    "The obj: {} does not have the "
                                    "attribute: {}, striped: {}. Maybe try a default "
                                    "value.".format(current_obj.name,
                                                    current_attribute,
                                                    used_attribute))

                            # check if the value should be saved as an image or in the csv file
                            save_in_csv = False
                            try:
                                # Non-numeric values raise ValueError here and
                                # are diverted to the csv instead.
                                resulting_map[segmap == object_id] = used_value
                                was_used = True
                                if not is_default_value:
                                    non_default_value_was_used = True
                                # save everything which is not instance also in the .csv
                                if current_attribute != "instance":
                                    save_in_csv = True
                            except ValueError:
                                save_in_csv = True
                            if last_state_save_in_csv is not None and last_state_save_in_csv != save_in_csv:
                                raise Exception(
                                    "During creating the mapping, the saving to an image or a csv file "
                                    "switched, this might indicated that the used default value, does "
                                    "not have the same type as the returned value, "
                                    "for: {}".format(current_attribute))
                            last_state_save_in_csv = save_in_csv
                            if save_in_csv:
                                if object_id in save_in_csv_attributes:
                                    save_in_csv_attributes[object_id][used_attribute] = used_value
                                else:
                                    save_in_csv_attributes[object_id] = {
                                        used_attribute: used_value
                                    }
                    if was_used and non_default_value_was_used:
                        used_channels.append(org_attribute)
                        combined_result_map.append(resulting_map)

                fname = final_segmentation_file_path + ("%04d" % frame) + suffix
                # combine all resulting images to one image
                resulting_map = np.stack(combined_result_map, axis=2)
                # remove the unneeded third dimension
                if resulting_map.shape[2] == 1:
                    resulting_map = resulting_map[:, :, 0]
                np.save(fname, resulting_map)

        if not there_was_an_instance_rendering:
            if len(list_of_used_attributes) > 0:
                # ("map_by" matches the config key; the old message said "may_by")
                raise Exception(
                    "There were attributes specified in the map_by, which could not be saved as "
                    "there was no \"instance\" map_by key used. This is true for this/these "
                    "keys: {}".format(", ".join(list_of_used_attributes)))
            # if there was no instance rendering no .csv file is generated!
            # delete all saved infos about .csv
            save_in_csv_attributes = {}

        # write color mappings to file
        if save_in_csv_attributes:
            csv_file_path = os.path.join(
                output_dir, segcolormap_output_file_prefix + ".csv")
            with open(csv_file_path, 'w', newline='') as csvfile:
                # get from the first element the used field names
                fieldnames = ["idx"]
                # get all used object element keys
                for object_element in save_in_csv_attributes.values():
                    fieldnames.extend(list(object_element.keys()))
                    break
                for channel_name in used_channels:
                    fieldnames.append("channel_{}".format(channel_name))
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                # save for each object all values in one row
                for obj_idx, object_element in save_in_csv_attributes.items():
                    object_element["idx"] = obj_idx
                    for i, channel_name in enumerate(used_channels):
                        object_element["channel_{}".format(channel_name)] = i
                    writer.writerow(object_element)

        Utility.register_output(output_dir, file_prefix, output_key, ".npy",
                                "2.0.0")
        if save_in_csv_attributes:
            Utility.register_output(output_dir,
                                    segcolormap_output_file_prefix,
                                    segcolormap_output_key, ".csv", "2.0.0",
                                    unique_for_camposes=False)
def simulate_and_fix_final_poses( min_simulation_time: float = 4.0, max_simulation_time: float = 40.0, check_object_interval: float = 2.0, object_stopped_location_threshold: float = 0.01, object_stopped_rotation_threshold: float = 0.1, substeps_per_frame: int = 10, solver_iters: int = 10): """ Simulates the current scene and in the end fixes the final poses of all active objects. The simulation is run for at least `min_simulation_time` seconds and at a maximum `max_simulation_time` seconds. Every `check_object_interval` seconds, it is checked if the maximum object movement in the last second is below a given threshold. If that is the case, the simulation is stopped. After performing the simulation, the simulation cache is removed, the rigid body components are disabled and the pose of the active objects is set to their final pose in the simulation. :param min_simulation_time: The minimum number of seconds to simulate. :param max_simulation_time: The maximum number of seconds to simulate. :param check_object_interval: The interval in seconds at which all objects should be checked if they are still moving. If all objects have stopped moving, than the simulation will be stopped. :param object_stopped_location_threshold: The maximum difference per second and per coordinate in the rotation Euler vector that is allowed. such that an object is still recognized as 'stopped moving'. :param object_stopped_rotation_threshold: The maximum difference per second and per coordinate in the rotation Euler vector that is allowed. such that an object is still recognized as 'stopped moving'. :param substeps_per_frame: Number of simulation steps taken per frame. :param solver_iters: Number of constraint solver iterations made per simulation step. 
""" # Undo changes made in the simulation like origin adjustment and persisting the object's scale with Utility.UndoAfterExecution(): # Run simulation and remember poses before and after obj_poses_before_sim = PhysicsSimulation._get_pose() origin_shifts = PhysicsSimulation.simulate( min_simulation_time, max_simulation_time, check_object_interval, object_stopped_location_threshold, object_stopped_rotation_threshold, substeps_per_frame, solver_iters) obj_poses_after_sim = PhysicsSimulation._get_pose() # Make sure to remove the simulation cache as we are only interested in the final poses bpy.ops.ptcache.free_bake( {"point_cache": bpy.context.scene.rigidbody_world.point_cache}) # Fix the pose of all objects to their pose at the and of the simulation (also revert origin shift) objects_with_physics = [ MeshObject(obj) for obj in get_all_blender_mesh_objects() if obj.rigid_body is not None ] for obj in objects_with_physics: # Skip objects that have parents with compound rigid body component has_compound_parent = obj.get_parent( ) is not None and obj.get_parent().get_rigidbody( ) is not None and obj.get_parent().get_rigidbody( ).collision_shape == "COMPOUND" if obj.get_rigidbody( ).type == "ACTIVE" and not has_compound_parent: # compute relative object rotation before and after simulation R_obj_before_sim = mathutils.Euler(obj_poses_before_sim[ obj.get_name()]['rotation']).to_matrix() R_obj_after = mathutils.Euler(obj_poses_after_sim[ obj.get_name()]['rotation']).to_matrix() R_obj_rel = R_obj_before_sim @ R_obj_after.transposed() # Apply relative rotation to origin shift origin_shift = R_obj_rel.transposed() @ mathutils.Vector( origin_shifts[obj.get_name()]) # Fix pose of object to the one it had at the end of the simulation obj.set_location( obj_poses_after_sim[obj.get_name()]['location'] - origin_shift) obj.set_rotation_euler( obj_poses_after_sim[obj.get_name()]['rotation']) for obj in objects_with_physics: # Disable the rigidbody element of the object 
obj.disable_rigidbody()
def run(self):
    """ Renders segmentation maps for every frame and registers the outputs.

    Objects are colorized either per class or per instance (config key
    "map_by"), rendered to temporary .exr files, mapped back from colors to
    integer indices and stored as .npy files. If a color mapping exists, it
    is additionally written to a .csv file.
    """
    with Utility.UndoAfterExecution():
        # One sample per pixel is enough: the colorized materials are flat,
        # and extra samples would only risk blending neighboring colors.
        self._configure_renderer(default_samples=1)

        # Determines whether coloring happens per class or per instance.
        mapping_method = self.config.get_string("map_by", "class")

        # Restrict to objects that can carry materials (excludes lights,
        # cameras and other non-mesh data).
        material_objects = [
            obj for obj in bpy.context.scene.objects
            if hasattr(obj.data, 'materials')
        ]

        method_key = mapping_method.lower()
        if method_key == "class":
            colors, num_splits_per_dimension, color_map = \
                self._colorize_objects_for_semantic_segmentation(
                    material_objects)
        elif method_key == "instance":
            colors, num_splits_per_dimension, color_map = \
                self._colorize_objects_for_instance_segmentation(
                    material_objects)
        else:
            raise Exception(
                "Invalid mapping method: {}, possible for map_by are: class, instance"
                .format(mapping_method))

        # Use lossless 16 bit EXR output and switch off denoising/filtering,
        # so every pixel keeps its exact mapping color.
        image_settings = bpy.context.scene.render.image_settings
        image_settings.file_format = "OPEN_EXR"
        image_settings.color_depth = "16"
        bpy.context.view_layer.cycles.use_denoising = False
        bpy.context.scene.cycles.filter_width = 0.0

        if self._use_alpha_channel:
            self.add_alpha_channel_to_textures(blurry_edges=False)

        # Raw renders go to the temp dir; the decoded .npy maps go to the
        # final output dir.
        temporary_segmentation_file_path = os.path.join(
            self._temp_dir, "seg_")
        final_segmentation_file_path = os.path.join(
            self._determine_output_dir(),
            self.config.get_string("output_file_prefix", "segmap_"))

        self._render("seg_",
                     custom_file_path=temporary_segmentation_file_path)

        # Pick the smallest unsigned dtype that can represent the largest
        # used index; fall back to uint32 otherwise.
        optimal_dtype = next(
            (candidate for candidate in (np.uint8, np.uint16, np.uint32)
             if np.iinfo(candidate).max >= len(colors) - 1), np.uint32)

        # Decode every rendered frame back from colors to indices and save it.
        for frame in range(bpy.context.scene.frame_start,
                           bpy.context.scene.frame_end):
            file_path = temporary_segmentation_file_path + \
                "%04d" % frame + ".exr"
            segmentation = load_image(file_path)

            segmap = Utility.map_back_from_equally_spaced_equidistant_values(
                segmentation, num_splits_per_dimension,
                self.render_colorspace_size_per_dimension)
            segmap = segmap.astype(optimal_dtype)

            fname = final_segmentation_file_path + "%04d" % frame
            np.save(fname, segmap)

        # Persist the color <-> class/instance mapping alongside the maps.
        if color_map is not None:
            csv_path = os.path.join(
                self._determine_output_dir(),
                self.config.get_string("segcolormap_output_file_prefix",
                                       "class_inst_col_map") + ".csv")
            with open(csv_path, 'w', newline='') as csvfile:
                writer = csv.DictWriter(
                    csvfile, fieldnames=list(color_map[0].keys()))
                writer.writeheader()
                writer.writerows(color_map)

        self._register_output("segmap_", "segmap", ".npy", "1.0.0")
        if color_map is not None:
            self._register_output(
                "class_inst_col_map", "segcolormap", ".csv", "1.0.0",
                unique_for_camposes=False,
                output_key_parameter_name="segcolormap_output_key",
                output_file_prefix_parameter_name=
                "segcolormap_output_file_prefix")