def get_all_mesh_objects() -> List["MeshObject"]:
    """ Returns all mesh objects in the scene.

    :return: A list of all MeshObjects.
    """
    return convert_to_meshes(get_all_blender_mesh_objects())
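# Usage sketch for get_all_mesh_objects(): get_name() and get_location() are
# standard MeshObject getters; this assumes a scene has already been populated.
for mesh in get_all_mesh_objects():
    print(mesh.get_name(), mesh.get_location())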
def run(self):
    """ Collects all mesh objects and writes their id, name and pose. """
    objects = []
    for obj in get_all_blender_mesh_objects():
        objects.append(obj)
    self.write_attributes_to_file(self.object_writer, objects, "object_states_", "object_states",
                                  ["name", "location", "rotation_euler", "matrix_world"])
def _add_rigidbody(self):
    """ Adds a rigidbody element to all mesh objects and sets their physics attributes
        depending on their custom properties. """

    # Helper which returns the value set in the object's custom properties (if present)
    # or the given fallback value.
    def get_physics_attribute(obj, cp_name, default_value):
        if cp_name in obj:
            return obj[cp_name]
        return default_value

    # Go over all mesh objects and set their physics attributes based on the custom
    # properties or (if not set) based on the module config.
    for obj in get_all_blender_mesh_objects():
        mesh_obj = MeshObject(obj)
        # Skip the object if it already has an active rigid body component
        if mesh_obj.get_rigidbody() is None:
            if "physics" not in obj:
                raise Exception("The obj: '{}' has no physics attribute, each object needs one.".format(obj.name))

            # Collect physics attributes
            collision_shape = get_physics_attribute(obj, "physics_collision_shape", self.collision_shape)
            collision_margin = get_physics_attribute(obj, "physics_collision_margin", self.collision_margin)
            mass = get_physics_attribute(obj, "physics_mass", None if self.mass_scaling else 1)
            collision_mesh_source = get_physics_attribute(obj, "physics_collision_mesh_source", self.collision_mesh_source)
            friction = get_physics_attribute(obj, "physics_friction", self.friction)
            angular_damping = get_physics_attribute(obj, "physics_angular_damping", self.angular_damping)
            linear_damping = get_physics_attribute(obj, "physics_linear_damping", self.linear_damping)

            # Set physics attributes
            mesh_obj.enable_rigidbody(
                active=obj["physics"],
                collision_shape="COMPOUND" if collision_shape == "CONVEX_DECOMPOSITION" else collision_shape,
                collision_margin=collision_margin,
                mass=mass,
                mass_factor=self.mass_factor,
                collision_mesh_source=collision_mesh_source,
                friction=friction,
                angular_damping=angular_damping,
                linear_damping=linear_damping
            )

            # Check if the object needs convex decomposition
            if collision_shape == "CONVEX_DECOMPOSITION":
                mesh_obj.build_convex_decomposition_collision_shape(self.vhacd_path, self._temp_dir,
                                                                    self.convex_decomposition_cache_path)
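# Sketch of how an object opts into custom physics values before
# _add_rigidbody() runs. The property names mirror the lookups above; the
# object name and the concrete values are purely illustrative.
example_obj = bpy.data.objects["example_object"]  # hypothetical object name
example_obj["physics"] = True                     # required: marks the body as active
example_obj["physics_mass"] = 0.5                 # overrides the module-level default
example_obj["physics_collision_shape"] = "CONVEX_HULL"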
def run(self):
    """
    :return: Point of interest in the scene. Type: mathutils.Vector.
    """
    # Collect all selected mesh objects in the scene
    selected_objects = convert_to_meshes(self.config.get_list("selector", get_all_blender_mesh_objects()))
    if len(selected_objects) == 0:
        raise Exception("No objects were selected!")
    return compute_poi(selected_objects)
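# Sketch: outside the config pipeline, the same point of interest can be
# computed directly from the free functions defined above.
poi = compute_poi(get_all_mesh_objects())
print("Point of interest:", poi)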
def run(self):
    """ Collect ShapeNet attributes and write them to a file. """
    shapenet_objects = [obj for obj in get_all_blender_mesh_objects() if "used_synset_id" in obj]
    self.write_attributes_to_file(self.object_writer, shapenet_objects, "shapenet_", "shapenet",
                                  ["used_synset_id", "used_source_id"])
def run(self):
    """ Samples positions and rotations of the selected objects inside the sampling volume while performing
        mesh and bounding box collision checks, in the following steps:
        1. While objects remain and the maximum number of tries has not been reached: sample a point.
        2. If no collisions are found, keep the point.
    """
    objects_to_sample = self.config.get_list("objects_to_sample", get_all_blender_mesh_objects())
    objects_to_check_collisions = self.config.get_list("objects_to_check_collisions", get_all_blender_mesh_objects())
    max_tries = self.config.get_int("max_iterations", 1000)

    def sample_pose(obj: MeshObject):
        obj.set_location(self.config.get_vector3d("pos_sampler"))
        obj.set_rotation_euler(self.config.get_vector3d("rot_sampler"))

    sample_poses(objects_to_sample=convert_to_meshes(objects_to_sample),
                 sample_pose_func=sample_pose,
                 objects_to_check_collisions=convert_to_meshes(objects_to_check_collisions),
                 max_tries=max_tries)
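# Sketch of calling sample_poses() directly with a custom pose function,
# mirroring what this module does; the sampling bounds below are illustrative.
import numpy as np

def _random_pose(obj: MeshObject):
    obj.set_location(np.random.uniform([-1, -1, 0], [1, 1, 2]))
    obj.set_rotation_euler(np.random.uniform([0, 0, 0], [np.pi * 2] * 3))

sample_poses(objects_to_sample=get_all_mesh_objects(),
             sample_pose_func=_random_pose,
             objects_to_check_collisions=get_all_mesh_objects(),
             max_tries=1000)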
def render(output_dir: Optional[str] = None, file_prefix: str = "rgb_", output_key: Optional[str] = "colors",
           load_keys: Optional[Set[str]] = None, return_data: bool = True,
           keys_with_alpha_channel: Optional[Set[str]] = None) -> Dict[str, Union[np.ndarray, List[np.ndarray]]]:
    """ Render all frames.

    This will go through all frames from scene.frame_start to scene.frame_end and render each of them.

    :param output_dir: The directory to write files to. If this is None, the temporary directory is used. \
                       The temporary directory usually lives in shared memory (only true for Linux).
    :param file_prefix: The prefix to use for writing the images.
    :param output_key: The key to use for registering the output.
    :param load_keys: Set of output keys to load when available.
    :param return_data: Whether to load and return the generated data. Backwards compatibility to the config-based pipeline.
    :param keys_with_alpha_channel: A set containing all keys whose alpha channels should be loaded.
    :return: dict of lists of raw renderer output. Keys can be 'distance', 'colors', 'normals'.
    """
    if output_dir is None:
        output_dir = Utility.get_temporary_directory()
    if load_keys is None:
        load_keys = {'colors', 'distance', 'depth', 'normals', 'diffuse'}
        keys_with_alpha_channel = {'colors'} if bpy.context.scene.render.film_transparent else None

    if output_key is not None:
        Utility.add_output_entry({
            "key": output_key,
            "path": os.path.join(output_dir, file_prefix) + "%04d" +
                    map_file_format_to_file_ending(bpy.context.scene.render.image_settings.file_format),
            "version": "2.0.0"
        })
        load_keys.add(output_key)

    bpy.context.scene.render.filepath = os.path.join(output_dir, file_prefix)

    # Skip rendering if there is nothing to render
    if bpy.context.scene.frame_end != bpy.context.scene.frame_start:
        if len(get_all_blender_mesh_objects()) == 0:
            raise Exception("There are no mesh-objects to render, "
                            "please load an object before invoking the renderer.")
        # As frame_end points to the next free frame, decrease it by one, as
        # blender will render all frames in [frame_start, frame_end]
        bpy.context.scene.frame_end -= 1
        bpy.ops.render.render(animation=True, write_still=True)
        # Revert changes
        bpy.context.scene.frame_end += 1

    return WriterUtility.load_registered_outputs(load_keys, keys_with_alpha_channel) if return_data else {}
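# Usage sketch: render all registered frames and inspect the returned data;
# "/tmp/renders" is an illustrative output path.
data = render(output_dir="/tmp/renders", file_prefix="rgb_", output_key="colors")
for frame_idx, color_img in enumerate(data.get("colors", [])):
    print(frame_idx, color_img.shape)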
def run(self):
    total_noof_cams = self.config.get_int("total_noof_cams", 10)
    noof_cams_per_scene = self.config.get_int("noof_cams_per_scene", 5)

    for i in range(total_noof_cams):
        if i % noof_cams_per_scene == 0:
            # Sample new object poses
            self._object_pose_sampler.run()

        # Get the current keyframe id
        frame_id = bpy.context.scene.frame_end

        # TODO: Use Getter for selecting objects
        for obj in get_all_blender_mesh_objects():
            # Insert keyframes for the current object poses
            self.insert_key_frames(obj, frame_id)

        # Sample new camera poses
        self._camera_pose_sampler.run()
def _get_pose() -> dict:
    """ Returns position and rotation values of all objects in the scene with ACTIVE rigid_body type.

    :return: Dict of form {obj_name: {'location': [x, y, z], 'rotation': [x_rot, y_rot, z_rot]}}.
    """
    objects_poses = {}
    objects_with_physics = [obj for obj in get_all_blender_mesh_objects() if obj.rigid_body is not None]
    for obj in objects_with_physics:
        if obj.rigid_body.type == 'ACTIVE':
            location = bpy.context.scene.objects[obj.name].matrix_world.translation.copy()
            rotation = mathutils.Vector(bpy.context.scene.objects[obj.name].matrix_world.to_euler())
            objects_poses.update({obj.name: {'location': location, 'rotation': rotation}})
    return objects_poses
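# Sketch: _get_pose() lends itself to before/after comparisons around a
# physics simulation; the movement check below is illustrative.
poses_before = _get_pose()
# ... run the physics simulation here ...
poses_after = _get_pose()
for name, pose in poses_before.items():
    shift = (poses_after[name]['location'] - pose['location']).length
    print(name, "moved by", shift)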
def write_bop(output_dir: str, depths: Optional[List[np.ndarray]] = None, colors: Optional[List[np.ndarray]] = None,
              color_file_format: str = "PNG", dataset: str = "", append_to_existing_output: bool = True,
              depth_scale: float = 1.0, jpg_quality: int = 95, save_world2cam: bool = True,
              ignore_dist_thres: float = 100., m2mm: bool = True, frames_per_chunk: int = 1000):
    """ Write the BOP data.

    :param output_dir: Path to the output directory.
    :param depths: List of depth images in m to save.
    :param colors: List of color images to save.
    :param color_file_format: File type to save color images. Available: "PNG", "JPEG".
    :param dataset: Only save annotations for objects of the specified BOP dataset. Saves all object poses if undefined.
    :param append_to_existing_output: If true, the new frames will be appended to the existing ones.
    :param depth_scale: Multiply the uint16 output depth image with this factor to get depth in mm. Used to trade off
                        between depth accuracy and maximum depth value. The default corresponds to 65.54m maximum depth
                        and 1mm accuracy.
    :param jpg_quality: If color_file_format is "JPEG", save with the given quality.
    :param save_world2cam: If true, world-to-camera transformations "cam_R_w2c", "cam_t_w2c" are saved in scene_camera.json.
    :param ignore_dist_thres: Distance between camera and object after which the object is ignored. Mostly due to failed physics.
    :param m2mm: Original BOP annotations and models are in mm. If true, the gt annotations are converted to mm here.
                 This is needed if the BopLoader option mm2m is used.
    :param frames_per_chunk: Number of frames saved in each chunk (called scene in BOP).
    """
    if depths is None:
        depths = []
    if colors is None:
        colors = []

    # Output paths
    dataset_dir = os.path.join(output_dir, dataset)
    chunks_dir = os.path.join(dataset_dir, 'train_pbr')
    camera_path = os.path.join(dataset_dir, 'camera.json')

    # Create the output directory structure
    if not os.path.exists(dataset_dir):
        os.makedirs(dataset_dir)
        os.makedirs(chunks_dir)
    elif not append_to_existing_output:
        raise Exception("The output folder already exists: {}.".format(dataset_dir))

    all_mesh_objects = get_all_blender_mesh_objects()

    # Select objects from the specified dataset
    if dataset:
        dataset_objects = []
        for obj in all_mesh_objects:
            if "bop_dataset_name" in obj:
                if obj["bop_dataset_name"] == dataset:
                    dataset_objects.append(obj)
    else:
        dataset_objects = all_mesh_objects

    # Check if there is any object from the specified dataset
    if not dataset_objects:
        raise Exception("The scene does not contain any object from the "
                        "specified dataset: {}. Either remove the dataset parameter "
                        "or assign the custom property 'bop_dataset_name' to selected objects.".format(dataset))

    # Save the data
    BopWriterUtility._write_camera(camera_path, depth_scale=depth_scale)
    BopWriterUtility._write_frames(chunks_dir, dataset_objects=dataset_objects, depths=depths, colors=colors,
                                   color_file_format=color_file_format, frames_per_chunk=frames_per_chunk,
                                   m2mm=m2mm, ignore_dist_thres=ignore_dist_thres, save_world2cam=save_world2cam,
                                   depth_scale=depth_scale, jpg_quality=jpg_quality)
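# Usage sketch combining the renderer output with the BOP writer. Paths and
# the dataset name are illustrative, and this assumes a depth output was
# enabled before rendering.
data = render()
write_bop(output_dir="/tmp/bop_output",
          depths=data.get("depth", []),
          colors=data.get("colors", []),
          dataset="lm",
          m2mm=True)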
def _sample_cam_poses(self, config):
    """ Samples camera poses according to the given config.

    :param config: The config object.
    """
    cam_ob = bpy.context.scene.camera
    cam = cam_ob.data

    # Set global parameters
    self.sqrt_number_of_rays = config.get_int("sqrt_number_of_rays", 10)
    self.max_tries = config.get_int("max_tries", 10000)
    self.proximity_checks = config.get_raw_dict("proximity_checks", {})
    self.excluded_objects_in_proximity_check = config.get_list("excluded_objs_in_proximity_check", [])
    self.min_interest_score = config.get_float("min_interest_score", 0.0)
    self.interest_score_range = config.get_float("interest_score_range", self.min_interest_score)
    self.interest_score_step = config.get_float("interest_score_step", 0.1)
    self.special_objects = config.get_list("special_objects", [])
    self.special_objects_weight = config.get_float("special_objects_weight", 2)
    self._above_objects = convert_to_meshes(config.get_list("check_if_pose_above_object_list", []))
    self.check_visible_objects = convert_to_meshes(config.get_list("check_if_objects_visible", []))

    # Set camera intrinsics
    self._set_cam_intrinsics(cam, Config(self.config.get_raw_dict("intrinsics", {})))

    if self.proximity_checks:
        # The proximity checks require a BVH tree
        mesh_objects = [MeshObject(obj) for obj in get_all_blender_mesh_objects()
                        if obj not in self.excluded_objects_in_proximity_check]
        self.bvh_tree = create_bvh_tree_multi_objects(mesh_objects)

    if self.interest_score_step <= 0.0:
        raise Exception("The interest score step size must be bigger than 0.")

    # Determine the number of camera poses to sample
    number_of_poses = config.get_int("number_of_samples", 1)
    print("Sampling " + str(number_of_poses) + " cam poses")

    # Start with the maximum interest score
    self.interest_score = self.interest_score_range

    # Init
    all_tries = 0
    tries = 0
    existing_poses = []

    for i in range(number_of_poses):
        # Do this until a valid pose has been found or the max number of tries has been reached
        while tries < self.max_tries:
            tries += 1
            all_tries += 1
            # Sample a new cam pose and check if it is valid
            if self.sample_and_validate_cam_pose(config, existing_poses):
                break

        # If the max number of tries has been reached
        if tries >= self.max_tries:
            # Decrease the interest score and try again, if the minimum has not been reached yet
            continue_trying, self.interest_score = CameraValidation.decrease_interest_score(
                self.interest_score, self.min_interest_score, self.interest_score_step)
            if continue_trying:
                tries = 0

    print(str(all_tries) + " tries were necessary")
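# Sketch of the matching config portion, written as a Python dict for
# illustration (the real pipeline reads it from the run config); the keys
# mirror the get_* lookups above, the values are illustrative.
example_camera_sampler_config = {
    "number_of_samples": 5,
    "max_tries": 10000,
    "proximity_checks": {"min": 1.0},
    "min_interest_score": 0.3,
    "interest_score_step": 0.1,
    "sqrt_number_of_rays": 10,
}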
def render_segmap(output_dir: Optional[str] = None, temp_dir: Optional[str] = None,
                  map_by: Union[str, List[str]] = "class",
                  default_values: Optional[Dict[str, int]] = None, file_prefix: str = "segmap_",
                  output_key: str = "segmap", segcolormap_output_file_prefix: str = "instance_attribute_map_",
                  segcolormap_output_key: str = "segcolormap", use_alpha_channel: bool = False,
                  render_colorspace_size_per_dimension: int = 2048) -> Dict[str, Union[np.ndarray, List[np.ndarray]]]:
    """ Renders segmentation maps for all frames.

    :param output_dir: The directory to write images to.
    :param temp_dir: The directory to write intermediate data to.
    :param map_by: The attributes to be used for color mapping.
    :param default_values: The default values used for the keys used in attributes. If None, {"class": 0} is used.
    :param file_prefix: The prefix to use for writing the images.
    :param output_key: The key to use for registering the output.
    :param segcolormap_output_file_prefix: The prefix to use for writing the segmentation-color map csv.
    :param segcolormap_output_key: The key to use for registering the segmentation-color map output.
    :param use_alpha_channel: If true, the alpha channel stored in .png textures is used.
    :param render_colorspace_size_per_dimension: As we use float16 for storing the rendering, the interval of \
                                                 integers which can be precisely stored is [-2048, 2048]. As blender \
                                                 does not allow negative values for colors, we use [0, 2048] ** 3 as \
                                                 our color space, which allows ~8 billion different colors/objects. \
                                                 This should be enough.
    :return: dict of lists of segmaps and (for instance segmentation) segcolormaps
    """
    if output_dir is None:
        output_dir = Utility.get_temporary_directory()
    if temp_dir is None:
        temp_dir = Utility.get_temporary_directory()
    if default_values is None:
        default_values = {"class": 0}

    with Utility.UndoAfterExecution():
        RendererUtility._render_init()
        RendererUtility.set_samples(1)
        RendererUtility.set_adaptive_sampling(0)
        RendererUtility.set_denoiser(None)
        RendererUtility.set_light_bounces(1, 0, 0, 1, 0, 8, 0)

        attributes = map_by
        if 'class' in default_values:
            default_values['cp_category_id'] = default_values['class']

        # Get objects with meshes (i.e. not lights or cameras)
        objs_with_mats = get_all_blender_mesh_objects()

        colors, num_splits_per_dimension, objects = _colorize_objects_for_instance_segmentation(
            objs_with_mats, use_alpha_channel, render_colorspace_size_per_dimension)

        bpy.context.scene.cycles.filter_width = 0.0

        if use_alpha_channel:
            MaterialLoaderUtility.add_alpha_channel_to_textures(blurry_edges=False)

        # Determine the paths for the temporary and the final output
        temporary_segmentation_file_path = os.path.join(temp_dir, "seg_")
        final_segmentation_file_path = os.path.join(output_dir, file_prefix)

        RendererUtility.set_output_format("OPEN_EXR", 16)
        RendererUtility.render(temp_dir, "seg_", None, return_data=False)

        # Find the optimal dtype of the output based on the max index
        for dtype in [np.uint8, np.uint16, np.uint32]:
            optimal_dtype = dtype
            if np.iinfo(optimal_dtype).max >= len(colors) - 1:
                break

        if isinstance(attributes, str):
            # Only one result is requested
            result_channels = 1
            attributes = [attributes]
        elif isinstance(attributes, list):
            result_channels = len(attributes)
        else:
            raise Exception("The type of this is not supported here: {}".format(attributes))

        # Define these up front for the case that nothing is rendered
        there_was_an_instance_rendering = False
        list_of_attributes: List[str] = []

        # Check if stereo is enabled
        if bpy.context.scene.render.use_multiview:
            suffixes = ["_L", "_R"]
        else:
            suffixes = [""]

        return_dict: Dict[str, Union[np.ndarray, List[np.ndarray]]] = {}
        save_in_csv_attributes: Dict[int, Dict[str, Any]] = {}

        # After rendering
        for frame in range(bpy.context.scene.frame_start, bpy.context.scene.frame_end):  # for each rendered frame
            save_in_csv_attributes = {}
            for suffix in suffixes:
                file_path = temporary_segmentation_file_path + ("%04d" % frame) + suffix + ".exr"
                segmentation = load_image(file_path)
                print(file_path, segmentation.shape)

                segmap = Utility.map_back_from_equally_spaced_equidistant_values(
                    segmentation, num_splits_per_dimension, render_colorspace_size_per_dimension)
                segmap = segmap.astype(optimal_dtype)

                object_ids = np.unique(segmap)
                max_id = np.max(object_ids)
                if max_id >= len(objects):
                    raise Exception("There are more object colors than there are objects")
                combined_result_map = []
                there_was_an_instance_rendering = False
                list_of_attributes = []
                channels = []
                for channel_id in range(result_channels):
                    num_default_values = 0
                    resulting_map = np.zeros((segmap.shape[0], segmap.shape[1]), dtype=optimal_dtype)
                    was_used = False
                    current_attribute = attributes[channel_id]
                    org_attribute = current_attribute

                    # If "class" is used, the category_id attribute is evaluated
                    if current_attribute == "class":
                        current_attribute = "cp_category_id"
                    # In the instance case the resulting ids are used directly
                    if current_attribute == "instance":
                        there_was_an_instance_rendering = True
                        resulting_map = segmap
                        was_used = True
                    else:
                        if current_attribute != "cp_category_id":
                            list_of_attributes.append(current_attribute)
                        # For the current attribute remove cp_ and _csv, if present
                        attribute = current_attribute
                        if attribute.startswith("cp_"):
                            attribute = attribute[len("cp_"):]
                        # Check if a default value was specified
                        default_value_set = False
                        if current_attribute in default_values or attribute in default_values:
                            default_value_set = True
                            if current_attribute in default_values:
                                default_value = default_values[current_attribute]
                            elif attribute in default_values:
                                default_value = default_values[attribute]
                        # Iterate over all object ids
                        for object_id in object_ids:
                            # Convert np.uint8 to int, so that the save_in_csv_attributes dict can later be serialized
                            object_id = int(object_id)
                            # Get the corresponding object via the id
                            current_obj = objects[object_id]
                            # If the current obj has an attribute with that name -> get it
                            if hasattr(current_obj, attribute):
                                value = getattr(current_obj, attribute)
                            # If the current object has a custom property with that name -> get it
                            elif current_attribute.startswith("cp_") and attribute in current_obj:
                                value = current_obj[attribute]
                            elif current_attribute.startswith("cf_"):
                                if current_attribute == "cf_basename":
                                    value = current_obj.name
                                    if "." in value:
                                        value = value[:value.rfind(".")]
                            elif default_value_set:
                                # If none of the above applies, use the default value
                                value = default_value
                                num_default_values += 1
                            else:
                                # If the requested current_attribute is neither a custom property nor an attribute
                                # and no default value is stored, throw an exception
                                raise Exception("The obj: {} does not have the attribute: {}, stripped: {}. "
                                                "Maybe try a default value.".format(current_obj.name,
                                                                                    current_attribute, attribute))

                            # Save everything which is not instance also in the .csv
                            if isinstance(value, (int, float, np.integer, np.floating)):
                                was_used = True
                                resulting_map[segmap == object_id] = value

                            if object_id in save_in_csv_attributes:
                                save_in_csv_attributes[object_id][attribute] = value
                            else:
                                save_in_csv_attributes[object_id] = {attribute: value}

                    if was_used and num_default_values < len(object_ids):
                        channels.append(org_attribute)
                        combined_result_map.append(resulting_map)
                        return_dict.setdefault("{}_segmaps{}".format(org_attribute, suffix), []).append(resulting_map)

                fname = final_segmentation_file_path + ("%04d" % frame) + suffix
                # Combine all resulting images to one image
                resulting_map = np.stack(combined_result_map, axis=2)
                # Remove the unneeded third dimension
                if resulting_map.shape[2] == 1:
                    resulting_map = resulting_map[:, :, 0]
                # TODO: Remove unnecessary save when we give up backwards compatibility
                np.save(fname, resulting_map)

            if there_was_an_instance_rendering:
                mappings = []
                for object_id, attribute_dict in save_in_csv_attributes.items():
                    mappings.append({"idx": object_id, **attribute_dict})
                return_dict.setdefault("instance_attribute_maps", []).append(mappings)

                # Write the color mappings to file
                # TODO: Remove unnecessary csv file when we give up backwards compatibility
                csv_file_path = os.path.join(output_dir, segcolormap_output_file_prefix + ("%04d.csv" % frame))
                with open(csv_file_path, 'w', newline='') as csvfile:
                    # Get the used field names from the first element
                    fieldnames = ["idx"]
                    # Get all used object element keys
                    for object_element in save_in_csv_attributes.values():
                        fieldnames.extend(list(object_element.keys()))
                        break
                    for channel_name in channels:
                        fieldnames.append("channel_{}".format(channel_name))
                    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                    writer.writeheader()
                    # Save all values for each object in one row
                    for obj_idx, object_element in save_in_csv_attributes.items():
                        object_element["idx"] = obj_idx
                        for i, channel_name in enumerate(channels):
                            object_element["channel_{}".format(channel_name)] = i
                        writer.writerow(object_element)
            else:
                if len(list_of_attributes) > 0:
                    raise Exception("There were attributes specified in map_by which could not be saved as "
                                    "there was no \"instance\" map_by key used. This is true for this/these "
                                    "keys: {}".format(", ".join(list_of_attributes)))
                # If there was no instance rendering, no .csv file is generated!
                # Delete all saved infos about the .csv
                save_in_csv_attributes = {}

    Utility.register_output(output_dir, file_prefix, output_key, ".npy", "2.0.0")

    if save_in_csv_attributes:
        Utility.register_output(output_dir, segcolormap_output_file_prefix, segcolormap_output_key, ".csv", "2.0.0")

    return return_dict
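# Usage sketch: request instance and class maps in one call; the keys of the
# returned dict follow the "{attribute}_segmaps" pattern built above.
data = render_segmap(map_by=["instance", "class"], default_values={"class": 0})
print(len(data["instance_segmaps"]), "instance segmentation maps rendered")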