def run(self):
    # Set language if necessary
    if bpy.context.preferences.view.language != "en_US":
        print("Setting blender language settings to english during this run")
        bpy.context.preferences.view.language = "en_US"

    prefs = bpy.context.preferences.addons['cycles'].preferences
    # Use cycles
    bpy.context.scene.render.engine = 'CYCLES'

    if platform == "darwin":
        # there is no GPU support on macOS, so use the CPU with maximum power
        bpy.context.scene.cycles.device = "CPU"
        bpy.context.scene.render.threads = multiprocessing.cpu_count()
    else:
        bpy.context.scene.cycles.device = "GPU"
        preferences = bpy.context.preferences.addons['cycles'].preferences
        for device_type in preferences.get_device_types(bpy.context):
            preferences.get_devices_for_type(device_type[0])

        # Prefer OPTIX over CUDA, if a matching device is available
        for gpu_type in ["OPTIX", "CUDA"]:
            found = False
            for device in preferences.devices:
                if device.type == gpu_type:
                    bpy.context.preferences.addons['cycles'].preferences.compute_device_type = gpu_type
                    print('Device {} of type {} found and used.'.format(device.name, device.type))
                    found = True
                    break
            if found:
                break

        # make sure that all visible GPUs are used
        for group in prefs.get_devices():
            for d in group:
                d.use = True

    # Set the frame end, will be changed by the camera loader modules
    bpy.context.scene.frame_end = 0

    # Set the background color
    world = bpy.data.worlds['World']
    world.color[:3] = self.config.get_list("horizon_color", [0.535, 0.633, 0.608])

    # Create the camera
    cam = bpy.data.cameras.new("Camera")
    cam_ob = bpy.data.objects.new("Camera", cam)
    bpy.context.scene.collection.objects.link(cam_ob)
    bpy.context.scene.camera = cam_ob

    # Set default intrinsics
    CameraUtility.set_intrinsics_from_blender_params(DefaultConfig.fov,
                                                     DefaultConfig.resolution_x,
                                                     DefaultConfig.resolution_y,
                                                     DefaultConfig.clip_start,
                                                     DefaultConfig.clip_end,
                                                     DefaultConfig.pixel_aspect_x,
                                                     DefaultConfig.pixel_aspect_y,
                                                     DefaultConfig.shift_x,
                                                     DefaultConfig.shift_y,
                                                     "FOV")
    CameraUtility.set_stereo_parameters(DefaultConfig.stereo_convergence_mode,
                                        DefaultConfig.stereo_convergence_distance,
                                        DefaultConfig.stereo_interocular_distance)

    random_seed = os.getenv("BLENDER_PROC_RANDOM_SEED")
    if random_seed:
        print("Got random seed: {}".format(random_seed))
        try:
            random_seed = int(random_seed)
        except ValueError:
            raise ValueError("BLENDER_PROC_RANDOM_SEED must be an integer, got '{}'".format(random_seed))
        random.seed(random_seed)
        np_random.seed(random_seed)
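# A minimal usage sketch (an assumption, not part of the original module): since run()
# reads BLENDER_PROC_RANDOM_SEED from the environment, setting the variable before the
# pipeline starts makes all random sampling in the run reproducible.
import os

os.environ["BLENDER_PROC_RANDOM_SEED"] = "42"  # any integer, passed as a string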
def sample_and_validate_cam_pose(self, cam, cam_ob, config):
    """ Samples a new camera pose, sets the parameters of the given camera object accordingly and validates it.

    :param cam: The camera which contains only camera specific attributes.
    :param cam_ob: The object linked to the camera which determines general properties like location/orientation.
    :param config: The config object describing how to sample.
    :return: True, if the sampled pose was valid.
    """
    # Sample/set intrinsics
    self._set_cam_intrinsics(cam, Config(self.config.get_raw_dict("intrinsics", {})))

    # Sample camera extrinsics (we do not set them yet for performance reasons)
    cam2world_matrix = self._cam2world_matrix_from_cam_extrinsics(config)

    # Make sure the sampled location is inside the room => overwrite x and y and add offset to z
    cam2world_matrix.translation[0] = random.uniform(self.bounding_box["min"][0], self.bounding_box["max"][0])
    cam2world_matrix.translation[1] = random.uniform(self.bounding_box["min"][1], self.bounding_box["max"][1])
    cam2world_matrix.translation[2] += self.floor_height_values[random.randrange(0, len(self.floor_height_values))]

    # Check if sampled pose is valid
    if self._is_pose_valid(cam, cam_ob, cam2world_matrix):
        # Set camera extrinsics as the pose is valid
        CameraUtility.add_camera_pose(cam2world_matrix)
        return True
    else:
        return False
def sample_and_validate_cam_pose(self, cam, cam_ob, config):
    """ Samples a new camera pose, sets the parameters of the given camera object accordingly and validates it.

    :param cam: The camera which contains only camera specific attributes.
    :param cam_ob: The object linked to the camera which determines general properties like location/orientation.
    :param config: The config object describing how to sample.
    :return: True, if the sampled pose was valid.
    """
    # Sample used floor obj
    floor_obj = random.choice(self.used_floors)

    # Sample/set intrinsics
    self._set_cam_intrinsics(cam, Config(self.config.get_raw_dict("intrinsics", {})))

    # Sample camera extrinsics (we do not set them yet for performance reasons)
    cam2world_matrix = self._cam2world_matrix_from_cam_extrinsics(config)

    # Make sure the sampled location is inside the room => overwrite x and y and add offset to z
    bounding_box = get_bounds(floor_obj)
    min_corner = np.min(bounding_box, axis=0)
    max_corner = np.max(bounding_box, axis=0)
    cam2world_matrix.translation[0] = random.uniform(min_corner[0], max_corner[0])
    cam2world_matrix.translation[1] = random.uniform(min_corner[1], max_corner[1])
    cam2world_matrix.translation[2] += floor_obj.location[2]

    # Check if sampled pose is valid
    if self._is_pose_valid(floor_obj, cam, cam_ob, cam2world_matrix):
        # Set camera extrinsics as the pose is valid
        CameraUtility.add_camera_pose(cam2world_matrix)
        return True
    else:
        return False
def set_default_parameters():
    """ Loads and sets default parameters defined in DefaultConfig.py """
    # Set default intrinsics
    CameraUtility.set_intrinsics_from_blender_params(DefaultConfig.fov, DefaultConfig.resolution_x,
                                                     DefaultConfig.resolution_y, DefaultConfig.clip_start,
                                                     DefaultConfig.clip_end, DefaultConfig.pixel_aspect_x,
                                                     DefaultConfig.pixel_aspect_y, DefaultConfig.shift_x,
                                                     DefaultConfig.shift_y, "FOV")
    CameraUtility.set_stereo_parameters(DefaultConfig.stereo_convergence_mode,
                                        DefaultConfig.stereo_convergence_distance,
                                        DefaultConfig.stereo_interocular_distance)

    # Init renderer
    RendererUtility.init()
    RendererUtility.set_samples(DefaultConfig.samples)
    addon_utils.enable("render_auto_tile_size")
    RendererUtility.toggle_auto_tile_size(True)

    # Set number of cpu cores used for rendering (1 thread is always used for coordination,
    # so 1 cpu thread means GPU-only rendering)
    RendererUtility.set_cpu_threads(1)
    RendererUtility.set_denoiser(DefaultConfig.denoiser)
    RendererUtility.set_simplify_subdivision_render(DefaultConfig.simplify_subdivision_render)
    RendererUtility.set_light_bounces(DefaultConfig.diffuse_bounces, DefaultConfig.glossy_bounces,
                                      DefaultConfig.ao_bounces_render, DefaultConfig.max_bounces,
                                      DefaultConfig.transmission_bounces, DefaultConfig.transparency_bounces,
                                      DefaultConfig.volume_bounces)
def _set_cam_extrinsics(self, cam_ob, config):
    """ Sets camera extrinsics according to the config.

    :param cam_ob: The object linked to the camera which determines general properties like location/orientation.
    :param config: A configuration object with cam extrinsics.
    """
    cam2world_matrix = self._cam2world_matrix_from_cam_extrinsics(config)
    CameraUtility.add_camera_pose(cam2world_matrix)
def _cam2world_matrix_from_cam_extrinsics(self, config: Config) -> np.ndarray:
    """ Determines camera extrinsics by using the given config and returns them in the form of a cam to world frame transformation matrix.

    :param config: The configuration object.
    :return: The 4x4 cam to world transformation matrix.
    """
    if not config.has_param("cam2world_matrix"):
        position = MathUtility.change_coordinate_frame_of_point(config.get_vector3d("location", [0, 0, 0]),
                                                                self.source_frame)

        rotation_format = config.get_string("rotation/format", "euler")
        value = config.get_vector3d("rotation/value", [0, 0, 0])
        # Transform to blender coord frame
        value = MathUtility.change_coordinate_frame_of_point(value, self.source_frame)

        if rotation_format == "euler":
            # Rotation, specified as euler angles
            rotation_matrix = Euler(value, 'XYZ').to_matrix()
        elif rotation_format == "forward_vec":
            # Convert the forward vector to a rotation matrix (assume up = Z)
            rotation_matrix = CameraUtility.rotation_from_forward_vec(value)
        elif rotation_format == "look_at":
            # Convert the vector pointing from position to the look-at point into a rotation matrix (assume up = Z)
            rotation_matrix = CameraUtility.rotation_from_forward_vec(value - position)
        else:
            raise Exception("No such rotation format: " + str(rotation_format))

        if rotation_format == "look_at" or rotation_format == "forward_vec":
            inplane_rot = config.get_float("rotation/inplane_rot", 0.0)
            rotation_matrix = np.matmul(rotation_matrix, Euler((0.0, 0.0, inplane_rot)).to_matrix())
            extra_rot = config.get_vector("rotation/extra_rot", mathutils.Vector([0., 0., 0.]))
            rotation_matrix = rotation_matrix @ Euler(extra_rot).to_matrix()

        cam2world_matrix = MathUtility.build_transformation_mat(position, rotation_matrix)
    else:
        cam2world_matrix = np.array(config.get_list("cam2world_matrix")).reshape(4, 4).astype(np.float32)
        cam2world_matrix = MathUtility.change_target_coordinate_frame_of_transformation_matrix(cam2world_matrix,
                                                                                               self.source_frame)
    return cam2world_matrix
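# Hedged example (assuming Config wraps a plain dict, as the get_raw_dict call sites in
# this section suggest; the values are illustrative): the three extrinsics formats that
# _cam2world_matrix_from_cam_extrinsics accepts.
euler_config = {"location": [1, -2, 3],
                "rotation": {"format": "euler", "value": [0.5, 0.0, 1.57]}}
look_at_config = {"location": [1, -2, 3],
                  "rotation": {"format": "look_at", "value": [0, 0, 0], "inplane_rot": 0.1}}
# Alternatively, a flattened row-major 4x4 matrix can be passed directly:
matrix_config = {"cam2world_matrix": [1, 0, 0, 1,
                                      0, 1, 0, -2,
                                      0, 0, 1, 3,
                                      0, 0, 0, 1]}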
def _set_cam_extrinsics(self, config, frame=None):
    """ Sets camera extrinsics according to the config.

    :param config: A configuration object with cam extrinsics.
    :param frame: Optional, the frame to set the camera pose to.
    """
    if config.has_param("frame"):
        frame = config.get_int("frame")

    cam2world_matrix = self._cam2world_matrix_from_cam_extrinsics(config)
    CameraUtility.add_camera_pose(cam2world_matrix, frame)
def _get_attribute(self, cam_pose, attribute_name):
    """ Returns the value of the requested attribute for the given camera pose.

    :param cam_pose: A (cam, cam_ob) tuple of the camera data and the object linked to it.
    :param attribute_name: The attribute name. Type: string.
    :return: The attribute value.
    """
    cam, cam_ob = cam_pose

    if attribute_name == "fov_x":
        return cam.angle_x
    elif attribute_name == "fov_y":
        return cam.angle_y
    elif attribute_name == "shift_x":
        return cam.shift_x
    elif attribute_name == "shift_y":
        return cam.shift_y
    elif attribute_name == "half_fov_x":
        return cam.angle_x * 0.5
    elif attribute_name == "half_fov_y":
        return cam.angle_y * 0.5
    elif attribute_name == "cam_K":
        return [[x for x in c] for c in CameraUtility.get_intrinsics_as_K_matrix()]
    else:
        return super()._get_attribute(cam_ob, attribute_name)
def sample_and_validate_cam_pose(self, cam, cam_ob, config):
    """ Samples a new camera pose, sets the parameters of the given camera object accordingly and validates it.

    :param cam: The camera which contains only camera specific attributes.
    :param cam_ob: The object linked to the camera which determines general properties like location/orientation.
    :param config: The config object describing how to sample.
    :return: True, if the sampled pose was valid.
    """
    # Sample camera extrinsics (we do not set them yet for performance reasons)
    cam2world_matrix = self._cam2world_matrix_from_cam_extrinsics(config)

    if self._is_pose_valid(cam, cam_ob, cam2world_matrix):
        # Set camera extrinsics as the pose is valid
        CameraUtility.add_camera_pose(cam2world_matrix)
        return True
    else:
        return False
def _get_frame_camera(self):
    """ Returns camera parameters for the active camera. """
    return {
        'cam_K': np.hstack(CameraUtility.get_intrinsics_as_K_matrix()).tolist(),
        'depth_scale': self.depth_scale
    }
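# For illustration: np.hstack applied to a 3x3 K matrix concatenates its rows, so
# 'cam_K' becomes the flat row-major list [fx, 0, cx, 0, fy, cy, 0, 0, 1], which is
# the layout the BOP scene_camera format expects.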
def _cam2world_matrix_from_cam_extrinsics(self, config):
    """ Determines camera extrinsics by using the given config and returns them in the form of a cam to world frame transformation matrix.

    :param config: The configuration object.
    :return: The cam to world transformation matrix.
    """
    if not config.has_param("cam2world_matrix"):
        position = MathUtility.transform_point_to_blender_coord_frame(config.get_vector3d("location", [0, 0, 0]),
                                                                      self.source_frame)

        # Rotation
        rotation_format = config.get_string("rotation/format", "euler")
        value = config.get_vector3d("rotation/value", [0, 0, 0])
        # Transform to blender coord frame
        value = MathUtility.transform_point_to_blender_coord_frame(Vector(value), self.source_frame)

        if rotation_format == "euler":
            # Rotation, specified as euler angles
            rotation_matrix = Euler(value, 'XYZ').to_matrix()
        elif rotation_format == "forward_vec":
            # Convert the forward vector to a rotation matrix (assume up = Z)
            rotation_matrix = CameraUtility.rotation_from_forward_vec(value)
        elif rotation_format == "look_at":
            # Convert the vector pointing towards the look-at point into a rotation matrix (assume up = Z)
            rotation_matrix = CameraUtility.rotation_from_forward_vec((value - position).normalized())
        else:
            raise Exception("No such rotation format: " + str(rotation_format))

        if rotation_format == "look_at" or rotation_format == "forward_vec":
            inplane_rot = config.get_float("rotation/inplane_rot", 0.0)
            rotation_matrix = rotation_matrix @ Euler((0.0, 0.0, inplane_rot)).to_matrix()

        cam2world_matrix = Matrix.Translation(Vector(position)) @ rotation_matrix.to_4x4()
    else:
        cam2world_matrix = Matrix(np.array(config.get_list("cam2world_matrix")).reshape(4, 4).astype(np.float32))
        cam2world_matrix = Utility.transform_matrix_to_blender_coord_frame(cam2world_matrix, self.source_frame)
    return cam2world_matrix
def get_cam_attribute(cam_ob: bpy.context.scene.camera, attribute_name: str,
                      local_frame_change: Union[None, List[str]] = None,
                      world_frame_change: Union[None, List[str]] = None) -> Any:
    """ Returns the value of the requested attribute for the given object.

    :param cam_ob: The camera object.
    :param attribute_name: The attribute name.
    :param local_frame_change: Can be used to change the local coordinate frame of matrices. Default: ["X", "Y", "Z"]
    :param world_frame_change: Can be used to change the world coordinate frame of points and matrices. Default: ["X", "Y", "Z"]
    :return: The attribute value.
    """
    if attribute_name == "fov_x":
        return CameraUtility.get_fov()[0]
    elif attribute_name == "fov_y":
        return CameraUtility.get_fov()[1]
    elif attribute_name == "shift_x":
        return cam_ob.data.shift_x
    elif attribute_name == "shift_y":
        return cam_ob.data.shift_y
    elif attribute_name == "half_fov_x":
        return CameraUtility.get_fov()[0] * 0.5
    elif attribute_name == "half_fov_y":
        return CameraUtility.get_fov()[1] * 0.5
    elif attribute_name == "cam_K":
        return [[x for x in c] for c in CameraUtility.get_intrinsics_as_K_matrix()]
    else:
        if attribute_name == "cam2world_matrix":
            return WriterUtility.get_common_attribute(cam_ob, "matrix_world",
                                                      local_frame_change, world_frame_change)
        else:
            return WriterUtility.get_common_attribute(cam_ob, attribute_name,
                                                      local_frame_change, world_frame_change)
def sample_and_validate_cam_pose(self, config: Config, existing_poses: List[np.ndarray]) -> bool:
    """ Samples a new camera pose, sets the parameters of the given camera object accordingly and validates it.

    :param config: The config object describing how to sample.
    :param existing_poses: A list of already sampled valid poses.
    :return: True, if the sampled pose was valid.
    """
    # Sample camera extrinsics (we do not set them yet for performance reasons)
    cam2world_matrix = self._sample_pose(config)

    if self._is_pose_valid(cam2world_matrix, existing_poses):
        # Set camera extrinsics as the pose is valid
        frame = CameraUtility.add_camera_pose(cam2world_matrix)
        # Optional callback
        self._on_new_pose_added(cam2world_matrix, frame)
        # Add to the list of added cam poses
        existing_poses.append(cam2world_matrix)
        return True
    else:
        return False
def _write_camera(self):
    """ Writes camera.json into dataset_dir. """
    width = bpy.context.scene.render.resolution_x
    height = bpy.context.scene.render.resolution_y
    cam_K = CameraUtility.get_intrinsics_as_K_matrix()
    camera = {
        'cx': cam_K[0][2],
        'cy': cam_K[1][2],
        'depth_scale': self.depth_scale,
        'fx': cam_K[0][0],
        'fy': cam_K[1][1],
        'height': height,
        'width': width
    }
    save_json(self.camera_path, camera)
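# For illustration (values are hypothetical), the resulting camera.json is a flat dict:
# {"cx": 256.0, "cy": 256.0, "depth_scale": 1.0, "fx": 512.0, "fy": 512.0,
#  "height": 512, "width": 512}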
def sample_cam_pose_nearby(self, cam, cam_ob, config, location, rotation):
    """ Samples a new camera pose in the vicinity of the last valid pose.

    :param cam: The camera which contains only camera specific attributes.
    :param cam_ob: The object linked to the camera.
    :param config: The config object describing how to sample.
    :param location: The location of the last valid pose.
    :param rotation: The rotation of the last valid pose.
    :return: A (valid, abort) tuple: valid is True if the sampled pose was accepted,
             abort is True if no sufficiently similar rotation could be found.
    """
    # Compute room id of last sampled pose
    group_id = cam_ob["room_id"]
    room_obj, floor_obj = self.rooms[group_id]

    # Sample/set intrinsics
    self._set_cam_intrinsics(cam, config)

    # Sample camera extrinsics multiple times until the rotation difference between
    # the new pose and the last sampled pose is small enough
    for i in range(10000):
        cam2world_matrix = self._cam2world_matrix_from_cam_extrinsics(config)

        # Compute relative rotation angle
        R1 = np.array(cam2world_matrix.to_quaternion().to_matrix())
        R2 = np.array(rotation.to_matrix())
        R_ab = np.matmul(R1.T, R2)
        angle = np.arccos(np.clip((np.trace(R_ab) - 1) / 2, -1, 1)) / np.pi * 180

        # Check if it is small enough
        if angle < 15:
            break

    # If no valid pose could have been found, return
    if angle >= 15:
        return False, True

    # Sample location of new pose closely around location of last pose
    cam2world_matrix.translation = Sphere.sample(location, 0.3, "INTERIOR")

    # Check if sampled pose is valid
    if self._is_pose_valid(floor_obj, cam, cam_ob, cam2world_matrix):
        # Set camera extrinsics as the pose is valid
        frame = CameraUtility.add_camera_pose(cam2world_matrix)
        # Set group and room id keyframe (room id stays the same)
        cam_ob.keyframe_insert(data_path='["group_id"]', frame=frame)
        cam_ob.keyframe_insert(data_path='["room_id"]', frame=frame)
        return True, False
    else:
        return False, False
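# Standalone sketch of the angle check used above (an assumption: plain numpy, no
# Blender required). The geodesic angle between two rotation matrices R1, R2 is
# arccos((trace(R1^T R2) - 1) / 2), which is what the loop compares against 15 degrees.
import numpy as np

def rotation_angle_deg(R1: np.ndarray, R2: np.ndarray) -> float:
    # Relative rotation from R1 to R2
    R_ab = R1.T @ R2
    # Clip to guard against floating point drift outside [-1, 1]
    cos_angle = np.clip((np.trace(R_ab) - 1.0) / 2.0, -1.0, 1.0)
    return float(np.degrees(np.arccos(cos_angle)))

# Identity vs. a 90 degree rotation about Z yields 90.0
Rz90 = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
assert abs(rotation_angle_deg(np.eye(3), Rz90) - 90.0) < 1e-6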
def sample_and_validate_cam_pose(self, cam, cam_ob, config):
    """ Samples a new camera pose, sets the parameters of the given camera object accordingly and validates it.

    :param cam: The camera which contains only camera specific attributes.
    :param cam_ob: The object linked to the camera which determines general properties like location/orientation.
    :param config: The config object describing how to sample.
    :return: True, if the sampled pose was valid.
    """
    # Sample room
    room_id = random.randrange(len(self.rooms))
    room_obj, floor_obj = self.rooms[room_id]

    # Sample/set intrinsics
    self._set_cam_intrinsics(cam, Config(self.config.get_raw_dict("intrinsics", {})))

    # Sample camera extrinsics (we do not set them yet for performance reasons)
    cam2world_matrix = self._cam2world_matrix_from_cam_extrinsics(config)

    # Make sure the sampled location is inside the room => overwrite x and y and add offset to z
    cam2world_matrix.translation[0] = random.uniform(room_obj["bbox"]["min"][0], room_obj["bbox"]["max"][0])
    cam2world_matrix.translation[1] = random.uniform(room_obj["bbox"]["min"][1], room_obj["bbox"]["max"][1])
    cam2world_matrix.translation[2] += room_obj["bbox"]["min"][2]

    # Check if sampled pose is valid
    if self._is_pose_valid(floor_obj, cam, cam_ob, cam2world_matrix):
        # Set camera extrinsics as the pose is valid
        frame = CameraUtility.add_camera_pose(cam2world_matrix)
        cam_ob["room_id"] = room_id
        # As the room id depends on the camera pose and therefore on the keyframe,
        # we also need to add keyframes for the room id
        cam_ob.keyframe_insert(data_path='["room_id"]', frame=frame)
        return True
    else:
        return False
def get_cam_attribute(cam_ob: bpy.context.scene.camera, attribute_name: str,
                      destination_frame: Union[List[str], None] = None) -> Any:
    """ Returns the value of the requested attribute for the given object.

    :param cam_ob: The camera object.
    :param attribute_name: The attribute name.
    :param destination_frame: Used to transform camera to blender coordinates. Default: ["X", "Y", "Z"]
    :return: The attribute value.
    """
    if attribute_name == "fov_x":
        return cam_ob.data.angle_x
    elif attribute_name == "fov_y":
        return cam_ob.data.angle_y
    elif attribute_name == "shift_x":
        return cam_ob.data.shift_x
    elif attribute_name == "shift_y":
        return cam_ob.data.shift_y
    elif attribute_name == "half_fov_x":
        return cam_ob.data.angle_x * 0.5
    elif attribute_name == "half_fov_y":
        return cam_ob.data.angle_y * 0.5
    elif attribute_name == "cam_K":
        return [[x for x in c] for c in CameraUtility.get_intrinsics_as_K_matrix()]
    else:
        if destination_frame is None:
            destination_frame = ["X", "Y", "Z"]
        if attribute_name == "cam2world_matrix":
            return WriterUtility.get_common_attribute(cam_ob, "matrix_world", destination_frame)
        else:
            return WriterUtility.get_common_attribute(cam_ob, attribute_name, destination_frame)
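# Usage sketch (an assumption: get_cam_attribute is exposed on WriterUtility, like the
# get_common_attribute calls above suggest; this call site is hypothetical):
# K = WriterUtility.get_cam_attribute(bpy.context.scene.camera, "cam_K")
# pose = WriterUtility.get_cam_attribute(bpy.context.scene.camera, "cam2world_matrix")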
default="examples/datasets/suncg_basic/output", help="Path to where the final files, will be saved") args = parser.parse_args() Initializer.init() # load the objects into the scene label_mapping = LabelIdMapping.from_csv( Utility.resolve_path( os.path.join('resources', 'id_mappings', 'nyu_idset.csv'))) objs = SuncgLoader.load(args.house, label_mapping=label_mapping) # define the camera intrinsics CameraUtility.set_intrinsics_from_blender_params(1, 512, 512, pixel_aspect_x=1.333333333, lens_unit="FOV") # read the camera positions file and convert into homogeneous camera-world transformation with open(args.camera, "r") as f: for line in f.readlines(): line = [float(x) for x in line.split()] position = MathUtility.change_coordinate_frame_of_point( line[:3], ["X", "-Z", "Y"]) rotation = MathUtility.change_coordinate_frame_of_point( line[3:6], ["X", "-Z", "Y"]) matrix_world = MathUtility.build_transformation_mat( position, CameraUtility.rotation_from_forward_vec(rotation)) CameraUtility.add_camera_pose(matrix_world)
def run(self): """ Load BOP data """ datasets_path = os.path.dirname(self.bop_dataset_path) dataset = os.path.basename(self.bop_dataset_path) print("bob: {}, dataset_path: {}".format(self.bop_dataset_path, datasets_path)) print("dataset: {}".format(dataset)) try: from bop_toolkit_lib import dataset_params, inout except ImportError as error: print( 'ERROR: Please download the bop_toolkit package and add it to sys_paths in config!' ) print('https://github.com/thodan/bop_toolkit') raise error model_p = dataset_params.get_model_params( datasets_path, dataset, model_type=self.model_type if self.model_type else None) cam_p = dataset_params.get_camera_params( datasets_path, dataset, cam_type=self.cam_type if self.cam_type else None) try: split_p = dataset_params.get_split_params(datasets_path, dataset, split=self.split) except ValueError: raise Exception( "Wrong path or {} split does not exist in {}.".format( self.split, dataset)) bpy.context.scene.world["category_id"] = 0 bpy.context.scene.render.resolution_x = cam_p['im_size'][0] bpy.context.scene.render.resolution_y = cam_p['im_size'][1] loaded_objects = [] # only load all/selected objects here, use other modules for setting poses # e.g. camera.CameraSampler / object.ObjectPoseSampler if self.scene_id == -1: # TLESS exception because images are cropped if self.bop_dataset_name in ['tless']: cam_p['K'][0, 2] = split_p['im_size'][0] / 2 cam_p['K'][1, 2] = split_p['im_size'][1] / 2 # set camera intrinsics CameraUtility.set_intrinsics_from_K_matrix(cam_p['K'], split_p['im_size'][0], split_p['im_size'][1]) obj_ids = self.obj_ids if self.obj_ids else model_p['obj_ids'] # if sampling is enabled if self.sample_objects: loaded_ids = {} loaded_amount = 0 if self.obj_instances_limit != -1 and len( obj_ids ) * self.obj_instances_limit < self.num_of_objs_to_sample: raise RuntimeError( "{}'s {} split contains {} objects, {} object where requested to sample with " "an instances limit of {}. Raise the limit amount or decrease the requested " "amount of objects.".format(self.bop_dataset_path, self.split, len(obj_ids), self.num_of_objs_to_sample, self.obj_instances_limit)) while loaded_amount != self.num_of_objs_to_sample: random_id = choice(obj_ids) if random_id not in loaded_ids.keys(): loaded_ids.update({random_id: 0}) # if there is no limit or if there is one, but it is not reached for this particular object if self.obj_instances_limit == -1 or loaded_ids[ random_id] < self.obj_instances_limit: cur_obj = self._load_mesh(random_id, model_p, scale=self.scale) loaded_ids[random_id] += 1 loaded_amount += 1 loaded_objects.append(cur_obj) else: print( "ID {} was loaded {} times with limit of {}. 
Total loaded amount {} while {} are " "being requested".format( random_id, loaded_ids[random_id], self.obj_instances_limit, loaded_amount, self.num_of_objs_to_sample)) else: for obj_id in obj_ids: cur_obj = self._load_mesh(obj_id, model_p, scale=self.scale) loaded_objects.append(cur_obj) self._set_properties(loaded_objects) # replicate scene: load scene objects, object poses, camera intrinsics and camera poses else: sc_gt = inout.load_scene_gt(split_p['scene_gt_tpath'].format( **{'scene_id': self.scene_id})) sc_camera = inout.load_json(split_p['scene_camera_tpath'].format( **{'scene_id': self.scene_id})) for i, (cam_id, insts) in enumerate(sc_gt.items()): cam_K, cam_H_m2c_ref = self._get_ref_cam_extrinsics_intrinsics( sc_camera, cam_id, insts, self.scale) if i == 0: # define world = first camera cam_H_m2w_ref = cam_H_m2c_ref.copy() cur_objs = [] # load scene objects and set their poses for inst in insts: cur_objs.append( self._load_mesh(inst['obj_id'], model_p, scale=self.scale)) self.set_object_pose(cur_objs[-1], inst, self.scale) cam_H_c2w = self._compute_camera_to_world_trafo( cam_H_m2w_ref, cam_H_m2c_ref) # set camera intrinsics CameraUtility.set_intrinsics_from_K_matrix( cam_K, split_p['im_size'][0], split_p['im_size'][1]) # set camera extrinsics as next frame frame_id = CameraUtility.add_camera_pose(cam_H_c2w) # Add key frame for camera shift, as it changes from frame to frame in the tless replication cam = bpy.context.scene.camera.data cam.keyframe_insert(data_path='shift_x', frame=frame_id) cam.keyframe_insert(data_path='shift_y', frame=frame_id) # Copy object poses to key frame (to be sure) for cur_obj in cur_objs: self._insert_key_frames(cur_obj, frame_id) # move the origin of the object to the world origin and on top of the X-Y plane # makes it easier to place them later on, this does not change the `.location` # This is only useful if the BOP objects are not used in a pose estimation scenario. move_to_origin = self.config.get_bool("move_origin_to_x_y_plane", False) if move_to_origin: LoaderInterface.move_obj_origin_to_bottom_mean_point( loaded_objects)
tries = 0
while tries < 10000 and poses < 5:
    # Sample point inside house
    height = np.random.uniform(0.5, 2)
    location, _ = point_sampler.sample(height)
    # Sample rotation (fixed around the X and Y axes)
    euler_rotation = np.random.uniform([1.2217, 0, 0], [1.2217, 0, 6.283185307])
    cam2world_matrix = MathUtility.build_transformation_mat(location, euler_rotation)

    # Check that obstacles are at least 1 meter away from the camera and make sure the view is interesting enough
    if CameraValidation.perform_obstacle_in_view_check(cam2world_matrix, {"min": 1.0}, bvh_tree) \
            and CameraValidation.scene_coverage_score(cam2world_matrix) > 0.4:
        CameraUtility.add_camera_pose(cam2world_matrix)
        poses += 1
    tries += 1

# activate normal and distance rendering
RendererUtility.enable_normals_output()
RendererUtility.enable_distance_output()
MaterialLoaderUtility.add_alpha_channel_to_textures(blurry_edges=True)

# render the whole pipeline
data = RendererUtility.render()

data.update(SegMapRendererUtility.render(Utility.get_temporary_directory(), Utility.get_temporary_directory(), "class"))
def load(bop_dataset_path: str, temp_dir: str, sys_paths: list, model_type: str = "", cam_type: str = "",
         split: str = "test", scene_id: int = -1, obj_ids: list = [], sample_objects: bool = False,
         num_of_objs_to_sample: int = None, obj_instances_limit: int = -1,
         move_origin_to_x_y_plane: bool = False, source_frame: list = ["X", "-Y", "-Z"],
         mm2m: bool = False) -> List[MeshObject]:
    """ Loads the 3D models of any BOP dataset and allows replicating BOP scenes

    - Interfaces with the bop_toolkit, allows loading of train, val and test splits
    - Relative camera poses are loaded/computed with respect to a reference model
    - Sets real camera intrinsics

    :param bop_dataset_path: Full path to a specific bop dataset, e.g. /home/user/bop/tless.
    :param temp_dir: A temp directory which is used for writing the temporary .ply file.
    :param sys_paths: System paths to append.
    :param model_type: Optionally, specify the type of BOP model. Available: [reconst, cad or eval].
    :param cam_type: Camera type. If not defined, the dataset-specific default camera type is used.
    :param split: Optionally, the test or val split depending on the BOP dataset.
    :param scene_id: Optionally, specify the BOP dataset scene to synthetically replicate. Default: -1 (no scene is replicated, only BOP objects are loaded).
    :param obj_ids: List of object ids to load. Default: [] (all objects from the given BOP dataset if scene_id is not specified).
    :param sample_objects: Toggles object sampling from the specified dataset.
    :param num_of_objs_to_sample: Amount of objects to sample from the specified dataset. If this amount is bigger than the dataset actually contains, then all objects will be loaded.
    :param obj_instances_limit: Limits the amount of object copies when sampling. Default: -1 (no limit).
    :param move_origin_to_x_y_plane: Move the center of the object to the lower side of the object. This will not work when used in combination with pose estimation tasks! It is designed for the use-case where BOP objects are used as filler objects in the background.
    :param source_frame: Can be used if the given positions and rotations are specified in frames different from the blender frame. Has to be a list of three strings. Example: ['X', '-Z', 'Y']: Point (1,2,3) will be transformed to (1, -3, 2). Available: ['X', 'Y', 'Z', '-X', '-Y', '-Z'].
    :param mm2m: Specify whether to convert poses and models to meters.
    :return: The list of loaded mesh objects.
    """
    for sys_path in sys_paths:
        if 'bop_toolkit' in sys_path:
            sys.path.append(sys_path)

    scale = 0.001 if mm2m else 1
    bop_dataset_name = os.path.basename(bop_dataset_path)
    has_external_texture = bop_dataset_name in ["ycbv", "ruapc"]
    if obj_ids or sample_objects:
        allow_duplication = True
    else:
        allow_duplication = False

    datasets_path = os.path.dirname(bop_dataset_path)
    dataset = os.path.basename(bop_dataset_path)

    print("bop: {}, dataset_path: {}".format(bop_dataset_path, datasets_path))
    print("dataset: {}".format(dataset))

    try:
        from bop_toolkit_lib import dataset_params, inout
    except ImportError as error:
        print('ERROR: Please download the bop_toolkit package and add it to sys_paths in the config!')
        print('https://github.com/thodan/bop_toolkit')
        raise error

    model_p = dataset_params.get_model_params(datasets_path, dataset,
                                              model_type=model_type if model_type else None)
    cam_p = dataset_params.get_camera_params(datasets_path, dataset,
                                             cam_type=cam_type if cam_type else None)

    try:
        split_p = dataset_params.get_split_params(datasets_path, dataset, split=split)
    except ValueError:
        raise Exception("Wrong path or {} split does not exist in {}.".format(split, dataset))

    bpy.context.scene.world["category_id"] = 0
    bpy.context.scene.render.resolution_x = cam_p['im_size'][0]
    bpy.context.scene.render.resolution_y = cam_p['im_size'][1]

    loaded_objects = []

    # only load all/selected objects here, use other modules for setting poses,
    # e.g. camera.CameraSampler / object.ObjectPoseSampler
    if scene_id == -1:
        # TLESS exception because images are cropped
        if bop_dataset_name in ['tless']:
            cam_p['K'][0, 2] = split_p['im_size'][0] / 2
            cam_p['K'][1, 2] = split_p['im_size'][1] / 2

        # set camera intrinsics
        CameraUtility.set_intrinsics_from_K_matrix(cam_p['K'], split_p['im_size'][0], split_p['im_size'][1])

        obj_ids = obj_ids if obj_ids else model_p['obj_ids']
        # if sampling is enabled
        if sample_objects:
            loaded_ids = {}
            loaded_amount = 0
            if obj_instances_limit != -1 and len(obj_ids) * obj_instances_limit < num_of_objs_to_sample:
                raise RuntimeError("{}'s {} split contains {} objects, {} objects were requested to sample with "
                                   "an instance limit of {}. Raise the limit or decrease the requested "
                                   "amount of objects.".format(bop_dataset_path, split, len(obj_ids),
                                                               num_of_objs_to_sample, obj_instances_limit))
            while loaded_amount != num_of_objs_to_sample:
                random_id = choice(obj_ids)
                if random_id not in loaded_ids:
                    loaded_ids[random_id] = 0
                # if there is no limit or if there is one, but it is not reached for this particular object
                if obj_instances_limit == -1 or loaded_ids[random_id] < obj_instances_limit:
                    cur_obj = BopLoader._load_mesh(random_id, model_p, bop_dataset_name, has_external_texture,
                                                   temp_dir, allow_duplication, scale)
                    loaded_ids[random_id] += 1
                    loaded_amount += 1
                    loaded_objects.append(cur_obj)
                else:
                    print("ID {} was loaded {} times with a limit of {}. Total loaded amount is {} while {} are "
                          "being requested.".format(random_id, loaded_ids[random_id], obj_instances_limit,
                                                    loaded_amount, num_of_objs_to_sample))
        else:
            for obj_id in obj_ids:
                cur_obj = BopLoader._load_mesh(obj_id, model_p, bop_dataset_name, has_external_texture,
                                               temp_dir, allow_duplication, scale)
                loaded_objects.append(cur_obj)

    # replicate scene: load scene objects, object poses, camera intrinsics and camera poses
    else:
        sc_gt = inout.load_scene_gt(split_p['scene_gt_tpath'].format(**{'scene_id': scene_id}))
        sc_camera = inout.load_json(split_p['scene_camera_tpath'].format(**{'scene_id': scene_id}))
        for i, (cam_id, insts) in enumerate(sc_gt.items()):
            cam_K, cam_H_m2c_ref = BopLoader._get_ref_cam_extrinsics_intrinsics(sc_camera, cam_id, insts, scale)

            if i == 0:
                # define world = first camera
                cam_H_m2w_ref = cam_H_m2c_ref.copy()

                cur_objs = []
                # load scene objects and set their poses
                for inst in insts:
                    cur_objs.append(BopLoader._load_mesh(inst['obj_id'], model_p, bop_dataset_name,
                                                         has_external_texture, temp_dir, allow_duplication, scale))
                    BopLoader.set_object_pose(cur_objs[-1], inst, scale)

            cam_H_c2w = BopLoader._compute_camera_to_world_trafo(cam_H_m2w_ref, cam_H_m2c_ref, source_frame)
            # set camera intrinsics
            CameraUtility.set_intrinsics_from_K_matrix(cam_K, split_p['im_size'][0], split_p['im_size'][1])

            # set camera extrinsics as next frame
            frame_id = CameraUtility.add_camera_pose(cam_H_c2w)

            # Add key frames for the camera shift, as it changes from frame to frame in the tless replication
            cam = bpy.context.scene.camera.data
            cam.keyframe_insert(data_path='shift_x', frame=frame_id)
            cam.keyframe_insert(data_path='shift_y', frame=frame_id)

            # Copy object poses to key frame (to be sure)
            for cur_obj in cur_objs:
                BopLoader._insert_key_frames(cur_obj, frame_id)

    # Move the origin of each object to the world origin and on top of the X-Y plane.
    # This makes it easier to place the objects later on; it does not change `.location`.
    # It is only useful if the BOP objects are not used in a pose estimation scenario.
    if move_origin_to_x_y_plane:
        for obj in loaded_objects:
            obj.move_origin_to_bottom_mean_point()

    return loaded_objects
# load the objects into the scene
objs = BlendLoader.load(args.scene)

# Set some category ids for loaded objects
for j, obj in enumerate(objs):
    obj.set_cp("category_id", j + 1)

# define a light and set its location and energy level
light = Light()
light.set_type("POINT")
light.set_location([5, -5, 5])
light.set_energy(1000)

# define the camera intrinsics
CameraUtility.set_intrinsics_from_blender_params(1, 512, 512, lens_unit="FOV")

# read the camera positions file and convert into homogeneous camera-world transformation
with open(args.camera, "r") as f:
    for line in f.readlines():
        line = [float(x) for x in line.split()]
        position, euler_rotation = line[:3], line[3:6]
        matrix_world = MathUtility.build_transformation_mat(position, euler_rotation)
        CameraUtility.add_camera_pose(matrix_world)

# activate normal and distance rendering
RendererUtility.enable_normals_output()
RendererUtility.enable_distance_output()
# set the amount of samples, which should be used for the color rendering
def _set_cam_intrinsics(self, cam, config):
    """ Sets camera intrinsics from a source with the following priority:

    1. from the config function parameter, if defined
    2. from custom properties of cam, if set in the Loader
    3. default config:
       resolution_x/y: 512
       pixel_aspect_x: 1
       clip_start: 0.1
       clip_end: 1000
       fov: 0.691111

    :param cam: The camera which contains only camera specific attributes.
    :param config: A configuration object with cam intrinsics.
    """
    if config.is_empty():
        return

    width = config.get_int("resolution_x", bpy.context.scene.render.resolution_x)
    height = config.get_int("resolution_y", bpy.context.scene.render.resolution_y)

    # Clipping
    clip_start = config.get_float("clip_start", cam.clip_start)
    clip_end = config.get_float("clip_end", cam.clip_end)

    if config.has_param("cam_K"):
        if config.has_param("fov"):
            print('WARNING: FOV defined in config is ignored. Mutually exclusive with cam_K')
        if config.has_param("pixel_aspect_x"):
            print('WARNING: pixel_aspect_x defined in config is ignored. Mutually exclusive with cam_K')

        cam_K = np.array(config.get_list("cam_K")).reshape(3, 3).astype(np.float32)
        CameraUtility.set_intrinsics_from_K_matrix(cam_K, width, height, clip_start, clip_end)
    else:
        # Set FOV
        fov = config.get_float("fov", cam.angle)

        # Set pixel aspect ratio
        pixel_aspect_x = config.get_float("pixel_aspect_x", bpy.context.scene.render.pixel_aspect_x)
        pixel_aspect_y = config.get_float("pixel_aspect_y", bpy.context.scene.render.pixel_aspect_y)

        # Set camera shift
        shift_x = config.get_float("shift_x", cam.shift_x)
        shift_y = config.get_float("shift_y", cam.shift_y)

        CameraUtility.set_intrinsics_from_blender_params(fov, width, height, clip_start, clip_end,
                                                         pixel_aspect_x, pixel_aspect_y, shift_x, shift_y,
                                                         lens_unit="FOV")

    CameraUtility.set_stereo_parameters(config.get_string("stereo_convergence_mode", cam.stereo.convergence_mode),
                                        config.get_float("convergence_distance", cam.stereo.convergence_distance),
                                        config.get_float("interocular_distance", cam.stereo.interocular_distance))

    if config.has_param("depth_of_field"):
        depth_of_field_config = Config(config.get_raw_dict("depth_of_field"))
        fstop_value = depth_of_field_config.get_float("fstop", 2.4)
        aperture_blades = depth_of_field_config.get_int("aperture_blades", 0)
        aperture_ratio = depth_of_field_config.get_float("aperture_ratio", 1.0)
        aperture_rotation = depth_of_field_config.get_float("aperture_rotation_in_rad", 0.0)
        if depth_of_field_config.has_param("depth_of_field_dist") and depth_of_field_config.has_param("focal_object"):
            raise RuntimeError("You can only use either depth_of_field_dist or a focal_object but not both!")
        if depth_of_field_config.has_param("depth_of_field_dist"):
            depth_of_field_dist = depth_of_field_config.get_float("depth_of_field_dist")
            CameraUtility.add_depth_of_field(cam, None, fstop_value, aperture_blades, aperture_rotation,
                                             aperture_ratio, depth_of_field_dist)
        elif depth_of_field_config.has_param("focal_object"):
            focal_object = depth_of_field_config.get_list("focal_object")
            if len(focal_object) != 1:
                raise RuntimeError(f"There has to be exactly one focal object, use 'random_samples: 1' or change "
                                   f"the selector. Found {len(focal_object)}.")
            CameraUtility.add_depth_of_field(cam, focal_object[0], fstop_value, aperture_blades,
                                             aperture_rotation, aperture_ratio)
        else:
            raise RuntimeError("The depth_of_field dict must contain either a focal_object definition or "
                               "a depth_of_field_dist")
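# Hedged example (an assumption: the dict mirrors the keys read above; all values are
# illustrative) of an intrinsics config including the optional depth of field block:
intrinsics_example = {
    "resolution_x": 640, "resolution_y": 480,
    "clip_start": 0.1, "clip_end": 1000,
    # Either a row-major 3x3 K matrix (mutually exclusive with fov/pixel_aspect_x)...
    "cam_K": [615.0, 0.0, 320.0, 0.0, 615.0, 240.0, 0.0, 0.0, 1.0],
    # ...and optionally a depth of field definition with a fixed focus distance:
    "depth_of_field": {"fstop": 2.4, "aperture_blades": 6, "depth_of_field_dist": 1.5},
}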
def _set_cam_intrinsics(self, cam, config):
    """ Sets camera intrinsics from a source with the following priority:

    1. from the config function parameter, if defined
    2. from custom properties of cam, if set in the Loader
    3. default config:
       resolution_x/y: 512
       pixel_aspect_x: 1
       clip_start: 0.1
       clip_end: 1000
       fov: 0.691111

    :param cam: The camera which contains only camera specific attributes.
    :param config: A configuration object with cam intrinsics.
    """
    if config.is_empty():
        return

    width = config.get_int("resolution_x", bpy.context.scene.render.resolution_x)
    height = config.get_int("resolution_y", bpy.context.scene.render.resolution_y)

    # Clipping
    clip_start = config.get_float("clip_start", cam.clip_start)
    clip_end = config.get_float("clip_end", cam.clip_end)

    if config.has_param("cam_K"):
        if config.has_param("fov"):
            print('WARNING: FOV defined in config is ignored. Mutually exclusive with cam_K')
        if config.has_param("pixel_aspect_x"):
            print('WARNING: pixel_aspect_x defined in config is ignored. Mutually exclusive with cam_K')

        cam_K = np.array(config.get_list("cam_K")).reshape(3, 3).astype(np.float32)
        CameraUtility.set_intrinsics_from_K_matrix(cam_K, width, height, clip_start, clip_end)
    else:
        # Set FOV
        fov = config.get_float("fov", cam.angle)

        # Set pixel aspect ratio
        pixel_aspect_x = config.get_float("pixel_aspect_x", bpy.context.scene.render.pixel_aspect_x)
        pixel_aspect_y = config.get_float("pixel_aspect_y", bpy.context.scene.render.pixel_aspect_y)

        # Set camera shift
        shift_x = config.get_float("shift_x", cam.shift_x)
        shift_y = config.get_float("shift_y", cam.shift_y)

        CameraUtility.set_intrinsics_from_blender_params(fov, width, height, clip_start, clip_end,
                                                         pixel_aspect_x, pixel_aspect_y, shift_x, shift_y,
                                                         lens_unit="FOV")

    CameraUtility.set_stereo_parameters(config.get_string("stereo_convergence_mode", cam.stereo.convergence_mode),
                                        config.get_float("convergence_distance", cam.stereo.convergence_distance),
                                        config.get_float("interocular_distance", cam.stereo.interocular_distance))
objs = ObjectLoader.load(args.scene)

# define a light and set its location and energy level
light = Light()
light.set_type("POINT")
light.set_location([5, -5, 5])
light.set_energy(1000)

# Find point of interest, all cam poses should look towards it
poi = MeshObject.compute_poi(objs)

# Sample five camera poses
for i in range(5):
    # Sample random camera location above objects (lower bound below upper bound per axis)
    location = np.random.uniform([-10, -10, 8], [10, 10, 12])
    # Compute rotation based on vector going from location towards poi
    rotation_matrix = CameraUtility.rotation_from_forward_vec(poi - location,
                                                              inplane_rot=np.random.uniform(-0.7854, 0.7854))
    # Add homogeneous cam pose based on location and rotation
    cam2world_matrix = MathUtility.build_transformation_mat(location, rotation_matrix)
    CameraUtility.add_camera_pose(cam2world_matrix)

# activate normal and distance rendering
RendererUtility.enable_normals_output()
RendererUtility.enable_distance_output()
# set the amount of samples, which should be used for the color rendering
RendererUtility.set_samples(350)

# render the whole pipeline
data = RendererUtility.render()

# write the data to a .hdf5 container
WriterUtility.save_to_hdf5(args.output_dir, data)
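# Hedged follow-up sketch (assumptions: h5py is available, and save_to_hdf5 writes one
# numbered .hdf5 file per frame into the output directory): inspecting the container.
import h5py

with h5py.File("output/0.hdf5", "r") as f:
    for key in f.keys():  # e.g. "colors", "normals", "distance"
        print(key, f[key].shape)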