def test_set_camera_intrisics(self):
    """Test setting up the camera from explicit intrinsics.

    Exercises both intrinsics conversion modes ('fov' and 'mm'); each must
    yield the same expected calibration matrix.
    """
    # expected calibration matrix (identical for both conversion modes)
    expected_K = np.asarray([[2666.67, 0, 960],
                             [0, 2666.67, 540],
                             [0, 0, 1]])

    # overwrite parameters, then check each conversion mode in turn
    self._cam_info.intrinsic = np.array([390, 390, 320, 240])
    for mode in ('fov', 'mm'):
        self._cam_info.intrinsics_conversion_mode = mode

        # set camera and get matrix
        camera.set_camera_info(bpy.context.scene, self._cam.data,
                               self._cam_info)
        K = np.asarray(
            camera.get_calibration_matrix(bpy.context.scene,
                                          self._cam.data))

        # test (err_msg text matches the original per-mode messages)
        npt.assert_almost_equal(
            expected_K, K, decimal=2,
            err_msg='Error while setting camera from intrinsics with '
                    f'conv. mode "{mode}"')
def test_set_camera_hfov(self):
    "test setting up camera with field of view"
    # overwrite parameters
    self._cam_info.hfov = 78.694
    expected_K = np.asarray([[73.2517014, 0, 960],
                             [0, 73.2517014, 540],
                             [0, 0, 1]])

    # set camera, then read back the resulting calibration matrix
    camera.set_camera_info(bpy.context.scene, self._cam.data, self._cam_info)
    actual_K = np.asarray(
        camera.get_calibration_matrix(bpy.context.scene, self._cam.data))

    # test
    npt.assert_almost_equal(expected_K, actual_K,
                            err_msg='Error while setting camera from hfov')
def test_set_camera_swfl(self):
    "test setting up camera with sensor-width and focal length"
    # overwrite parameters
    self._cam_info.sensor_width = 1.89882275303
    self._cam_info.focal_length = 1.158
    expected_K = np.asarray([[1170.91, 0, 960],
                             [0, 1170.91, 540],
                             [0, 0, 1]])

    # set camera, then read back the resulting calibration matrix
    camera.set_camera_info(bpy.context.scene, self._cam.data, self._cam_info)
    actual_K = np.asarray(
        camera.get_calibration_matrix(bpy.context.scene, self._cam.data))

    # test
    npt.assert_almost_equal(expected_K, actual_K, decimal=2,
                            err_msg='Error while setting camera from swfl')
def postprocess(self, dirinfo, base_filename, camera, objs, zeroing, **kwargs):
    """Postprocess the scene.

    This step will compute all the data that is relevant for
    PoseRenderResult. This data will then be saved to json. In addition,
    postprocessing will fix the filenames generated by blender, convert the
    rendered range map into a rectified depth map, and (optionally) compute
    a disparity map for parallel stereo camera setups.

    Args:
        dirinfo(DynamicStruct): struct with directory and path info
        base_filename(str): file name (without extension)
        camera(bpy.types.Camera): active camera object
        objs(list): list of target objects
        zeroing(np.array): array for zeroing camera rotation

    Kwargs Args:
        postprocess_config(Configuration): postprocess specific config.
            See abr/scenes/baseconfiguration and scene configs for specific
            configuration values.
    """
    # get postprocess specific configs (fall back to the scene defaults)
    postprocess_config = kwargs.get(
        'postprocess_config', abr_scenes.BaseConfiguration().postprocess)

    # camera calibration matrix
    K_cam = np.asarray(
        camera_utils.get_calibration_matrix(bpy.context.scene, camera.data))

    # first we update the view-layer to get the updated values in
    # translation and rotation
    bpy.context.view_layer.update()

    # the compositor postprocessing takes care of fixing file names
    # and saving the masks filename into objs
    self.compositor.postprocess()

    # rectify range map into depth. Blender depth maps are indeed ranges;
    # here we convert ranges into depth values.
    # filenames (ranges are stored as true exr values, depth as 16 bit png)
    fpath_range = os.path.join(dirinfo.images.range, f'{base_filename}.exr')
    if not os.path.exists(dirinfo.images.depth):
        os.mkdir(dirinfo.images.depth)
    fpath_depth = os.path.join(dirinfo.images.depth, f'{base_filename}.png')

    # convert
    camera_utils.project_pinhole_range_to_rectified_depth(
        fpath_range,
        fpath_depth,
        res_x=bpy.context.scene.render.resolution_x,
        res_y=bpy.context.scene.render.resolution_y,
        calibration_matrix=K_cam,
        scale=postprocess_config.depth_scale)

    # NOTE: this assumes the camera(s) for which the disparity is computed
    # is(are) the correct one(s). That is, it has the correct baseline
    # according to the rendered scene
    if postprocess_config.compute_disparity:
        # check whether the current camera name contains any of the given
        # strings that mark a parallel stereo setup
        if any(c in camera.name for c in postprocess_config.parallel_cameras):
            # use precomputed depth if available, otherwise use range map
            dirpath = os.path.join(dirinfo.images.base_path, 'disparity')
            if not os.path.exists(dirpath):
                os.mkdir(dirpath)
            fpath_disparity = os.path.join(dirpath, f'{base_filename}.png')

            # compute map
            camera_utils.compute_disparity_from_z_info(
                fpath_depth,
                fpath_disparity,
                baseline_mm=postprocess_config.parallel_cameras_baseline_mm,
                calibration_matrix=K_cam,
                res_x=bpy.context.scene.render.resolution_x,
                res_y=bpy.context.scene.render.resolution_y,
                scale=postprocess_config.depth_scale)

    # compute bounding boxes and save annotations
    results_gl = ResultsCollection()
    results_cv = ResultsCollection()
    render_result_gl = render_result_cv = None
    for obj in objs:
        render_result_gl, render_result_cv = self.build_render_result(
            obj, camera, zeroing, postprocess_config.visibility_from_mask)
        if obj['visible']:
            results_gl.add_result(render_result_gl)
            results_cv.add_result(render_result_cv)

    # if there's no visible object, add single instance results (from the
    # last processed object) to have general scene information annotated.
    # BUGFIX: guard against an empty objs list — previously this raised a
    # NameError because render_result_gl/_cv were never assigned.
    if render_result_gl is not None and len(results_gl) == 0:
        results_gl.add_result(render_result_gl)
    if render_result_cv is not None and len(results_cv) == 0:
        results_cv.add_result(render_result_cv)

    self.save_annotations(dirinfo, base_filename, results_gl, results_cv)