def get_capture(self, timeout=TIMEOUT_WAIT_INFINITE, color_only=False, transform_depth_to_color=True):
    """Get next capture.

    Parameters:
        timeout: Timeout of capture
        color_only: If True, only color image will be returned
        transform_depth_to_color: If True, depth image will be transformed to the color image space

    Returns:
        color: Color image
        depth: Depth image
        pose: Pose data of shape (num_bodies, num_joints, 10). Last dimension means:
            0:2 - 2d keypoints projected to color image
            2:5 - 3d keypoints in world depth image coordinates
            5:9 - orientation
            9 - confidence
        body_index_map: Body Index map is the body instance segmentation map. Each pixel maps
            to the corresponding pixel in the depth image or the ir image. The value for each
            pixel represents which body the pixel belongs to. It can be either background
            (value 255) or the index of a detected k4abt_body_t.
    """
    res = k4a_module.device_get_capture(timeout)
    self._verify_error(res)
    color = k4a_module.device_get_color_image()
    if color_only:
        return color
    # FIX: these two assignments were commented out while the names were still
    # returned below, which raised NameError whenever color_only was False.
    depth = k4a_module.device_get_depth_image(transform_depth_to_color)
    pose, body_index_map = k4a_module.device_get_pose_data()
    return color, depth, pose, body_index_map
def get_capture(self, timeout=TIMEOUT_WAIT_INFINITE, color_only=False, transform_depth_to_color=True):
    r"""Fetch a capture from the device and return as numpy array(s) or None.

    Arguments:
        :param timeout: Timeout in ms. Default is infinite.
        :param color_only: If true, returns color image only as np.array
        :param transform_depth_to_color: If true, transforms the depth image to
            the color image reference, using the kinect azure device calibration
            parameters.

    Returns:
        :return img_color [, img_depth]  # image could be None if config
            synchronized_images_only==False

    Examples::
        - if config synchronized_images_only=True
        >>> k4a.get_capture(color_only=True)  # type: np.ndarray

        - if config synchronized_images_only=False, you must check if the
          returns for each image are None
        >>> k4a.get_capture(color_only=True)  # type: Optional[np.ndarray]
        >>> k4a.get_capture()  # type: Tuple[Optional[np.ndarray], Optional[np.ndarray]]
    """
    res = k4a_module.device_get_capture(timeout)
    self._verify_error(res)
    color = self._get_capture_color()
    if color_only:
        return color
    # Depth is only fetched when the caller asked for the full pair.
    depth = self._get_capture_depth(transform_depth_to_color)
    return color, depth
def get_capture(
    self,
    timeout=TIMEOUT_WAIT_INFINITE,
) -> "PyK4ACapture":
    """
    Fetch the next capture from the device wrapped in a PyK4ACapture object.
    Images are fetched lazily by the returned object.

    Arguments:
        :param timeout: Timeout in ms. Default is infinite.

    Returns:
        :return capture containing requested images and infos if they are
            available in the current capture. There are no guarantees that the
            returned object will contain all the requested images.

    If using any ImageFormat other than ImageFormat.COLOR_BGRA32, the color
    color_image must be decoded. See example/color_formats.py
    """
    self._validate_is_opened()
    result_code, capture_handle = k4a_module.device_get_capture(
        self._device_handle, self.thread_safe, timeout)
    _verify_error(result_code)
    return PyK4ACapture(
        calibration=self.calibration,
        capture_handle=capture_handle,
        color_format=self._config.color_format,
        thread_safe=self.thread_safe,
    )
def get_capture(self, timeout=TIMEOUT_WAIT_INFINITE, transform_to_color=True, color=True, ir=True, depth=True, pcl=True):
    r"""Fetch a capture from the device and return images as numpy arrays or None.

    Starts the device first if it is not running yet.

    Arguments:
        :param timeout: Timeout in ms. Default is infinite.
        :param transform_to_color: If true, the ir and depth images are
            transformed to the color camera reference; if false, the color
            image is transformed to the depth camera reference (requires depth).
        :param color: If true, fetch the color image.
        :param ir: If true, fetch the ir image.
        :param depth: If true, fetch the depth image.
        :param pcl: If true, compute a point cloud from the depth image
            (requires depth).

    Returns:
        :return (img_color, img_ir, img_depth, img_pcl) — each entry is None
            when not requested (or unavailable in the current capture).

    Raises:
        RuntimeError: if ``pcl`` is requested without ``depth``, or if
            ``color`` is requested with ``transform_to_color=False`` but
            without ``depth``.
    """
    img_color = None
    img_ir = None
    img_depth = None
    img_pcl = None
    # Fail fast on inconsistent flag combinations before touching the device.
    if pcl and not depth:
        raise RuntimeError("need depth to calculate pcl")
    if color and not transform_to_color and not depth:
        # FIX: corrected "transofrm" typo in the error message.
        raise RuntimeError("need depth to transform color to depth")
    if not self.is_running:
        self.start()
    res = k4a_module.device_get_capture(timeout)
    self._verify_error(res)
    if ir:
        img_ir = self._get_capture_ir(transform_to_color)
    if depth:
        img_depth = self._get_capture_depth(transform_to_color)
    if pcl:
        img_pcl = k4a_module.transformation_depth_image_to_pcl(img_depth, transform_to_color)
    if color:
        img_color = self._get_capture_color()
        if not transform_to_color:
            img_color = k4a_module.transformation_color_image_to_depth_camera(img_color, img_depth)
    return img_color, img_ir, img_depth, img_pcl
def get_capture(self, timeout=TIMEOUT_WAIT_INFINITE, color_only=False, transform_depth_to_color=True):
    """Grab the next capture and return its image(s).

    :param timeout: capture timeout in ms (default: infinite)
    :param color_only: when True, return only the color image
    :param transform_depth_to_color: when True, the depth image is remapped
        into the color camera's image space
    :return: the color image, or a ``(color, depth)`` tuple when
        ``color_only`` is False
    """
    self._verify_error(k4a_module.device_get_capture(timeout))
    img_color = k4a_module.device_get_color_image()
    if color_only:
        return img_color
    img_depth = k4a_module.device_get_depth_image(transform_depth_to_color)
    return img_color, img_depth
def get_capture2(self, skip_old_atol_ms=None, parallel_bt=True, get_bt=True,
                 get_depth=True, get_color=True, get_color_timestamp=True,
                 get_depth_timestamp=True, undistort_color=True,
                 undistort_depth=True, undistort_bt=True, transformed_depth=True,
                 timeout=TIMEOUT_WAIT_INFINITE, verbose=0):
    """Fetch a capture and assemble the requested outputs into a dict.

    Uses the k4a_module *_start()/*_join() call pairs so that the selected
    image/pose fetches can overlap; records per-phase timings in
    ``self.timings`` and frame-skip counts in ``self.skip_counts``.

    :param skip_old_atol_ms: when set, captures that return faster than this
        many ms are discarded and re-fetched (presumably to drop stale queued
        frames — TODO confirm intent); ``None`` disables skipping.
    :param parallel_bt: start body-tracking pose fetch in parallel with the
        image joins instead of after them.
    :param get_bt / get_depth / get_color: select which outputs to fetch.
    :param get_color_timestamp / get_depth_timestamp: also store device
        timestamps (usec) for the corresponding image.
    :param undistort_color / undistort_depth / undistort_bt: use the
        undistorted variants of the fetch calls.
    :param transformed_depth: fetch depth transformed to the color camera;
        the plain (non-transformed, non-undistorted) depth path is not
        implemented and raises.
    :param timeout: capture timeout in ms. Default is infinite.
    :param verbose: every ``verbose``-th call, print timing statistics
        (0 disables).
    :return: dict with 'skip_count' plus, when available, depth/color/pose
        entries keyed by the requested options (e.g. 'color_undistorted',
        'depth_timestamp', 'pose', 'body_index_map...').
    """
    self.counter += 1
    skip_count = 0
    # Re-fetch until the capture is "fresh enough": a capture that returns
    # in <= skip_old_atol_ms is counted as skipped and fetched again.
    while True:
        t0 = time.time()
        res = k4a_module.device_get_capture(timeout)
        self._verify_error(res)
        t1 = time.time()
        if skip_old_atol_ms is None or (t1 - t0) * 1000 > skip_old_atol_ms:
            break
        else:
            skip_count += 1
    result = dict()
    result['skip_count'] = skip_count
    # Phase 1: kick off all requested fetches (start half of start/join pairs).
    if get_color:
        if undistort_color:
            k4a_module.device_get_color_image_undistorted_start()
        else:
            k4a_module.device_get_color_image_start()
    if get_depth:
        if undistort_depth:
            if transformed_depth:
                k4a_module.device_get_transformed_depth_image_undistorted_start(
                )
            else:
                k4a_module.device_get_depth_image_undistorted_start()
        else:
            if transformed_depth:
                k4a_module.device_get_transformed_depth_image_start()
            else:
                # Plain depth (no transform, no undistortion) has no backing call.
                raise Exception('get depth_image not implemented')
    if parallel_bt:
        if get_bt:
            if undistort_bt:
                k4a_module.device_get_pose_data_undistorted_start()
            else:
                k4a_module.device_get_pose_data_start()
    t2 = time.time()
    # Key suffix helper: '_undistorted' when the undistorted variant was used.
    suff = lambda x: '_undistorted' if x else ''
    # Phase 2: join the depth fetch (mirrors the start-branch structure above).
    if get_depth:
        if undistort_depth:
            if transformed_depth:
                depth_result = k4a_module.device_get_transformed_depth_image_undistorted_join(
                )
            else:
                depth_result = k4a_module.device_get_depth_image_undistorted_join(
                )
        else:
            if transformed_depth:
                depth_result = k4a_module.device_get_transformed_depth_image_join(
                )
            else:
                raise Exception('get depth_image not implemented')
        if depth_result is not None:
            # NOTE(review): 'tranfromed_' is misspelled, but it is a result
            # dict key — fixing it would break existing callers. Left as-is.
            pref = 'tranfromed_' if transformed_depth else ''
            result[f'{pref}depth{suff(undistort_depth)}'] = depth_result
            if get_depth_timestamp:
                result[
                    'depth_timestamp'] = k4a_module.device_get_depth_image_device_timestamp_usec(
                    )
        else:
            print('depth_result None')
    # Phase 2 continued: join the color fetch.
    if get_color:
        color_result = k4a_module.device_get_color_image_undistorted_join() \
            if undistort_color else k4a_module.device_get_color_image_join()
        if color_result is not None:
            result[f'color{suff(undistort_color)}'] = color_result
            if get_color_timestamp:
                result[
                    'color_timestamp'] = k4a_module.device_get_color_image_device_timestamp_usec(
                    )
    if not parallel_bt:
        # Sequential body tracking: phase-1 timing ends here instead.
        t2 = time.time()
    if get_bt:
        if not parallel_bt:
            # Body tracking was not started in phase 1; start it now.
            if undistort_bt:
                k4a_module.device_get_pose_data_undistorted_start()
            else:
                k4a_module.device_get_pose_data_start()
        pose, body_index_map_result = k4a_module.device_get_pose_data_undistorted_join() \
            if undistort_bt else k4a_module.device_get_pose_data_join()
        if not (pose is None or body_index_map_result is None):
            result['pose'] = pose
            result[
                f'body_index_map{suff(undistort_bt)}'] = body_index_map_result
    t3 = time.time()
    # Accumulate per-phase timings: 0 = capture wait, 1 = starts+joins, 2 = bt tail.
    self.timings['get_capture2_0'].append(t1 - t0)
    self.timings['get_capture2_1'].append(t2 - t1)
    self.timings['get_capture2_2'].append(t3 - t2)
    self.skip_counts.append(skip_count)
    if verbose > 0 and self.counter % verbose == 0:
        timings_s = 'k4a timings:\n'
        min_frames = 10
        durations = dict()
        # Average only the second half of each series to skip warm-up frames.
        for k, v in sorted(self.timings.items(), key=operator.itemgetter(0)):
            if len(v) > 2 * min_frames:
                start = int(len(v) / 2)
                duration = np.array(v)[start:].mean()
                timings_s += f'\t{k}: {duration * 1000:.3f}ms\n'
                durations[k] = duration
        if skip_old_atol_ms is not None and len(
                self.skip_counts) > 2 * min_frames:
            start = int(len(self.skip_counts) / 2)
            timings_s += f'\tskip_count mean: {np.mean(self.skip_counts[start:]):.2f}\n'
        if len(durations) > 0:
            fps = 1 / (durations['get_capture2_0'] +
                       durations['get_capture2_1'] +
                       durations['get_capture2_2'])
            timings_s += f'\tfps: {fps:.1f}\n'
        if len(durations) > 0:
            print(timings_s)
    return result
def trigger_capture(self, timeout=TIMEOUT_WAIT_INFINITE):
    """Request a capture from the device without fetching any images.

    :param timeout: capture timeout in ms (default: infinite)
    """
    self._verify_error(k4a_module.device_get_capture(timeout))