def test_calibration_raw(device: PyK4A):
    """The raw calibration blob of an opened device is non-empty."""
    device.open()
    import sys

    blob = device.calibration_raw
    # Dump to stderr so it is visible when running pytest with -s.
    print(blob, file=sys.stderr)
    assert blob
def test_color_property(device: PyK4A):
    """Factory-default color-control values are reported by the getters."""
    device.open()
    defaults = {
        "brightness": 128,
        "contrast": 5,
        "saturation": 32,
        "sharpness": 2,
        "backlight_compensation": 0,
        "gain": 128,
        "powerline_frequency": 2,
        "exposure": 16670,
        "whitebalance": 4500,
    }
    for prop, expected in defaults.items():
        assert getattr(device, prop) == expected
    # Auto modes default to enabled.
    assert device.exposure_mode_auto is True
    assert device.whitebalance_mode_auto is True
def test_device_capture_images(device: PyK4A):
    """A capture yields color, depth and IR images that are all distinct."""
    device.open()
    device._start_cameras()
    capture = device.get_capture()
    images = (capture.color, capture.depth, capture.ir)
    for image in images:
        assert image is not None
    color, depth, ir = images
    # The three streams must not be identical arrays.
    assert not np.array_equal(color, depth)
    assert not np.array_equal(depth, ir)
    assert not np.array_equal(ir, color)
def main():
    """Show colorized transformed depth; dump depth/point-cloud data per frame.

    Press 'q' in the window to quit.
    """
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=False,
        )
    )
    k4a.start()

    # White-balance getter/setter act directly on the device.
    for wb in (4500, 4510):
        k4a.whitebalance = wb
        assert k4a.whitebalance == wb

    while True:
        capture = k4a.get_capture()
        if np.any(capture.depth) and np.any(capture.color):
            depth_in_color = capture.transformed_depth
            cv2.imshow('trns', colorize(depth_in_color, (None, None), cv2.COLORMAP_HSV))
            cloud = capture.transformed_depth_point_cloud
            # Persist raw values for offline analysis (rewritten every frame).
            with open('depthvalues.json', 'w') as f:
                json.dump(depth_in_color.tolist(), f)
            with open('pointcloud.json', 'w') as f:
                json.dump(cloud.tolist(), f)
            cv2.imwrite('colorPC.bmp', capture.color)
            cv2.imwrite('trDepthPC.bmp', depth_in_color)
            key = cv2.waitKey(1)
            if key == ord('q'):
                cv2.destroyAllWindows()
                break
    k4a.stop()
def main():
    """Minimal live viewer: show the BGR color stream until a key is pressed."""
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=True,
        )
    )
    k4a.start()

    # White-balance getter/setter act directly on the device.
    for wb in (4500, 4510):
        k4a.whitebalance = wb
        assert k4a.whitebalance == wb

    while True:
        capture = k4a.get_capture()
        if np.any(capture.color):
            # Drop the alpha channel for display.
            cv2.imshow("k4a", capture.color[:, :, :3])
            if cv2.waitKey(10) != -1:
                cv2.destroyAllWindows()
                break
    k4a.stop()
def bench(config: Config, device_id: int):
    """Benchmark capture throughput of one device.

    Prints a rolling color/depth FPS roughly every 2 seconds until the user
    presses CTRL-C, then prints the overall averages.

    :param config: camera configuration to benchmark
    :param device_id: index of the Azure Kinect device to open
    """
    device = PyK4A(config=config, device_id=device_id)
    device.start()
    depth = color = depth_period = color_period = 0
    # Fixed typo in the user-facing message ("top stop" -> "to stop").
    print("Press CTRL-C to stop benchmark")
    started_at = started_at_period = monotonic()
    while True:
        try:
            capture = device.get_capture()
            if capture.color is not None:
                color += 1
                color_period += 1
            if capture.depth is not None:
                depth += 1
                depth_period += 1
            elapsed_period = monotonic() - started_at_period
            if elapsed_period >= 2:
                print(
                    f"Color: {color_period / elapsed_period:0.2f} FPS, Depth: {depth_period / elapsed_period: 0.2f} FPS"
                )
                # Reset the rolling window.
                color_period = depth_period = 0
                started_at_period = monotonic()
        except KeyboardInterrupt:
            break
    elapsed = monotonic() - started_at
    device.stop()
    print()
    print(
        f"Result: Color: {color / elapsed:0.2f} FPS, Depth: {depth / elapsed: 0.2f} FPS"
    )
def main():
    """3072p still-capture tool: SPACE saves a timestamped JPEG, any other key quits."""
    k4a_config = Config(
        color_resolution=pyk4a.ColorResolution.RES_3072P,
        depth_mode=pyk4a.DepthMode.OFF,
        synchronized_images_only=False,
        camera_fps=pyk4a.FPS.FPS_15,
    )
    k4a = PyK4A(k4a_config, device_id=DEV_ID)
    k4a.start()

    # White-balance getter/setter act directly on the device.
    for wb in (4500, 4510):
        k4a.whitebalance = wb
        assert k4a.whitebalance == wb

    while True:
        capture = k4a.get_capture()
        cv2.namedWindow("k4a", cv2.WINDOW_NORMAL)
        if np.any(capture.color):
            cv2.imshow("k4a", capture.color[:, :, :3])
            key = cv2.waitKey(10)
            if key == ord(' '):
                stamp = datetime.datetime.strftime(
                    datetime.datetime.now(), '%Y-%m-%d-%H-%M-%S')
                cv2.imwrite(
                    osp.join('imgs', '4cam', str(DEV_ID), stamp + '.jpg'),
                    capture.color[:, :, :3])
                print('save img!' + str(DEV_ID))
            elif key != -1:
                cv2.destroyAllWindows()
                break
    k4a.stop()
def main():
    """Depth-only viewer: colorized NFOV depth until any key is pressed."""
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.OFF,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=False,
        )
    )
    k4a.start()

    # White-balance getter/setter act directly on the device.
    for wb in (4500, 4510):
        k4a.whitebalance = wb
        assert k4a.whitebalance == wb

    while True:
        capture = k4a.get_capture()
        if np.any(capture.depth):
            # Clip display range at 5 m for a readable color map.
            cv2.imshow("k4a", colorize(capture.depth, (None, 5000), cv2.COLORMAP_HSV))
            if cv2.waitKey(10) != -1:
                cv2.destroyAllWindows()
                break
    k4a.stop()
def main():
    """Show every available view (raw and transformed) in its own window."""
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
        )
    )
    k4a.start()
    while True:
        capture = k4a.get_capture()
        # (window title, image, renderer) table replaces the original if-chain.
        views = (
            ("Depth", capture.depth,
             lambda img: colorize(img, (None, 5000))),
            ("IR", capture.ir,
             lambda img: colorize(img, (None, 500), colormap=cv2.COLORMAP_JET)),
            ("Color", capture.color,
             lambda img: img),
            ("Transformed Depth", capture.transformed_depth,
             lambda img: colorize(img, (None, 5000))),
            ("Transformed Color", capture.transformed_color,
             lambda img: img),
            ("Transformed IR", capture.transformed_ir,
             lambda img: colorize(img, (None, 500), colormap=cv2.COLORMAP_JET)),
        )
        for title, image, render in views:
            if image is not None:
                cv2.imshow(title, render(image))
        key = cv2.waitKey(10)
        if key != -1:
            cv2.destroyAllWindows()
            break
    k4a.stop()
def device_not_exists(device_id_not_exists: int) -> Iterator[PyK4A]:
    """Yield a device bound to a nonexistent id; close it best-effort afterwards."""
    device = PyK4A(device_id=device_id_not_exists)
    yield device
    # Auto-close; errors are expected since the device may never have opened.
    try:
        if device.opened:
            device.close()
    except K4AException:
        pass
def device(device_id_good: int) -> Iterator[PyK4A]:
    """Yield a device bound to a valid id; close it best-effort afterwards."""
    device = PyK4A(device_id=device_id_good)
    yield device
    # Auto-close; swallow SDK errors during teardown.
    try:
        if device.opened:
            device.close()
    except K4AException:
        pass
def test_reset_color_control_to_default(device: PyK4A):
    """reset_color_control_to_default() restores brightness after a change."""
    DEFAULT_BRIGHTNESS = 128
    device.open()
    device.reset_color_control_to_default()
    assert device.brightness == DEFAULT_BRIGHTNESS
    # Change it, then verify reset brings the default back.
    device.brightness = 123
    assert device.brightness == 123
    device.reset_color_control_to_default()
    assert device.brightness == DEFAULT_BRIGHTNESS
def run(self) -> None:
    """Capture frames in a loop until halted, counting depth frames."""
    print("Start run")
    camera = PyK4A(device_id=self._device_id, thread_safe=self.thread_safe)
    camera.start()
    while not self._halt:
        frame = camera.get_capture()
        assert frame.depth is not None
        self._count += 1
        sleep(0.1)
    camera.stop()
    # Drop the reference so the device can be released promptly.
    del camera
    print("Stop run")
def main(dp, device_id=0):
    """Dump the camera intrinsics of one device to <dp>/<serial>.json."""
    k4a = PyK4A(
        Config(
            color_resolution=ColorResolution.RES_1536P,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            camera_fps=pyk4a.FPS.FPS_30,
        ),
        device_id=device_id,
    )
    # Legacy fork API: connect() with look-up-table support.
    k4a.connect(lut=True)
    params = k4a.get_cam_params()
    serial = k4a.get_serial_number()
    assert serial is not None
    os.makedirs(dp, exist_ok=True)
    with open(osp.join(dp, f'{serial}.json'), 'w') as f:
        json.dump(params, f, indent=4)
def main():
    """Start the camera with the default config and run model inference on it."""
    camera = PyK4A()
    camera.start()
    args = parse_arguments()
    bbox_path, a2j_path = get_model()
    setup = ModelSetup(BBOX_MODEL_PATH=bbox_path,
                       A2J_model_path=a2j_path,
                       trt_optim=args.trt)
    run_camera_inferance(camera, setup)
def __init__(self):
    """Open the Kinect4 camera via pyk4a and set pre-calibrated intrinsics."""
    print("使用pyk4a打开Kinect4")
    self.cam = PyK4A(
        Config(
            color_resolution=ColorResolution.RES_1536P,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=True,
        )
    )
    # Stream dimensions for RES_1536P color and NFOV-unbinned depth.
    self.COLOR_HGT = 1536
    self.COLOR_WID = 2048
    self.DEPTH_HGT = 576
    self.DEPTH_WID = 640
    # Pre-calibrated distortion coefficients and camera matrix.
    self.distCoeffs = np.array([
        0.513059, -2.77779, -0.000323, 0.000703,
        1.62693, 0.391017, -2.593868, 1.548565,
    ])
    self.cameramtx = np.array([
        [976.405945, 0, 1020.967651],
        [0, 976.266479, 779.519653],
        [0, 0, 1],
    ])
    self.cam.connect()
def main() -> None:
    """Live BGR viewer; quits on any key press.

    Bug fix: the original tested ``if capture.color:``, which raises
    ``ValueError`` ("The truth value of an array with more than one element
    is ambiguous") as soon as a frame arrives, because ``capture.color`` is
    a numpy array.  Presence must be tested with ``is not None`` (the style
    used by the other viewers in this project).
    """
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=True,
        )
    )
    k4a.start()
    while True:
        capture = k4a.get_capture()
        if capture.color is not None:
            cv2.imshow("k4a", capture.color[:, :, :3])
            key = cv2.waitKey(10)
            if key != -1:
                cv2.destroyAllWindows()
                break
    k4a.stop()
def process_value(self):
    """Advance the index and grab one capture, lazily opening the Kinect."""
    self.index += 1
    if self.use_kinect:
        if self.k4a is None:
            # Deferred import/open so non-Kinect runs never touch the SDK.
            import pyk4a
            from pyk4a import Config, PyK4A, ColorResolution
            camera = PyK4A(
                Config(color_resolution=ColorResolution.RES_1536P,
                       depth_mode=pyk4a.DepthMode.NFOV_UNBINNED))
            camera.connect(lut=True)
            self.k4a = camera
        result = self.k4a.get_capture2(verbose=30)
        # can return result here
    else:
        self.sleeper.sleep()
    return self.index, get_result(f'{self.index}', self.name)
def device(device_id: int) -> Iterator[PyK4A]:
    """Yield a device; afterwards stop IMU and cameras and close, ignoring errors."""
    device = PyK4A(device_id=device_id)
    yield device
    if device._device_handle:
        # Tear everything down best-effort: each step may legitimately fail.
        for shutdown in (device._stop_imu, device._stop_cameras, device.close):
            try:
                shutdown()
            except K4AException:
                pass
def main():
    """Scatter-plot one synchronized point cloud with per-point colors."""
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            camera_fps=pyk4a.FPS.FPS_5,
            depth_mode=pyk4a.DepthMode.WFOV_2X2BINNED,
            synchronized_images_only=True,
        )
    )
    k4a.start()

    # White-balance getter/setter act directly on the device.
    for wb in (4500, 4510):
        k4a.whitebalance = wb
        assert k4a.whitebalance == wb

    # Two identical capture loops, as in the original: the first valid
    # capture is discarded (presumably a warm-up frame — verify intent).
    capture = None
    for _ in range(2):
        while True:
            capture = k4a.get_capture()
            if np.any(capture.depth) and np.any(capture.color):
                break

    points = capture.depth_point_cloud.reshape((-1, 3))
    # BGR(A) -> RGB for matplotlib.
    colors = capture.transformed_color[..., (2, 1, 0)].reshape((-1, 3))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")
    ax.scatter(points[:, 0], points[:, 1], points[:, 2], s=1, c=colors / 255)
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_zlabel("z")
    ax.set_xlim(-2000, 2000)
    ax.set_ylim(-2000, 2000)
    ax.set_zlim(0, 4000)
    ax.view_init(elev=-90, azim=-90)
    plt.show()
    k4a.stop()
def capture_frames(iterations=50, save_path=const.SAVE_DATASET_PATH):
    """
    Capture and store IR, depth and RGB images from the camera.

    :param iterations: the total number of frames to record
    :param save_path: full path to the data directory

    Bug fix: frame numbering previously continued at ``len(existing) + 1``,
    which skipped one index whenever frames already existed.  Existing frames
    occupy indices 0..N-1, so the next free index is simply N.
    """
    ir_dir_path = os.path.join(save_path, "ir_image")
    depth_dir_path = os.path.join(save_path, "depth_image")
    rgb_dir_path = os.path.join(save_path, "rgb_image")
    casted_dir_path = os.path.join(save_path, "casted_ir_image")

    # Next free index equals the number of already-recorded frames.
    num_existing_frames = len(glob(os.path.join(ir_dir_path, "*.png")))

    # Load camera with default config
    k4a = PyK4A()
    k4a.start()

    print("Data Capture Starting!\n")
    for i in range(iterations):
        print(f"frame {i} out of {iterations}")
        capture = k4a.get_capture()
        ir_img = capture.ir
        depth_img = capture.depth
        rgb_img = capture.color
        frame_name = str(num_existing_frames + i).zfill(7) + ".png"
        # Store the images
        cv2.imwrite(os.path.join(ir_dir_path, frame_name), ir_img)
        cv2.imwrite(os.path.join(depth_dir_path, frame_name), depth_img)
        cv2.imwrite(os.path.join(rgb_dir_path, frame_name), rgb_img)
        # 8-bit cast of the IR image for quick visual inspection.
        cv2.imwrite(os.path.join(casted_dir_path, frame_name),
                    ir_img.astype(np.uint8))

    print(f"IR images stored at: {ir_dir_path}")
    print(f"Depth images stored at: {depth_dir_path}")
    print(f"RGBA images stored at: {rgb_dir_path}")
    print(f"Casted IR images stored at: {casted_dir_path}")
    print("Data capture finished")
def main():
    """Stream point clouds into a cupoch visualizer.

    Note: the loop exits as soon as a capture has an empty depth OR color
    image (original behavior preserved).
    """
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=True,
        )
    )
    k4a.start()

    # White-balance getter/setter act directly on the device.
    for wb in (4500, 4510):
        k4a.whitebalance = wb
        assert k4a.whitebalance == wb

    vis = cph.visualization.Visualizer()
    vis.create_window()
    pcd = cph.geometry.PointCloud()

    first_frame = True
    while True:
        capture = k4a.get_capture()
        if not np.any(capture.depth) or not np.any(capture.color):
            break
        pcd.points = cph.utility.Vector3fVector(
            capture.depth_point_cloud.reshape((-1, 3)))
        # BGR(A) -> RGB, normalized to [0, 1].
        pcd.colors = cph.utility.Vector3fVector(
            capture.transformed_color[..., (2, 1, 0)].reshape((-1, 3)) / 255)
        if first_frame:
            vis.add_geometry(pcd)
            first_frame = False
        vis.update_geometry(pcd)
        vis.poll_events()
        vis.update_renderer()
    k4a.stop()
def main():
    """Detect ArUco markers in the color stream; 's' saves the frame, 'q' quits."""
    k4a = PyK4A()
    k4a.start()
    img_num = 1
    while True:
        capture = k4a.get_capture()
        color = capture.color
        # Scale 16-bit depth down to an 8-bit image for display.
        depth8 = np.array(
            np.array(capture.depth, dtype=float) / 65535.0 * 255,
            dtype=np.uint8)
        print(depth8.shape)
        print(color.shape)
        # Reverse the first three channels (BGRA -> RGB).
        frame = color[:, :, 2::-1]
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        parameters = aruco.DetectorParameters_create()
        corners, ids, _rejected = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        print(np.mean(depth8))
        print(ids)
        center_p(corners, depth8)
        gray = aruco.drawDetectedMarkers(gray, corners)
        # NOTE: uint8 * 10 wraps modulo 256 — original display behavior kept.
        depth_jet = cv2.applyColorMap(depth8 * 10, cv2.COLORMAP_JET)
        cv2.imshow('frame', depth_jet)
        if cv2.waitKey(1) & 0xFF == ord('s'):
            cv2.imwrite("cam_cal_img/" + str(img_num) + "cal.png", gray)
            img_num += 1
        if cv2.waitKey(10) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
    k4a.close()
res = xm_find_people() res.position.point.x, res.position.point.y, res.position.point.z = transformed_cor res.character = description return res if count >= 10: print(colored("No qualified person!", 'bright red')) res = xm_find_people() res.position.point.x, res.position.point.y, res.position.point.z = -12, -12, -12 res.character = 'No qualified person!' return res if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-d', '--debug', type=bool, default=False) args = parser.parse_args() is_debug = args.debug APP_ID = '23757775' API_KEY = 'K0uRowPaoSfSs0ABvUxurnHG' SECRET_KEY = 'Q6WtWCSFqqMAbKffCrqHQ6DDWRNc0oD7' client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY) camera = PyK4A() camera.config.color_resolution = ColorResolution.RES_1080P camera.start() rospy.init_node('handGestureNode') service = rospy.Service('handGesture', xm_find_people, call_back) rospy.loginfo('GPSR HandGesture\'s vision start!') rospy.spin()
import pyk4a
from pyk4a import Config, PyK4A, ColorResolution
import cv2
import numpy as np

# Legacy pyk4a fork API: connect() and tuple-returning get_capture().
k4a = PyK4A(
    Config(
        color_resolution=ColorResolution.RES_720P,
        depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
        synchronized_images_only=True,
    ))
k4a.connect()
# getters and setters directly get and set on device
k4a.whitebalance = 4500
assert k4a.whitebalance == 4500
k4a.whitebalance = 4510
assert k4a.whitebalance == 4510
# First capture: the tuple API returns color, IR, depth and (pcl=True) a point cloud.
img_color, img_ir, img_depth, img_pcl = k4a.get_capture(
    transform_to_color=False, pcl=True)
# Size the video writer from the IR image (VideoWriter wants (width, height)).
out = cv2.VideoWriter('out.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 20.0,
                      (img_ir.shape[1], img_ir.shape[0]))
while 1:
    img_color, img_ir, img_depth, img_pcl = k4a.get_capture(
        transform_to_color=False, pcl=True)
    if img_color is not None:
        cv2.imshow('k4a color', img_color[:, :, :3])
# NOTE(review): the loop appears truncated here — `out` is created but never
# written to or released in the visible code; confirm against the full file.
def run_camera_inferance(model_setup: ModelSetup,
                         iterations=1000,
                         show_heatmap=False,
                         trt_optim=False):
    """
    Run the model for N number of frames on live camera captures.

    :param model_setup: ModelSetup holding the model and its input format
    :param iterations: the total number of frames to run the model
    :param show_heatmap: set to visualize prediction heat map and mask
    :param trt_optim: build or load a cached TensorRT-optimized model
    """
    if trt_optim:
        from torch2trt import torch2trt, TRTModule
        trt_model_path = os.path.join(
            const.CHECKPOINT_PATH,
            f"{model_setup.loss}_{model_setup.model_name}_{model_setup.input_format}.trt"
        )
        if not os.path.exists(trt_model_path):
            # No cached engine yet: trace the model once and save it.
            print("Creating TRT Model...")
            x = torch.ones(
                (1, 3, const.IMG_SHAPE[0], const.IMG_SHAPE[1])).cuda()
            model_trt = torch2trt(model_setup.model.eval().cuda(), [x],
                                  fp16_mode=True)
            torch.save(model_trt.state_dict(), trt_model_path)
            print(f"TRT Model saved at:\n {trt_model_path}\n")
        else:
            print("Loading TRT Model...")
            model_trt = TRTModule()
            model_trt.load_state_dict(torch.load(trt_model_path))
            print("TRT Model loaded!\n")

    # One axis for the IR image; two extra axes when heat-map display is on.
    if show_heatmap:
        fig = plt.figure(figsize=(6, 7))
        ax1 = fig.add_subplot(3, 1, 1)
        ax1.get_xaxis().set_ticks([])
        ax1.get_yaxis().set_ticks([])
        ax2 = fig.add_subplot(3, 1, 2)
        ax2.get_xaxis().set_ticks([])
        ax2.get_yaxis().set_ticks([])
        ax3 = fig.add_subplot(3, 1, 3)
        ax3.get_xaxis().set_ticks([])
        ax3.get_yaxis().set_ticks([])
    else:
        fig = plt.figure(figsize=(4, 5))
        ax1 = fig.add_subplot(1, 1, 1)
        ax1.get_xaxis().set_ticks([])
        ax1.get_yaxis().set_ticks([])

    # Load camera with default config
    k4a = PyK4A()
    k4a.start()

    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
    for i in range(iterations):
        capture = k4a.get_capture()
        ir_img = capture.ir
        depth_img = capture.depth
        # NOTE(review): w/h are computed but unused below.
        w, h = ir_img.shape[1], ir_img.shape[0]
        transformed_image = input_image(depth_img, ir_img, const.IMG_SHAPE,
                                        model_setup.input_format)

        # Run Infrence
        t1 = time.time()
        if trt_optim:
            prediction = model_trt(transformed_image.cuda())
        else:
            prediction = Run_Inference(model_setup, transformed_image)
        t2 = time.time()
        print(f"infrence time: {t2-t1:1.3f}")

        # First num_classes channels are class heat maps; the last four
        # channels hold height/width and y/x offsets per detection.
        pred_heatmap = prediction[0][0:model_setup.num_classes].max(
            0)[0].float()
        pred_mask = find_prediction_mask(pred_heatmap)[0][0]
        pred_yx_locations = torch.nonzero(pred_mask)
        pred_height = prediction[0][-4][pred_mask]
        pred_width = prediction[0][-3][pred_mask]
        pred_offset_y = prediction[0][-2][pred_mask]
        pred_offset_x = prediction[0][-1][pred_mask]
        pred_bboxes = get_bboxes(pred_yx_locations, pred_height, pred_width,
                                 pred_offset_x, pred_offset_y)

        # Draw each predicted box with its estimated depth value.
        rect = None
        for pred_box in pred_bboxes:
            x, y = pred_box[0], pred_box[1]
            width, height = pred_box[2], pred_box[3]
            rect = patches.Rectangle((x, y), width, height, linewidth=2,
                                     edgecolor='g', facecolor='none')
            ax1.add_patch(rect)
            ax1.text(x, y, str(get_depth(depth_img, pred_box)), fontsize=14,
                     bbox=props)

        # For visualizing purposes
        ir_img[ir_img > 3000] = ir_img.mean()
        ir_img = cv2.resize(ir_img, const.IMG_SHAPE,
                            interpolation=cv2.INTER_NEAREST)
        ax1.set_title('IR Image')
        ax1.get_xaxis().set_ticks([])
        ax1.get_yaxis().set_ticks([])
        ax1.imshow(ir_img, interpolation='nearest', cmap='gray')
        if show_heatmap:
            ax2.get_xaxis().set_ticks([])
            ax2.get_yaxis().set_ticks([])
            ax2.set_title('prediction Heatmap')
            ax2.imshow(pred_heatmap.cpu().numpy(), interpolation='nearest',
                       cmap='gray')
            ax3.set_title('prediction Mask')
            ax3.get_xaxis().set_ticks([])
            ax3.get_yaxis().set_ticks([])
            ax3.imshow(pred_mask.float().cpu().numpy(),
                       interpolation='nearest', cmap='gray')
        plt.draw()
        plt.pause(0.001)
        ax1.clear()
        if show_heatmap:
            ax2.clear()
            ax3.clear()
        # Release per-frame objects before the next capture.
        if rect:
            del rect, capture, prediction
        else:
            del capture, prediction
from argparse import ArgumentParser

from pyk4a import Config, ImageFormat, PyK4A, PyK4ARecord

# CLI: device index plus the output MKV path.
parser = ArgumentParser(description="pyk4a recorder")
parser.add_argument("--device", type=int, help="Device ID", default=0)
parser.add_argument("FILE", type=str, help="Path to MKV file")
args = parser.parse_args()

print(f"Starting device #{args.device}")
config = Config(color_format=ImageFormat.COLOR_MJPG)
device = PyK4A(config=config, device_id=args.device)
device.start()

print(f"Open record file {args.FILE}")
record = PyK4ARecord(device=device, config=config, path=args.FILE)
record.create()

try:
    print("Recording... Press CTRL-C to stop recording.")
    while True:
        record.write_capture(device.get_capture())
except KeyboardInterrupt:
    print("CTRL-C pressed. Exiting.")

record.flush()
record.close()
print(f"{record.captures_count} frames written.")
def __init__(self, screenName=None, baseName=None, useTk=1, sync=0, use=None):
    """Build the control window: state attributes, MQTT clients, Tk widgets
    and menu, open the Azure Kinect, then enter the Tk main loop (blocks)."""
    super().__init__(screenName=screenName, baseName=baseName, useTk=useTk,
                     sync=sync, use=use)
    #######################################################################
    # Attributes ----------------------------------------------------------
    # Per-person metadata; index 0 = person 1, index 1 = person 2.
    self.age = ['User', 'User']
    self.sex = ['User', 'User']
    self.height = ['User', 'User']
    self.weight = ['User', 'User']
    self.is_streaming = False
    self.is_displaying = False
    self.frame_count = 0
    self.time_stamp_tmp = 0
    self.depth_frame_tmp = 0
    self.rgb_frame_tmp = 0
    self.activity = StringVar()
    self.activity1 = StringVar()
    self.sensor_ignore = BooleanVar()
    self.buffer_ignore = BooleanVar()
    self.debug_mode = BooleanVar()
    # Clients
    self.kinect_client = PahoMqtt(BROKER, "KINECT", c_msg="kinect")
    self.kinect_client.loop_start()
    self.sound_client = PahoMqtt(BROKER, "SOUND", c_msg="sound")
    self.sound_client.loop_start()
    self.clients = list()
    dis = 0  # count of skipped (inactive) sensors so far
    for i, item in enumerate(SENSORS):
        if item[2]:  # only create a client when this entry is enabled
            self.clients.append(
                PahoMqtt(BROKER, f"{item[1]}", c_msg=item[0]))
            self.clients[i - dis].subscribe(item[0])
            self.clients[i - dis].loop_start()
        else:
            dis += 1
    # Attributes ----------------------------------------------------------
    #######################################################################
    # Tk widgets ----------------------------------------------------------
    self.title("Control")
    self.resizable(0, 0)
    self.configure(bg='white')
    # Sensor Frame 1
    self.sensor_frame1 = LabelFrame(self, text="Sensor control",
                                    background='white')
    self.sensor_frame1.pack(side=LEFT, fill="y")
    self.sensor_state = list()
    for item in self.clients:
        self.sensor_state.append(
            Label(self.sensor_frame1, text=f"SENSOR {item.info}",
                  background='white', font=("default", 15, 'bold')))
        self.sensor_state[-1].grid(row=len(self.sensor_state), column=0)
    self.start_btn = ttk.Button(self.sensor_frame1, text="Refresh",
                                command=self.refresh)
    self.start_btn.grid(row=len(self.clients) + 1, column=0)
    # Stream Frame 2
    self.sensor_frame2 = LabelFrame(self, text="Person 1",
                                    background='white')
    self.sensor_frame2.pack(side=LEFT, fill="y")
    self.user_age = Label(self.sensor_frame2, text="Age",
                          background='white', font=("default", 10, 'bold'))
    self.user_age.grid(row=0, column=0)
    self.age_label = Label(self.sensor_frame2, text=self.age[0],
                           background='white', font=("default", 10, 'bold'))
    self.age_label.grid(row=0, column=1)
    self.user_sex = Label(self.sensor_frame2, text="Sex",
                          background='white', font=("default", 10, 'bold'))
    self.user_sex.grid(row=1, column=0)
    self.sex_label = Label(self.sensor_frame2, text=self.sex[0],
                           background='white', font=("default", 10, 'bold'))
    self.sex_label.grid(row=1, column=1)
    self.user_height = Label(self.sensor_frame2, text="Height",
                             background='white',
                             font=("default", 10, 'bold'))
    self.user_height.grid(row=2, column=0)
    self.height_label = Label(self.sensor_frame2, text=self.height[0],
                              background='white',
                              font=("default", 10, 'bold'))
    self.height_label.grid(row=2, column=1)
    self.user_weight = Label(self.sensor_frame2, text="Weight",
                             background='white',
                             font=("default", 10, 'bold'))
    self.user_weight.grid(row=3, column=0)
    self.weight_label = Label(self.sensor_frame2, text=self.weight[0],
                              background='white',
                              font=("default", 10, 'bold'))
    self.weight_label.grid(row=3, column=1)
    self.activity_menu = ttk.Combobox(self.sensor_frame2, value=ACTIVITIES,
                                      textvariable=self.activity)
    self.activity_menu.current(0)
    self.activity_menu.config(state="readonly", width=15)
    self.activity_menu.bind("<<ComboboxSelected>>")
    self.activity_menu.grid(row=4, column=0, columnspan=2, pady=5)
    self.stream_start_btn = ttk.Button(self.sensor_frame2,
                                       text="Stream start",
                                       command=self.stream_start,
                                       width=11)
    self.stream_start_btn.grid(row=5, column=0, padx=2, pady=2)
    self.stream_stop_btn = ttk.Button(self.sensor_frame2,
                                      text="Stream stop",
                                      command=self.stream_stop,
                                      width=11)
    # Stop/reset/save/activity buttons start disabled until streaming begins.
    self.stream_stop_btn["state"] = DISABLED
    self.stream_stop_btn.grid(row=5, column=1, padx=2, pady=2)
    self.stream_reset_btn = ttk.Button(
        self.sensor_frame2,
        text="Stream reset",
        command=lambda: self.stream_reset(delete=True),
        width=11)
    self.stream_reset_btn["state"] = DISABLED
    self.stream_reset_btn.grid(row=7, column=1, padx=2, pady=2)
    self.stream_save_btn = ttk.Button(self.sensor_frame2,
                                      text="Stream save",
                                      command=self.stream_save,
                                      width=11)
    self.stream_save_btn["state"] = DISABLED
    self.stream_save_btn.grid(row=7, column=0, padx=2, pady=2)
    self.act_start_btn = ttk.Button(self.sensor_frame2,
                                    text="Activity start",
                                    command=lambda: self.activity_start(0),
                                    width=11)
    self.act_start_btn["state"] = DISABLED
    self.act_start_btn.grid(row=6, column=0, padx=2, pady=5)
    self.act_end_btn = ttk.Button(self.sensor_frame2,
                                  text="Activity end",
                                  command=lambda: self.activity_end(0),
                                  width=11)
    self.act_end_btn["state"] = DISABLED
    self.act_end_btn.grid(row=6, column=1, padx=2, pady=5)
    ###################################################################################
    # Stream Frame 3
    self.sensor_frame3 = LabelFrame(self, text="Person 2",
                                    background='white')
    self.sensor_frame3.pack(side=LEFT, fill="y")
    self.user1_age = Label(self.sensor_frame3, text="Age",
                           background='white', font=("default", 10, 'bold'))
    self.user1_age.grid(row=0, column=0)
    self.age1_label = Label(self.sensor_frame3, text=self.age[1],
                            background='white',
                            font=("default", 10, 'bold'))
    self.age1_label.grid(row=0, column=1)
    self.user1_sex = Label(self.sensor_frame3, text="Sex",
                           background='white', font=("default", 10, 'bold'))
    self.user1_sex.grid(row=1, column=0)
    self.sex1_label = Label(self.sensor_frame3, text=self.sex[1],
                            background='white',
                            font=("default", 10, 'bold'))
    self.sex1_label.grid(row=1, column=1)
    self.user1_height = Label(self.sensor_frame3, text="Height",
                              background='white',
                              font=("default", 10, 'bold'))
    self.user1_height.grid(row=2, column=0)
    self.height1_label = Label(self.sensor_frame3, text=self.height[1],
                               background='white',
                               font=("default", 10, 'bold'))
    self.height1_label.grid(row=2, column=1)
    self.user1_weight = Label(self.sensor_frame3, text="Weight",
                              background='white',
                              font=("default", 10, 'bold'))
    self.user1_weight.grid(row=3, column=0)
    self.weight1_label = Label(self.sensor_frame3, text=self.weight[1],
                               background='white',
                               font=("default", 10, 'bold'))
    self.weight1_label.grid(row=3, column=1)
    self.activity1_menu = ttk.Combobox(self.sensor_frame3,
                                       value=ACTIVITIES1,
                                       textvariable=self.activity1)
    self.activity1_menu.current(0)
    self.activity1_menu.config(state="readonly", width=15)
    self.activity1_menu.bind("<<ComboboxSelected>>")
    self.activity1_menu.grid(row=4, column=0, columnspan=2, pady=5)
    self.act_start_btn1 = ttk.Button(
        self.sensor_frame3,
        text="Activity start",
        command=lambda: self.activity_start(1),
        width=11)
    self.act_start_btn1["state"] = DISABLED
    self.act_start_btn1.grid(row=5, column=0, padx=2, pady=5)
    self.act_end_btn1 = ttk.Button(self.sensor_frame3,
                                   text="Activity end",
                                   command=lambda: self.activity_end(1),
                                   width=11)
    self.act_end_btn1["state"] = DISABLED
    self.act_end_btn1.grid(row=5, column=1, padx=2, pady=5)
    ###################################################################################
    # Menu
    menubar = Menu(self)
    tool = Menu(menubar, tearoff=0)
    tool.add_command(label="Insert user info", command=self.user_info)
    tool.add_command(label="Display kinect", command=self.disp_kinect)
    tool.add_checkbutton(label="Ignore sensor error", onvalue=1, offvalue=0,
                         variable=self.sensor_ignore)
    tool.add_checkbutton(label="Ignore buffer error", onvalue=1, offvalue=0,
                         variable=self.buffer_ignore)
    tool.add_checkbutton(label="debug mode", onvalue=1, offvalue=0,
                         variable=self.debug_mode)
    tool.add_command(label="Close sensors", command=self.close)
    menubar.add_cascade(label="Tools", menu=tool)
    self.config(menu=menubar)
    # Tk widgets ----------------------------------------------------------
    #######################################################################
    # Sensor controls -----------------------------------------------------
    self.azure = PyK4A()
    self.azure.start()
    self.stream_reset()
    self.get_video()
    self.stream_video()
    self.stream_depth()
    self.set_state()
    self.refresh()
    # Main loop -----------------------------------------------------------
    self.mainloop()
import pyk4a
from pyk4a import Config, PyK4A, ColorResolution
import cv2
import numpy as np

# Legacy fork API: connect()/disconnect() and color_only get_capture().
k4a = PyK4A(
    Config(
        color_resolution=ColorResolution.RES_720P,
        depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
    )
)
k4a.connect()

# getters and setters directly get and set on device
for wb in (4500, 4510):
    k4a.whitebalance = wb
    assert k4a.whitebalance == wb

while True:
    frame = k4a.get_capture(color_only=True)
    if np.any(frame):
        cv2.imshow('k4a', frame[:, :, :3])
        key = cv2.waitKey(10)
        if key != -1:
            cv2.destroyAllWindows()
            break
k4a.disconnect()
def main():
    """Live-plot IMU temperature, accelerometer and gyro for MAX_SAMPLES samples."""
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=True,
        )
    )
    k4a.start()

    plt.ion()
    fig, axes = plt.subplots(3, sharex=False)

    # Ring of per-channel sample buffers (timestamps stored but not plotted).
    data = {
        "temperature": [0] * MAX_SAMPLES,
        "acc_x": [0] * MAX_SAMPLES,
        "acc_y": [0] * MAX_SAMPLES,
        "acc_z": [0] * MAX_SAMPLES,
        "acc_timestamp": [0] * MAX_SAMPLES,
        "gyro_x": [0] * MAX_SAMPLES,
        "gyro_y": [0] * MAX_SAMPLES,
        "gyro_z": [0] * MAX_SAMPLES,
        "gyro_timestamp": [0] * MAX_SAMPLES,
    }

    zeros = np.zeros(MAX_SAMPLES)
    # Axis assignment: temperature on top, accel in the middle, gyro below.
    axis_of = {
        "temperature": axes[0],
        "acc_x": axes[1], "acc_y": axes[1], "acc_z": axes[1],
        "gyro_x": axes[2], "gyro_y": axes[2], "gyro_z": axes[2],
    }
    plotted = ("temperature", "acc_x", "acc_y", "acc_z",
               "gyro_x", "gyro_y", "gyro_z")
    lines = {name: axis_of[name].plot(zeros, label=name)[0]
             for name in plotted}

    for i in range(MAX_SAMPLES):
        sample = k4a.get_imu_sample()
        # Flatten the 3-vector fields into scalar channels.
        sample["acc_x"], sample["acc_y"], sample["acc_z"] = sample.pop(
            "acc_sample")
        sample["gyro_x"], sample["gyro_y"], sample["gyro_z"] = sample.pop(
            "gyro_sample")
        for key, value in sample.items():
            data[key][i] = value
        if i == 0:
            set_default_data(data)
        for key in plotted:
            lines[key].set_data(range(MAX_SAMPLES), data[key])
        # Rescale each axis to the combined range of its channels.
        acc_all = data["acc_x"] + data["acc_y"] + data["acc_z"]
        gyro_all = data["gyro_x"] + data["gyro_y"] + data["gyro_z"]
        lines["acc_x"].axes.set_ylim(min(acc_all), max(acc_all))
        lines["gyro_x"].axes.set_ylim(min(gyro_all), max(gyro_all))
        temps = data["temperature"][0:i + 1]
        lines["temperature"].axes.set_ylim(min(temps), max(temps))
        fig.canvas.draw()
        fig.canvas.flush_events()

    k4a._stop_imu()
    k4a.stop()