Example #1
def main():
    k4a_config = Config(
        color_resolution=pyk4a.ColorResolution.RES_3072P,
        depth_mode=pyk4a.DepthMode.OFF,
        synchronized_images_only=False,
        camera_fps=pyk4a.FPS.FPS_15,
    )
    k4a = PyK4A(k4a_config, device_id=DEV_ID)
    k4a.start()

    # getters and setters directly get and set on device
    k4a.whitebalance = 4500
    assert k4a.whitebalance == 4500
    k4a.whitebalance = 4510
    assert k4a.whitebalance == 4510

    cv2.namedWindow("k4a", cv2.WINDOW_NORMAL)
    while True:
        capture = k4a.get_capture()
        if np.any(capture.color):
            cv2.imshow("k4a", capture.color[:, :, :3])
            key = cv2.waitKey(10)
            if key == ord(' '):
                time = datetime.datetime.strftime(datetime.datetime.now(),
                                                  '%Y-%m-%d-%H-%M-%S')
                cv2.imwrite(
                    osp.join('imgs', '4cam', str(DEV_ID), time + '.jpg'),
                    capture.color[:, :, :3])
                print('save img!' + str(DEV_ID))
            elif key != -1:
                cv2.destroyAllWindows()
                break
    k4a.stop()
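These snippets are shown without their module headers. A minimal import block that Example #1 assumes (DEV_ID is a placeholder for the device index, not part of the original) would be:

import datetime
import os.path as osp

import cv2
import numpy as np

import pyk4a
from pyk4a import Config, PyK4A

DEV_ID = 0  # assumption: index of the Azure Kinect to open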
Example #2
def bench(config: Config, device_id: int):
    device = PyK4A(config=config, device_id=device_id)
    device.start()
    depth = color = depth_period = color_period = 0
    print("Press CTRL-C top stop benchmark")
    started_at = started_at_period = monotonic()
    while True:
        try:
            capture = device.get_capture()
            if capture.color is not None:
                color += 1
                color_period += 1
            if capture.depth is not None:
                depth += 1
                depth_period += 1
            elapsed_period = monotonic() - started_at_period
            if elapsed_period >= 2:
                print(
                    f"Color: {color_period / elapsed_period:0.2f} FPS, Depth: {depth_period / elapsed_period: 0.2f} FPS"
                )
                color_period = depth_period = 0
                started_at_period = monotonic()
        except KeyboardInterrupt:
            break
    elapsed = monotonic() - started_at
    device.stop()
    print()
    print(
        f"Result: Color: {color / elapsed:0.2f} FPS, Depth: {depth / elapsed: 0.2f} FPS"
    )
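bench() relies on time.monotonic and the pyk4a Config class; a hedged sketch of the imports it assumes and a typical invocation:

from time import monotonic

from pyk4a import Config, PyK4A

if __name__ == "__main__":
    # Assumption: benchmark the first connected device with the default config
    bench(Config(), device_id=0)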
Example #3
def main():
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.OFF,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=False,
        ))
    k4a.start()

    # getters and setters directly get and set on device
    k4a.whitebalance = 4500
    assert k4a.whitebalance == 4500
    k4a.whitebalance = 4510
    assert k4a.whitebalance == 4510

    while True:
        capture = k4a.get_capture()
        if np.any(capture.depth):
            cv2.imshow("k4a",
                       colorize(capture.depth, (None, 5000), cv2.COLORMAP_HSV))
            key = cv2.waitKey(10)
            if key != -1:
                cv2.destroyAllWindows()
                break
    k4a.stop()
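Several examples call a colorize() helper that is never shown. Below is a sketch consistent with how it is used here (clip, normalize to 8 bits, apply an OpenCV colormap); this is an assumption, not the snippets' actual helper:

import cv2


def colorize(image, clipping_range=(None, None), colormap=cv2.COLORMAP_HSV):
    # Clip the raw depth/IR values to the requested range, if any
    if clipping_range[0] or clipping_range[1]:
        img = image.clip(clipping_range[0], clipping_range[1])
    else:
        img = image.copy()
    # Normalize to 8-bit and apply a false-color map
    img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    return cv2.applyColorMap(img, colormap)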
Example #4
def main():
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=True,
        )
    )
    k4a.start()

    # getters and setters directly get and set on device
    k4a.whitebalance = 4500
    assert k4a.whitebalance == 4500
    k4a.whitebalance = 4510
    assert k4a.whitebalance == 4510

    while True:
        capture = k4a.get_capture()
        if np.any(capture.color):
            cv2.imshow("k4a", capture.color[:, :, :3])
            key = cv2.waitKey(10)
            if key != -1:
                cv2.destroyAllWindows()
                break
    k4a.stop()
Example #5
def main():
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=False,
        ))
    k4a.start()

    # getters and setters directly get and set on device
    k4a.whitebalance = 4500
    assert k4a.whitebalance == 4500
    k4a.whitebalance = 4510
    assert k4a.whitebalance == 4510
    while True:
        capture = k4a.get_capture()
        if np.any(capture.depth) and np.any(capture.color):
            trns_depth = capture.transformed_depth
            cv2.imshow('trns',
                       colorize(trns_depth, (None, None), cv2.COLORMAP_HSV))
            point_cloud = capture.transformed_depth_point_cloud
            with open('depthvalues.json', 'w') as f:
                json.dump(trns_depth.tolist(), f)
            with open('pointcloud.json', 'w') as f:
                json.dump(point_cloud.tolist(), f)

            cv2.imwrite('colorPC.bmp', capture.color)
            # BMP cannot hold 16-bit data; save the uint16 depth as PNG instead
            cv2.imwrite('trDepthPC.png', trns_depth)
            key = cv2.waitKey(1)
            if key == ord('q'):
                cv2.destroyAllWindows()
                break
    k4a.stop()
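Dumping full-resolution arrays to JSON on every frame is very slow; if the exact file format is not a requirement, NumPy's binary format is a lighter alternative (a sketch, not part of the original):

np.save('depthvalues.npy', trns_depth)  # reload later with np.load
np.save('pointcloud.npy', point_cloud)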
Example #6
def main():
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
        ))
    k4a.start()

    while True:
        capture = k4a.get_capture()
        if capture.depth is not None:
            cv2.imshow("Depth", colorize(capture.depth, (None, 5000)))
        if capture.ir is not None:
            cv2.imshow("IR", colorize(capture.ir, (None, 500), colormap=cv2.COLORMAP_JET))
        if capture.color is not None:
            cv2.imshow("Color", capture.color)
        if capture.transformed_depth is not None:
            cv2.imshow("Transformed Depth", colorize(capture.transformed_depth, (None, 5000)))
        if capture.transformed_color is not None:
            cv2.imshow("Transformed Color", capture.transformed_color)
        if capture.transformed_ir is not None:
            cv2.imshow("Transformed IR", colorize(capture.transformed_ir, (None, 500), colormap=cv2.COLORMAP_JET))

        key = cv2.waitKey(10)
        if key != -1:
            cv2.destroyAllWindows()
            break

    k4a.stop()
Example #7
def device(device_id_good: int) -> Iterator[PyK4A]:
    device = PyK4A(device_id=device_id_good)
    yield device
    # autoclose
    try:
        if device.opened:
            device.close()
    except K4AException:
        pass
Example #8
def device_not_exists(device_id_not_exists: int) -> Iterator[PyK4A]:
    device = PyK4A(device_id=device_id_not_exists)
    yield device
    # autoclose
    try:
        if device.opened:
            device.close()
    except K4AException:
        pass
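Examples #7 and #8 read like pytest fixtures from a test suite; each is presumably preceded by a decorator along these lines (an assumption, not shown in the original):

from typing import Iterator

import pytest

from pyk4a import K4AException, PyK4A


@pytest.fixture()
def device(device_id_good: int) -> Iterator[PyK4A]:
    ...  # body as in Example #7 above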
Example #9
    def run(self) -> None:
        print("Start run")
        camera = PyK4A(device_id=self._device_id, thread_safe=self.thread_safe)
        camera.start()
        while not self._halt:
            capture = camera.get_capture()
            assert capture.depth is not None
            self._count += 1
        sleep(0.1)
        camera.stop()
        del camera
        print("Stop run")
Example #10
def main(dp, device_id=0):
    fps_config = pyk4a.FPS.FPS_30
    k4a = PyK4A(Config(color_resolution=ColorResolution.RES_1536P,
                       depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
                       camera_fps=fps_config),
                device_id=device_id)
    k4a.connect(lut=True)
    cam_params = k4a.get_cam_params()
    serial_number = k4a.get_serial_number()
    assert serial_number is not None
    os.makedirs(dp, exist_ok=True)
    with open(osp.join(dp, f'{serial_number}.json'), 'w') as f:
        json.dump(cam_params, f, indent=4)
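connect(lut=True), get_cam_params() and get_serial_number() are not part of upstream pyk4a; they look like a fork's API. A rough upstream-pyk4a equivalent for grabbing the serial number and calibration (an assumption about the intent):

k4a = PyK4A(Config(color_resolution=ColorResolution.RES_1536P,
                   depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
                   camera_fps=pyk4a.FPS.FPS_30),
            device_id=device_id)
k4a.start()
serial_number = k4a.serial               # device serial string in upstream pyk4a
calibration_json = k4a.calibration_raw   # raw calibration blob as a JSON string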
Example #11
def main():
    # Load camera with default config
    k4a = PyK4A()
    k4a.start()

    args = parse_arguments()
    bbox_path, a2j_path = get_model()

    model_setup = ModelSetup(BBOX_MODEL_PATH=bbox_path,
                             A2J_model_path=a2j_path,
                             trt_optim=args.trt)

    run_camera_inferance(k4a, model_setup)
Example #12
    def __init__(self):
        print("Opening Kinect4 with pyk4a")
        self.cam = PyK4A(
            Config(color_resolution=ColorResolution.RES_1536P,
                   depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
                   synchronized_images_only=True))
        self.COLOR_HGT = 1536
        self.COLOR_WID = 2048
        self.DEPTH_HGT = 576
        self.DEPTH_WID = 640
        self.distCoeffs = np.array([
            0.513059, -2.77779, -0.000323, 0.000703, 1.62693, 0.391017,
            -2.593868, 1.548565
        ])
        self.cameramtx = np.array([[976.405945, 0, 1020.967651],
                                   [0, 976.266479, 779.519653], [0, 0, 1]])
        self.cam.connect()
Example #13
def main() -> None:
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=True,
        ))
    k4a.start()

    while True:
        capture = k4a.get_capture()
        # `capture.color` is a numpy array; its truth value is ambiguous
        if np.any(capture.color):
            cv2.imshow("k4a", capture.color[:, :, :3])
            key = cv2.waitKey(10)
            if key != -1:
                cv2.destroyAllWindows()
                break
    k4a.stop()
Example #14
    def process_value(self):
        self.index += 1
        if self.use_kinect:
            if self.k4a is None:
                import pyk4a
                from pyk4a import Config, PyK4A, ColorResolution

                k4a = PyK4A(
                    Config(color_resolution=ColorResolution.RES_1536P,
                           depth_mode=pyk4a.DepthMode.NFOV_UNBINNED))
                k4a.connect(lut=True)
                self.k4a = k4a

            result = self.k4a.get_capture2(verbose=30)
            # can return result here
        else:
            self.sleeper.sleep()
        return self.index, get_result(f'{self.index}', self.name)
Example #15
def device(device_id: int) -> Iterator[PyK4A]:
    device = PyK4A(device_id=device_id)
    yield device

    if device._device_handle:
        # close all
        try:
            device._stop_imu()
        except K4AException:
            pass
        try:
            device._stop_cameras()
        except K4AException:
            pass
        try:
            device.close()
        except K4AException:
            pass
Example #16
def main():
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            camera_fps=pyk4a.FPS.FPS_5,
            depth_mode=pyk4a.DepthMode.WFOV_2X2BINNED,
            synchronized_images_only=True,
        )
    )
    k4a.start()

    # getters and setters directly get and set on device
    k4a.whitebalance = 4500
    assert k4a.whitebalance == 4500
    k4a.whitebalance = 4510
    assert k4a.whitebalance == 4510
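    # Warm-up: wait until a capture arrives with both depth and color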
    while True:
        capture = k4a.get_capture()
        if np.any(capture.depth) and np.any(capture.color):
            break
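    # Then take the next complete capture for the point cloud below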
    while True:
        capture = k4a.get_capture()
        if np.any(capture.depth) and np.any(capture.color):
            break
    points = capture.depth_point_cloud.reshape((-1, 3))

    colors = capture.transformed_color[..., (2, 1, 0)].reshape((-1, 3))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")
    ax.scatter(
        points[:, 0], points[:, 1], points[:, 2], s=1, c=colors / 255,
    )
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_zlabel("z")
    ax.set_xlim(-2000, 2000)
    ax.set_ylim(-2000, 2000)
    ax.set_zlim(0, 4000)
    ax.view_init(elev=-90, azim=-90)
    plt.show()

    k4a.stop()
Example #17
def capture_frames(iterations=50, save_path=const.SAVE_DATASET_PATH):
    """
    Capture and store images from the camera

    :param iterations: the total number of frames to record
    :param save_path: full path to the data directory
    """
    ir_dir_path = os.path.join(save_path, "ir_image")
    depth_dir_path = os.path.join(save_path, "depth_image")
    rgb_dir_path = os.path.join(save_path, "rgb_image")
    casted_dir_path = os.path.join(save_path, "casted_ir_image")
    # Make sure the output directories exist before writing frames
    for dir_path in (ir_dir_path, depth_dir_path, rgb_dir_path, casted_dir_path):
        os.makedirs(dir_path, exist_ok=True)

    # Continue numbering after any frames already on disk; len() already
    # gives the next zero-based index, so no +1 is needed
    num_existing_frames = len(glob(os.path.join(ir_dir_path, "*.png")))

    # Load camera with default config
    k4a = PyK4A()
    k4a.start()

    print("Data Capture Starting!\n")
    for i in range(iterations):
        print(f"frame {i} outof {iterations}")

        capture = k4a.get_capture()
        ir_img = capture.ir
        depth_img = capture.depth
        rgb_img = capture.color

        frame_name = str(num_existing_frames + i).zfill(7) + ".png"

        # Store the images
        cv2.imwrite(os.path.join(ir_dir_path, frame_name), ir_img)
        cv2.imwrite(os.path.join(depth_dir_path, frame_name), depth_img)
        cv2.imwrite(os.path.join(rgb_dir_path, frame_name), rgb_img)
        cv2.imwrite(os.path.join(casted_dir_path, frame_name),
                    ir_img.astype(np.uint8))

    print(f"IR images stored at: {ir_dir_path}")
    print(f"Depth images stored at: {depth_dir_path}")
    print(f"RGBA images stored at: {rgb_dir_path}")
    print(f"Casted IR images atored at: {casted_dir_path}")
    print("Data capture finished")
Example #18
def main():
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=True,
        ))
    k4a.start()

    # getters and setters directly get and set on device
    k4a.whitebalance = 4500
    assert k4a.whitebalance == 4500
    k4a.whitebalance = 4510
    assert k4a.whitebalance == 4510

    vis = cph.visualization.Visualizer()
    vis.create_window()
    pcd = cph.geometry.PointCloud()
    frame_count = 0
    while True:
        capture = k4a.get_capture()
        if not np.any(capture.depth) or not np.any(capture.color):
            break
        points = capture.depth_point_cloud.reshape((-1, 3))
        colors = capture.transformed_color[..., (2, 1, 0)].reshape(
            (-1, 3)) / 255
        pcd.points = cph.utility.Vector3fVector(points)
        pcd.colors = cph.utility.Vector3fVector(colors)

        if frame_count == 0:
            vis.add_geometry(pcd)

        vis.update_geometry(pcd)
        vis.poll_events()
        vis.update_renderer()
        frame_count += 1
    k4a.stop()
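The cph module above is presumably the cupoch GPU point-cloud library (an assumption; the snippet's imports are not shown):

import cupoch as cph  # GPU-accelerated point-cloud processing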
Example #19
def main():
    k4a = PyK4A()
    k4a.start()
    img_num = 1
    while True:
        capture = k4a.get_capture()
        img_color = capture.color
        depth_img = capture.depth
        depth_img = np.array(depth_img, dtype=float) / 65535.0 * 255
        depth_img = np.array(depth_img, dtype=np.uint8)
        print(depth_img.shape)
        print(img_color.shape)
        frame = img_color[:, :, 2::-1]
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        parameters = aruco.DetectorParameters_create()
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        print(np.mean(depth_img))
        print(ids)
        #print(corners,ids)
        #depth_img = depth_img /10000
        center_p(corners, depth_img)
        gray = aruco.drawDetectedMarkers(gray, corners)
        depth_img_jet = cv2.applyColorMap(depth_img * 10, cv2.COLORMAP_JET)

        cv2.imshow('frame', depth_img_jet)
        #cv2.imshow('frame2',depth_img/8.0)
        # Poll the keyboard once per frame; calling waitKey twice per
        # iteration can swallow key presses and doubles the delay
        key = cv2.waitKey(10) & 0xFF
        if key == ord('s'):
            cv2.imwrite("cam_cal_img/" + str(img_num) + "cal.png", gray)
            img_num += 1
        elif key == ord('q'):
            cv2.destroyAllWindows()
            break
    k4a.close()
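Dictionary_get and DetectorParameters_create belong to the legacy OpenCV ArUco API; with OpenCV >= 4.7 the equivalent detection looks roughly like this (a sketch of the newer class-based API):

from cv2 import aruco

aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_6X6_250)
parameters = aruco.DetectorParameters()
detector = aruco.ArucoDetector(aruco_dict, parameters)
corners, ids, rejected = detector.detectMarkers(gray)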
Example #20
def run_camera_inferance(model_setup: ModelSetup,
                         iterations=1000,
                         show_heatmap=False,
                         trt_optim=False):
    """
    Run the model for N number of frames

    :param model_setup: ModelSetup
    :param iterations: the total number of frames to run the model
    :param show_heatmap: set to visualize prediction heat map and mask
    """
    if trt_optim:
        from torch2trt import torch2trt, TRTModule
        trt_model_path = os.path.join(
            const.CHECKPOINT_PATH,
            f"{model_setup.loss}_{model_setup.model_name}_{model_setup.input_format}.trt"
        )
        if not os.path.exists(trt_model_path):
            print("Creating TRT Model...")
            x = torch.ones(
                (1, 3, const.IMG_SHAPE[0], const.IMG_SHAPE[1])).cuda()
            model_trt = torch2trt(model_setup.model.eval().cuda(), [x],
                                  fp16_mode=True)
            torch.save(model_trt.state_dict(), trt_model_path)
            print(f"TRT Model saved at:\n {trt_model_path}\n")
        else:
            print("Loading TRT Model...")
            model_trt = TRTModule()
            model_trt.load_state_dict(torch.load(trt_model_path))
            print("TRT Model loaded!\n")

    if show_heatmap:
        fig = plt.figure(figsize=(6, 7))

        ax1 = fig.add_subplot(3, 1, 1)
        ax1.get_xaxis().set_ticks([])
        ax1.get_yaxis().set_ticks([])
        ax2 = fig.add_subplot(3, 1, 2)
        ax2.get_xaxis().set_ticks([])
        ax2.get_yaxis().set_ticks([])
        ax3 = fig.add_subplot(3, 1, 3)
        ax3.get_xaxis().set_ticks([])
        ax3.get_yaxis().set_ticks([])
    else:
        fig = plt.figure(figsize=(4, 5))

        ax1 = fig.add_subplot(1, 1, 1)
        ax1.get_xaxis().set_ticks([])
        ax1.get_yaxis().set_ticks([])

    # Load camera with default config
    k4a = PyK4A()
    k4a.start()

    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)

    for i in range(iterations):
        capture = k4a.get_capture()
        ir_img = capture.ir
        depth_img = capture.depth

        w, h = ir_img.shape[1], ir_img.shape[0]
        transformed_image = input_image(depth_img, ir_img, const.IMG_SHAPE,
                                        model_setup.input_format)

        # Run inference
        t1 = time.time()
        if trt_optim:
            prediction = model_trt(transformed_image.cuda())
        else:
            prediction = Run_Inference(model_setup, transformed_image)
        t2 = time.time()
        print(f"infrence time: {t2-t1:1.3f}")

        pred_heatmap = prediction[0][0:model_setup.num_classes].max(
            0)[0].float()
        pred_mask = find_prediction_mask(pred_heatmap)[0][0]
        pred_yx_locations = torch.nonzero(pred_mask)

        pred_height = prediction[0][-4][pred_mask]
        pred_width = prediction[0][-3][pred_mask]

        pred_offset_y = prediction[0][-2][pred_mask]
        pred_offset_x = prediction[0][-1][pred_mask]

        pred_bboxes = get_bboxes(pred_yx_locations, pred_height, pred_width,
                                 pred_offset_x, pred_offset_y)

        rect = None
        for pred_box in pred_bboxes:
            x, y = pred_box[0], pred_box[1]
            width, height = pred_box[2], pred_box[3]
            rect = patches.Rectangle((x, y),
                                     width,
                                     height,
                                     linewidth=2,
                                     edgecolor='g',
                                     facecolor='none')
            ax1.add_patch(rect)
            ax1.text(x,
                     y,
                     str(get_depth(depth_img, pred_box)),
                     fontsize=14,
                     bbox=props)

        # For visualizing purposes
        ir_img[ir_img > 3000] = ir_img.mean()
        ir_img = cv2.resize(ir_img,
                            const.IMG_SHAPE,
                            interpolation=cv2.INTER_NEAREST)
        ax1.set_title('IR Image')
        ax1.get_xaxis().set_ticks([])
        ax1.get_yaxis().set_ticks([])
        ax1.imshow(ir_img, interpolation='nearest', cmap='gray')

        if show_heatmap:
            ax2.get_xaxis().set_ticks([])
            ax2.get_yaxis().set_ticks([])
            ax2.set_title('prediction Heatmap')
            ax2.imshow(pred_heatmap.cpu().numpy(),
                       interpolation='nearest',
                       cmap='gray')

            ax3.set_title('prediction Mask')
            ax3.get_xaxis().set_ticks([])
            ax3.get_yaxis().set_ticks([])
            ax3.imshow(pred_mask.float().cpu().numpy(),
                       interpolation='nearest',
                       cmap='gray')

        plt.draw()
        plt.pause(0.001)

        ax1.clear()
        if show_heatmap:
            ax2.clear()
            ax3.clear()
        if rect:
            del rect, capture, prediction
        else:
            del capture, prediction
Example #21
from argparse import ArgumentParser

from pyk4a import Config, ImageFormat, PyK4A, PyK4ARecord

parser = ArgumentParser(description="pyk4a recorder")
parser.add_argument("--device", type=int, help="Device ID", default=0)
parser.add_argument("FILE", type=str, help="Path to MKV file")
args = parser.parse_args()

print(f"Starting device #{args.device}")
config = Config(color_format=ImageFormat.COLOR_MJPG)
device = PyK4A(config=config, device_id=args.device)
device.start()

print(f"Open record file {args.FILE}")
record = PyK4ARecord(device=device, config=config, path=args.FILE)
record.create()
try:
    print("Recording... Press CTRL-C to stop recording.")
    while True:
        capture = device.get_capture()
        record.write_capture(capture)
except KeyboardInterrupt:
    print("CTRL-C pressed. Exiting.")

record.flush()
record.close()
print(f"{record.captures_count} frames written.")
Example #22
    def __init__(self,
                 screenName=None,
                 baseName=None,
                 useTk=1,
                 sync=0,
                 use=None):
        super().__init__(screenName=screenName,
                         baseName=baseName,
                         useTk=useTk,
                         sync=sync,
                         use=use)

        #######################################################################
        # Attributes ----------------------------------------------------------
        self.age = ['User', 'User']
        self.sex = ['User', 'User']
        self.height = ['User', 'User']
        self.weight = ['User', 'User']
        self.is_streaming = False
        self.is_displaying = False

        self.frame_count = 0
        self.time_stamp_tmp = 0
        self.depth_frame_tmp = 0
        self.rgb_frame_tmp = 0

        self.activity = StringVar()
        self.activity1 = StringVar()
        self.sensor_ignore = BooleanVar()
        self.buffer_ignore = BooleanVar()
        self.debug_mode = BooleanVar()

        # Clients
        self.kinect_client = PahoMqtt(BROKER, "KINECT", c_msg="kinect")
        self.kinect_client.loop_start()
        self.sound_client = PahoMqtt(BROKER, "SOUND", c_msg="sound")
        self.sound_client.loop_start()
        self.clients = list()
        dis = 0
        for i, item in enumerate(SENSORS):
            if item[2]:
                self.clients.append(
                    PahoMqtt(BROKER, f"{item[1]}", c_msg=item[0]))
                self.clients[i - dis].subscribe(item[0])
                self.clients[i - dis].loop_start()
            else:
                dis += 1

        # Attributes ----------------------------------------------------------
        #######################################################################
        # Tk widgets ----------------------------------------------------------

        self.title("Control")
        self.resizable(0, 0)
        self.configure(bg='white')

        # Sensor Frame 1
        self.sensor_frame1 = LabelFrame(self,
                                        text="Sensor control",
                                        background='white')
        self.sensor_frame1.pack(side=LEFT, fill="y")

        self.sensor_state = list()
        for item in self.clients:
            self.sensor_state.append(
                Label(self.sensor_frame1,
                      text=f"SENSOR {item.info}",
                      background='white',
                      font=("default", 15, 'bold')))
            self.sensor_state[-1].grid(row=len(self.sensor_state), column=0)

        self.start_btn = ttk.Button(self.sensor_frame1,
                                    text="Refresh",
                                    command=self.refresh)
        self.start_btn.grid(row=len(self.clients) + 1, column=0)

        # Stream Frame 2
        self.sensor_frame2 = LabelFrame(self,
                                        text="Person 1",
                                        background='white')
        self.sensor_frame2.pack(side=LEFT, fill="y")
        self.user_age = Label(self.sensor_frame2,
                              text="Age",
                              background='white',
                              font=("default", 10, 'bold'))
        self.user_age.grid(row=0, column=0)
        self.age_label = Label(self.sensor_frame2,
                               text=self.age[0],
                               background='white',
                               font=("default", 10, 'bold'))
        self.age_label.grid(row=0, column=1)
        self.user_sex = Label(self.sensor_frame2,
                              text="Sex",
                              background='white',
                              font=("default", 10, 'bold'))
        self.user_sex.grid(row=1, column=0)
        self.sex_label = Label(self.sensor_frame2,
                               text=self.sex[0],
                               background='white',
                               font=("default", 10, 'bold'))
        self.sex_label.grid(row=1, column=1)
        self.user_height = Label(self.sensor_frame2,
                                 text="Height",
                                 background='white',
                                 font=("default", 10, 'bold'))
        self.user_height.grid(row=2, column=0)
        self.height_label = Label(self.sensor_frame2,
                                  text=self.height[0],
                                  background='white',
                                  font=("default", 10, 'bold'))
        self.height_label.grid(row=2, column=1)
        self.user_weight = Label(self.sensor_frame2,
                                 text="Weight",
                                 background='white',
                                 font=("default", 10, 'bold'))
        self.user_weight.grid(row=3, column=0)
        self.weight_label = Label(self.sensor_frame2,
                                  text=self.weight[0],
                                  background='white',
                                  font=("default", 10, 'bold'))
        self.weight_label.grid(row=3, column=1)

        self.activity_menu = ttk.Combobox(self.sensor_frame2,
                                          value=ACTIVITIES,
                                          textvariable=self.activity)
        self.activity_menu.current(0)
        self.activity_menu.config(state="readonly", width=15)
        self.activity_menu.bind("<<ComboboxSelected>>")
        self.activity_menu.grid(row=4, column=0, columnspan=2, pady=5)

        self.stream_start_btn = ttk.Button(self.sensor_frame2,
                                           text="Stream start",
                                           command=self.stream_start,
                                           width=11)
        self.stream_start_btn.grid(row=5, column=0, padx=2, pady=2)
        self.stream_stop_btn = ttk.Button(self.sensor_frame2,
                                          text="Stream stop",
                                          command=self.stream_stop,
                                          width=11)
        self.stream_stop_btn["state"] = DISABLED
        self.stream_stop_btn.grid(row=5, column=1, padx=2, pady=2)

        self.stream_reset_btn = ttk.Button(
            self.sensor_frame2,
            text="Stream reset",
            command=lambda: self.stream_reset(delete=True),
            width=11)
        self.stream_reset_btn["state"] = DISABLED
        self.stream_reset_btn.grid(row=7, column=1, padx=2, pady=2)

        self.stream_save_btn = ttk.Button(self.sensor_frame2,
                                          text="Stream save",
                                          command=self.stream_save,
                                          width=11)
        self.stream_save_btn["state"] = DISABLED
        self.stream_save_btn.grid(row=7, column=0, padx=2, pady=2)

        self.act_start_btn = ttk.Button(self.sensor_frame2,
                                        text="Activity start",
                                        command=lambda: self.activity_start(0),
                                        width=11)
        self.act_start_btn["state"] = DISABLED
        self.act_start_btn.grid(row=6, column=0, padx=2, pady=5)

        self.act_end_btn = ttk.Button(self.sensor_frame2,
                                      text="Activity end",
                                      command=lambda: self.activity_end(0),
                                      width=11)
        self.act_end_btn["state"] = DISABLED
        self.act_end_btn.grid(row=6, column=1, padx=2, pady=5)

        ###################################################################################
        # Stream Frame 3
        self.sensor_frame3 = LabelFrame(self,
                                        text="Person 2",
                                        background='white')
        self.sensor_frame3.pack(side=LEFT, fill="y")
        self.user1_age = Label(self.sensor_frame3,
                               text="Age",
                               background='white',
                               font=("default", 10, 'bold'))
        self.user1_age.grid(row=0, column=0)
        self.age1_label = Label(self.sensor_frame3,
                                text=self.age[1],
                                background='white',
                                font=("default", 10, 'bold'))
        self.age1_label.grid(row=0, column=1)
        self.user1_sex = Label(self.sensor_frame3,
                               text="Sex",
                               background='white',
                               font=("default", 10, 'bold'))
        self.user1_sex.grid(row=1, column=0)
        self.sex1_label = Label(self.sensor_frame3,
                                text=self.sex[1],
                                background='white',
                                font=("default", 10, 'bold'))
        self.sex1_label.grid(row=1, column=1)
        self.user1_height = Label(self.sensor_frame3,
                                  text="Height",
                                  background='white',
                                  font=("default", 10, 'bold'))
        self.user1_height.grid(row=2, column=0)
        self.height1_label = Label(self.sensor_frame3,
                                   text=self.height[1],
                                   background='white',
                                   font=("default", 10, 'bold'))
        self.height1_label.grid(row=2, column=1)
        self.user1_weight = Label(self.sensor_frame3,
                                  text="Weight",
                                  background='white',
                                  font=("default", 10, 'bold'))
        self.user1_weight.grid(row=3, column=0)
        self.weight1_label = Label(self.sensor_frame3,
                                   text=self.weight[1],
                                   background='white',
                                   font=("default", 10, 'bold'))
        self.weight1_label.grid(row=3, column=1)

        self.activity1_menu = ttk.Combobox(self.sensor_frame3,
                                           value=ACTIVITIES1,
                                           textvariable=self.activity1)
        self.activity1_menu.current(0)
        self.activity1_menu.config(state="readonly", width=15)
        self.activity1_menu.bind("<<ComboboxSelected>>")
        self.activity1_menu.grid(row=4, column=0, columnspan=2, pady=5)

        self.act_start_btn1 = ttk.Button(
            self.sensor_frame3,
            text="Activity start",
            command=lambda: self.activity_start(1),
            width=11)
        self.act_start_btn1["state"] = DISABLED
        self.act_start_btn1.grid(row=5, column=0, padx=2, pady=5)

        self.act_end_btn1 = ttk.Button(self.sensor_frame3,
                                       text="Activity end",
                                       command=lambda: self.activity_end(1),
                                       width=11)
        self.act_end_btn1["state"] = DISABLED
        self.act_end_btn1.grid(row=5, column=1, padx=2, pady=5)
        ###################################################################################

        # Menu
        menubar = Menu(self)
        tool = Menu(menubar, tearoff=0)
        tool.add_command(label="Insert user info", command=self.user_info)
        tool.add_command(label="Display kinect", command=self.disp_kinect)
        tool.add_checkbutton(label="Ignore sensor error",
                             onvalue=1,
                             offvalue=0,
                             variable=self.sensor_ignore)
        tool.add_checkbutton(label="Ignore buffer error",
                             onvalue=1,
                             offvalue=0,
                             variable=self.buffer_ignore)
        tool.add_checkbutton(label="debug mode",
                             onvalue=1,
                             offvalue=0,
                             variable=self.debug_mode)
        tool.add_command(label="Close sensors", command=self.close)
        menubar.add_cascade(label="Tools", menu=tool)
        self.config(menu=menubar)

        # Tk widgets ----------------------------------------------------------
        #######################################################################
        # Sensor controls -----------------------------------------------------

        self.azure = PyK4A()
        self.azure.start()

        self.stream_reset()
        self.get_video()
        self.stream_video()
        self.stream_depth()

        self.set_state()
        self.refresh()

        # Main loop -----------------------------------------------------------
        self.mainloop()
Example #23
import pyk4a
from pyk4a import Config, PyK4A, ColorResolution

import cv2
import numpy as np

k4a = PyK4A(Config(color_resolution=ColorResolution.RES_720P,
                   depth_mode=pyk4a.DepthMode.NFOV_UNBINNED, ))
k4a.connect()

# getters and setters directly get and set on device
k4a.whitebalance = 4500
assert k4a.whitebalance == 4500
k4a.whitebalance = 4510
assert k4a.whitebalance == 4510

while True:
    img_color = k4a.get_capture(color_only=True)
    if np.any(img_color):
        cv2.imshow('k4a', img_color[:, :, :3])
        key = cv2.waitKey(10)
        if key != -1:
            cv2.destroyAllWindows()
            break
k4a.disconnect()
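connect(), disconnect() and get_capture(color_only=True) come from an early pyk4a API; the current upstream equivalent of this loop is roughly (an assumption):

k4a.start()
while True:
    capture = k4a.get_capture()
    if capture.color is not None:
        cv2.imshow('k4a', capture.color[:, :, :3])
        if cv2.waitKey(10) != -1:
            cv2.destroyAllWindows()
            break
k4a.stop()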
Example #24
def main():
    parsed_args = parse_args()

    if parsed_args.fps == 30:
        fps_config = pyk4a.FPS.FPS_30
    elif parsed_args.fps == 15:
        fps_config = pyk4a.FPS.FPS_15
    else:
        raise Exception(f'fps {parsed_args.fps} not found')

    k4a = PyK4A(
        Config(color_resolution=ColorResolution.RES_1536P,
               depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
               camera_fps=fps_config))
    k4a.connect(lut=True)

    frame_num = -1

    is_dump = parsed_args.dump_filepath is not None and parsed_args.dump_frames is not None
    if is_dump:
        dump_data = {'frames': [], 'cam_params': k4a.get_cam_params()}
    times = []
    verbose = parsed_args.fps

    get_color = parsed_args.no_color
    get_depth = parsed_args.no_depth
    get_bt = parsed_args.no_bt

    # num_results = get_bt * 2 + get_depth + get_color
    while True:
        frame_num += 1
        start = time.time()
        result = k4a.get_capture2(
            parallel_bt=parsed_args.parallel_bt,
            get_bt=get_bt,
            get_depth=get_depth,
            get_color=get_color,
            get_color_timestamp=get_color,
            get_depth_timestamp=get_depth,
            undistort_color=parsed_args.undistort_color,
            undistort_depth=parsed_args.undistort_depth,
            transformed_depth=parsed_args.transformed_depth,
            undistort_bt=parsed_args.undistort_bt,
            verbose=verbose)

        # if len(result.keys()) < num_results:
        #     print(f'frame_num: {frame_num}, result_items: {len(result.keys())}')
        times.append(time.time() - start)

        if verbose > 0:
            if len(times) > 10 and len(times) % verbose == 0:
                print(f'py fps: {1 / np.array(times[10:]).mean()}')

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        if is_dump:
            dump_data['frames'].append(result)
            if frame_num > parsed_args.dump_frames:
                break

        suff = lambda x: '_undistorted' if x else ''
        if parsed_args.vis_color:
            k = f'color{suff(parsed_args.undistort_color)}'
            if k in result:
                result_img = result[k][::4, ::4, :3]
                cv2.imshow(k, result_img)

        if parsed_args.vis_depth:
            pref = 'transformed_' if parsed_args.transformed_depth else ''
            k = f'{pref}depth{suff(parsed_args.undistort_depth)}'
            if k in result:
                result_img = result[k]
                result_img = result_img.astype(np.float32) / 1000
                result_img = (255 * result_img / 4).clip(0,
                                                         255).astype(np.uint8)
                cv2.imshow(k, result_img)

        if parsed_args.vis_bt:
            k = f'body_index_map{suff(parsed_args.undistort_bt)}'
            if k in result and 'pose' in result:
                result_img = result[k]
                cv2.imshow(k, result_img)

    k4a.disconnect()

    if is_dump:
        print('dumping...')
        with open(parsed_args.dump_filepath, 'wb') as f:
            pickle.dump(dump_data, f)
    else:
        cv2.destroyAllWindows()
Example #25
def main():
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=True,
        ))
    k4a.start()

    plt.ion()
    fig, axes = plt.subplots(3, sharex=False)

    data = {
        "temperature": [0] * MAX_SAMPLES,
        "acc_x": [0] * MAX_SAMPLES,
        "acc_y": [0] * MAX_SAMPLES,
        "acc_z": [0] * MAX_SAMPLES,
        "acc_timestamp": [0] * MAX_SAMPLES,
        "gyro_x": [0] * MAX_SAMPLES,
        "gyro_y": [0] * MAX_SAMPLES,
        "gyro_z": [0] * MAX_SAMPLES,
        "gyro_timestamp": [0] * MAX_SAMPLES,
    }
    y = np.zeros(MAX_SAMPLES)
    lines = {
        "temperature": axes[0].plot(y, label="temperature")[0],
        "acc_x": axes[1].plot(y, label="acc_x")[0],
        "acc_y": axes[1].plot(y, label="acc_y")[0],
        "acc_z": axes[1].plot(y, label="acc_z")[0],
        "gyro_x": axes[2].plot(y, label="gyro_x")[0],
        "gyro_y": axes[2].plot(y, label="gyro_y")[0],
        "gyro_z": axes[2].plot(y, label="gyro_z")[0],
    }

    for i in range(MAX_SAMPLES):
        sample = k4a.get_imu_sample()
        sample["acc_x"], sample["acc_y"], sample["acc_z"] = sample.pop(
            "acc_sample")
        sample["gyro_x"], sample["gyro_y"], sample["gyro_z"] = sample.pop(
            "gyro_sample")
        for k, v in sample.items():
            data[k][i] = v
        if i == 0:
            set_default_data(data)

        for k in ("temperature", "acc_x", "acc_y", "acc_z", "gyro_x", "gyro_y",
                  "gyro_z"):
            lines[k].set_data(range(MAX_SAMPLES), data[k])

        acc_y = data["acc_x"] + data["acc_y"] + data["acc_z"]
        gyro_y = data["gyro_x"] + data["gyro_y"] + data["gyro_z"]
        lines["acc_x"].axes.set_ylim(min(acc_y), max(acc_y))
        lines["gyro_x"].axes.set_ylim(min(gyro_y), max(gyro_y))
        lines["temperature"].axes.set_ylim(min(data["temperature"][0:i + 1]),
                                           max(data["temperature"][0:i + 1]))

        fig.canvas.draw()
        fig.canvas.flush_events()

    k4a._stop_imu()
    k4a.stop()
Example #26
def main():
    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            color_format=pyk4a.ImageFormat.COLOR_BGRA32,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=True,
        ))

    k4a.start()
    # getters and setters directly get and set on device
    k4a.whitebalance = 4500
    assert k4a.whitebalance == 4500
    k4a.whitebalance = 4510
    assert k4a.whitebalance == 4510
    windowID = ''  #Give id to the windows to know which to destroy when not needed
    BODY_PARTS = {
        "Nose": 0,
        "Neck": 1,
        "RShoulder": 2,
        "RElbow": 3,
        "RWrist": 4,
        "LShoulder": 5,
        "LElbow": 6,
        "LWrist": 7,
        "RHip": 8,
        "RKnee": 9,
        "RAnkle": 10,
        "LHip": 11,
        "LKnee": 12,
        "LAnkle": 13,
        "REye": 14,
        "LEye": 15,
        "REar": 16,
        "LEar": 17,
        "Background": 18
    }

    POSE_PAIRS = [["Neck", "RShoulder"], ["Neck", "LShoulder"],
                  ["RShoulder", "RElbow"], ["RElbow", "RWrist"],
                  ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
                  ["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"],
                  ["Neck", "LHip"], ["LHip", "LKnee"], ["LKnee", "LAnkle"],
                  ["Neck", "Nose"], ["Nose", "REye"], ["REye", "REar"],
                  ["Nose", "LEye"], ["LEye", "LEar"]]

    net = cv2.dnn.readNetFromTensorflow("graph_opt.pb")  ##weights
    inWidth = 654
    inHeight = 368
    thr = 0.2
    while True:
        capture = k4a.get_capture()

        if np.any(capture.color) and np.any(capture.depth):
            checker, frame, length = bodyTracking(capture.color, net, inWidth,
                                                  inHeight, BODY_PARTS,
                                                  POSE_PAIRS, thr)
            frame_depth = np.clip(capture.depth, 0, 2**10 - 1)
            frame_depth >>= 2
            num_fingers, img_draw = handEstimator(frame_depth)
            imgConts, conts = MiniUtils.getContours(capture.color,
                                                    capture.transformed_depth,
                                                    filter=4)
            cv2.imshow('Hand', img_draw)
            cv2.imshow('rgb', capture.color)
            '''if checker:
                if length < 11:
                    if windowID != '01' and windowID != '':
                        cv2.destroyAllWindows()
                    cv2.imshow('Hand', img_draw)
                    windowID = '01'
                else:
                    if windowID != '10' and windowID != '':
                        cv2.destroyAllWindows()
                    cv2.imshow('Body', frame)
                    windowID = '10'
            else:
                cv2.imshow('Conts', imgConts)
                if windowID != '11' and windowID != '':
                    cv2.destroyAllWindows()
                windowID = '11'
                '''
            key = cv2.waitKey(1)
            if key == ord('q'):
                cv2.destroyAllWindows()
                break
    k4a.stop()
Example #27
def main():
    BODY_PARTS = {"Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
                  "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
                  "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
                  "LEye": 15, "REar": 16, "LEar": 17, "Background": 18}

    POSE_PAIRS = [["Neck", "RShoulder"], ["Neck", "LShoulder"], ["RShoulder", "RElbow"],
                  ["RElbow", "RWrist"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
                  ["RShoulder", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"], ["LShoulder", "LHip"],
                  ["LHip", "LKnee"], ["LKnee", "LAnkle"], ["Neck", "Nose"], ["Nose", "REye"],
                  ["REye", "REar"], ["Nose", "LEye"], ["LEye", "LEar"]]

    net = cv.dnn.readNetFromTensorflow("graph_opt.pb") ##weights

    inWidth = 654
    inHeight = 368
    thr = 0.2

    k4a = PyK4A(
        Config(
            color_resolution=pyk4a.ColorResolution.RES_720P,
            color_format=pyk4a.ImageFormat.COLOR_BGRA32,
            depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
            synchronized_images_only=False,

        )
    )

    k4a.start()
    # getters and setters directly get and set on device
    k4a.whitebalance = 4500
    assert k4a.whitebalance == 4500
    k4a.whitebalance = 4510
    assert k4a.whitebalance == 4510

    while cv.waitKey(1) < 0:
        capture = k4a.get_capture()
        if np.any(capture.color):
            frame = capture.color
            frame = cv.cvtColor(frame, cv.COLOR_BGRA2BGR)
            frameWidth = frame.shape[1]
            frameHeight = frame.shape[0]
            net.setInput(
                cv.dnn.blobFromImage(frame, 1.0, (inWidth, inHeight), (127.5, 127.5, 127.5), swapRB=True, crop=False))
            out = net.forward()
            out = out[:, :19, :, :]  # MobileNet output [1, 57, -1, -1], we only need the first 19 elements

            assert (len(BODY_PARTS) == out.shape[1])

            points = []
            for i in range(len(BODY_PARTS)):
                # Slice heatmap of the corresponding body part.
                heatMap = out[0, i, :, :]

                # Ideally we would find all local maxima; to keep the sample
                # simple we take only the global one, so just a single pose
                # can be detected at a time.
                _, conf, _, point = cv.minMaxLoc(heatMap)
                x = (frameWidth * point[0]) / out.shape[3]
                y = (frameHeight * point[1]) / out.shape[2]
                # Add a point if its confidence is higher than the threshold.
                points.append((int(x), int(y)) if conf > thr else None)

            for pair in POSE_PAIRS:
                partFrom = pair[0]
                partTo = pair[1]
                assert (partFrom in BODY_PARTS)
                assert (partTo in BODY_PARTS)

                idFrom = BODY_PARTS[partFrom]
                idTo = BODY_PARTS[partTo]

                if points[idFrom] and points[idTo]:
                    cv.line(frame, points[idFrom], points[idTo], (0, 255, 0), 3)
                    cv.ellipse(frame, points[idFrom], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)
                    cv.ellipse(frame, points[idTo], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)

            t, _ = net.getPerfProfile()
            freq = cv.getTickFrequency() / 1000
            cv.putText(frame, '%.2fms' % (t / freq), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

            cv.imshow('OpenPose using OpenCV', frame)

    k4a.stop()
Example #28
import pyk4a
from pyk4a import Config, PyK4A, ColorResolution

import cv2
import numpy as np

k4a = PyK4A(
    Config(
        color_resolution=ColorResolution.RES_720P,
        depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
        synchronized_images_only=True,
    ))
k4a.connect()

# getters and setters directly get and set on device
k4a.whitebalance = 4500
assert k4a.whitebalance == 4500
k4a.whitebalance = 4510
assert k4a.whitebalance == 4510

img_color, img_ir, img_depth, img_pcl = k4a.get_capture(
    transform_to_color=False, pcl=True)
out = cv2.VideoWriter('out.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 20.0,
                      (img_ir.shape[1], img_ir.shape[0]))

while True:
    img_color, img_ir, img_depth, img_pcl = k4a.get_capture(
        transform_to_color=False, pcl=True)

    if img_color is not None:
        cv2.imshow('k4a color', img_color[:, :, :3])
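    # (The snippet is truncated here. A hypothetical completion, assuming the
    #  intent is to write IR frames to the VideoWriter opened above and then
    #  clean up:)
    if img_ir is not None:
        # Scale 16-bit IR down to 8-bit and expand to 3 channels for the writer
        ir_vis = (img_ir.clip(0, 2000) / 2000.0 * 255).astype(np.uint8)
        out.write(cv2.cvtColor(ir_vis, cv2.COLOR_GRAY2BGR))
    if cv2.waitKey(10) != -1:
        break

out.release()
cv2.destroyAllWindows()
k4a.disconnect()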
Example #29
            res = xm_find_people()
            res.position.point.x, res.position.point.y, res.position.point.z = transformed_cor
            res.character = description
            return res
        if count >= 10:
            print(colored("No qualified person!", 'bright red'))
            res = xm_find_people()
            res.position.point.x, res.position.point.y, res.position.point.z = -12, -12, -12
            res.character = 'No qualified person!'
            return res


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--debug', type=bool, default=False)
    args = parser.parse_args()
    is_debug = args.debug

    APP_ID = '23757775'
    API_KEY = 'K0uRowPaoSfSs0ABvUxurnHG'
    SECRET_KEY = 'Q6WtWCSFqqMAbKffCrqHQ6DDWRNc0oD7'
    client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)

    camera = PyK4A()
    camera.config.color_resolution = ColorResolution.RES_1080P
    camera.start()
    rospy.init_node('handGestureNode')
    service = rospy.Service('handGesture', xm_find_people, call_back)
    rospy.loginfo('GPSR HandGesture\'s vision start!')
    rospy.spin()
Example #30
    def process_value(self):
        self.index += 1
        init_done = False
        if self.realtime:
            if self.k4a is None:
                if self.fps == 30:
                    fps_config = pyk4a.FPS.FPS_30
                elif self.fps == 15:
                    fps_config = pyk4a.FPS.FPS_15
                else:
                    raise Exception(f'fps {self.fps} not found')
                k4a = PyK4A(
                    Config(color_resolution=ColorResolution.RES_1536P,
                           depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
                           camera_fps=fps_config,
                           gpu_id=self.gpu_id))
                k4a.connect(lut=True)
                self.decoded_cam_params = cam_params_decode(
                    k4a.get_cam_params())
                self.k4a = k4a
                init_done = True
        elif self.kinect_dump_frames is None:
            with open(self.kinect_dump_fp, 'rb') as f:
                kinect_dump_data = pickle.load(f)
            self.decoded_cam_params = cam_params_decode(
                kinect_dump_data['cam_params'])
            self.kinect_dump_frames = kinect_dump_data['frames']
            init_done = True

        if init_done:
            self.msg_queue.put(
                MetaMsg(sender_subblock_name=self.subblock_name,
                        acceptor_name='aggregate',
                        acceptor_type='processor',
                        msg={'decoded_cam_params': self.decoded_cam_params}))

        if self.realtime:
            d = self.k4a.get_capture2(
                verbose=self.fps if self.log_level >= 2 else 0,
                skip_old_atol_ms=self.skip_old_atol_ms,
                get_color_timestamp=self.get_color_timestamp,
                get_color=True,
                undistort_color=False,
                get_depth=False,
                get_bt=True,
                undistort_bt=False,
                parallel_bt=self.parallel_bt)
        else:
            self.dump_index += 1
            if self.dump_index >= len(self.kinect_dump_frames):
                self.dump_index = 0
                if self.log_level >= 1:
                    print('dump finished')
            d = self.kinect_dump_frames[self.dump_index]
            cur_time = time.time()
            sleep_time = max(0, 1 / self.fps - (cur_time - self.prev_time))
            time.sleep(sleep_time)
            self.prev_time = time.time()

        for p in ['pose', 'color']:
            if p not in d:
                return None

        if self.kinect_dump_fp is None:
            self.index += d['skip_count']
        #         print(d['depth_timestamp'], d['color_timestamp'])

        # cv2.imshow('a', d['color_undistorted'][::4, ::4, :])
        # cv2.waitKey(1)

        if self.face_cropper is None:
            self.face_cropper = Cropper(
                self.decoded_cam_params['rvec'],
                self.decoded_cam_params['tvec'],
                self.decoded_cam_params['K_rgb_distorted'],
                self.decoded_cam_params['rgb_distortion'])

        start = time.time()
        img = d['color'][..., :3]

        if len(d['pose']) < 1:
            return None

        body_pose = d['pose'][0, :, 2:5] / 1000
        body_conf = d['pose'][0, :, -1]

        hand_dist = np.sqrt(np.sum((body_pose[8] - body_pose[15])**2))
        is_close_hands = hand_dist < 0.2

        w = img.shape[1]
        h = img.shape[0]
        hand_crops = dict()
        for hand_index, side in [
            [15, 'right'],
            [8, 'left'],
        ]:
            p = body_pose[hand_index]
            p_cube = get_cube(p)
            p_proj, _ = cv2.projectPoints(
                p_cube, self.decoded_cam_params['rvec'],
                self.decoded_cam_params['tvec'],
                self.decoded_cam_params['K_rgb_distorted'],
                self.decoded_cam_params['rgb_distortion'])
            p_proj = p_proj.reshape(-1, 2)
            lu = np.min(p_proj, axis=0)
            rb = np.max(p_proj, axis=0)
            lu = np.clip(lu, a_min=(0, 0), a_max=[w, h]).astype(int)
            rb = np.clip(rb, a_min=(0, 0), a_max=[w, h]).astype(int)
            crop = img[lu[1]:rb[1], lu[0]:rb[0], :3]
            crop = cv2.resize(crop, (128, 128))
            hand_crops[side] = crop

        face_crop, face_bbox = self.face_cropper.forward(img, body_pose)
        # cv2.imshow('face', face_crop)
        # cv2.waitKey(1)

        body_pose = body_pose @ self.decoded_cam_params['rotmtx'].T + \
                    self.decoded_cam_params['tvec']

        res = dict({
            'face_crop': face_crop,
            'hand_crops': hand_crops,
            'body_pose': body_pose,
            'body_conf': body_conf,
            'is_close_hands': is_close_hands,
            'time': start
        })

        if self.output_count == 0:
            if self.log_level >= 1:
                print(f'{self.subblock_name} working')
        self.output_count += 1

        return self.index, res