Example 1
def fire_detection(source, video_path, output, fps, frame_size):
    """
    Deep-learning-based fire and smoke detector
    """
    # click.echo(click.get_current_context().params)
    device_info = getDeviceInfo()  # type: dai.DeviceInfo
    with dai.Device(create_pipeline(source), device_info) as device:
        fps_handler = FPSHandler()
        if source:
            cap = cv2.VideoCapture(video_path)
            frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            frame_shape = [frame_height, frame_width]
            print("CAP_PROP_FRAME_SHAPE: %s" % frame_shape)
            cap_fps = int(cap.get(cv2.CAP_PROP_FPS))
            print("CAP_PROP_FPS: %d" % cap_fps)

            fire_in = device.getInputQueue("fire_in")
        else:
            cam_out = device.getOutputQueue("rgb")

        fire_nn = device.getOutputQueue("fire_nn")

        def should_run():
            if source:
                return cap.isOpened()
            else:
                return True

        def get_frame():
            if source:
                return cap.read()
            else:
                return True, cam_out.get().getCvFrame()

        if output:
            output.parent.mkdir(parents=True, exist_ok=True)
            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
            writer = cv2.VideoWriter(str(output), fourcc, fps, frame_size)

        label_map = ["fire", "normal", "smoke"]
        while should_run():
            read_correctly, frame = get_frame()
            if not read_correctly:
                break
            frame_debug = frame.copy()
            if source:
                run_nn(frame_debug, fire_in, 224, 224)
            fire_data = toTensorResult(fire_nn.get()).get("final_result")
            fps_handler.tick("fire")
            conf = np.max(fire_data)
            if conf > 0.5:
                label = label_map[np.argmax(fire_data)]

                drawText(frame_debug, f"{label}", (10, 30), "black")
                drawText(frame_debug, f"{conf:.2%}", (10, 50), "black")
            cv2.imshow("", frame_debug)
            if output:
                writer.write(cv2.resize(frame_debug, frame_size))

            key = cv2.waitKey(1)
            if key in [ord("q"), 27]:
                break
            elif key == ord("s"):
                cv2.imwrite(
                    "saved_%s.jpg" %
                    time.strftime("%Y%m%d_%H%M%S", time.localtime()),
                    frame_debug,
                )
        fps_handler.printStatus()
        if source:
            cap.release()
        if output:
            writer.release()
        cv2.destroyAllWindows()
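The `run_nn` helper these examples call is not included in the listing. Below is a minimal sketch of what such a host-side helper typically looks like with the DepthAI Python API, assuming a BGR frame and a network that expects planar input; the argument order here follows this example (frame first, queue second), while Examples 4 and 7 pass the queue first.

import cv2
import numpy as np
import depthai as dai


def run_nn(frame, in_queue, width, height):
    # Hypothetical helper (not part of the original listing): resize the BGR
    # frame, convert it to planar CHW layout, wrap it in a dai.ImgFrame and
    # send it to the device-side input queue feeding the neural network.
    img = dai.ImgFrame()
    img.setType(dai.ImgFrame.Type.BGR888p)
    img.setWidth(width)
    img.setHeight(height)
    planar = cv2.resize(frame, (width, height)).transpose(2, 0, 1).flatten()
    img.setData(planar.astype(np.uint8))
    in_queue.send(img)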
Example 2
def blink(source, video_path, output, fps, frame_size):
    """
    Detect and count eye blinks in a video stream in real time
    """
    # click.echo(click.get_current_context().params)
    device_info = getDeviceInfo()  # type: dai.DeviceInfo
    with dai.Device(create_pipeline(source), device_info) as device:
        fps_handler = FPSHandler()
        if source:
            cap = cv2.VideoCapture(video_path)
            frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            frame_shape = [frame_height, frame_width]
            print("CAP_PROP_FRAME_SHAPE: %s" % frame_shape)
            cap_fps = int(cap.get(cv2.CAP_PROP_FPS))
            print("CAP_PROP_FPS: %d" % cap_fps)

            mesh_in = device.getInputQueue("mesh_in")
        else:
            cam_out = device.getOutputQueue("rgb")

        mesh_nn = device.getOutputQueue("mesh_nn")

        eye_in = device.getInputQueue("eye_in")
        eye_nn = device.getOutputQueue("eye_nn")
        left_eye_blink = []
        right_eye_blink = []

        left_number_of_blinks = 0
        right_number_of_blinks = 0

        def should_run():
            if source:
                return cap.isOpened()
            else:
                return True

        def get_frame():
            if source:
                return cap.read()
            else:
                return True, cam_out.get().getCvFrame()

        if output:
            output.parent.mkdir(parents=True, exist_ok=True)
            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
            writer = cv2.VideoWriter(str(output), fourcc, fps, frame_size)

        while should_run():
            read_correctly, frame = get_frame()
            if not read_correctly:
                break
            frame_debug = frame.copy()
            if source:
                run_nn(frame_debug, mesh_in, 192, 192)
            mesh_data = toTensorResult(mesh_nn.get())
            fps_handler.tick("Mesh")

            score = mesh_data.get("conv2d_31").reshape((1, ))
            if score > 0.5:
                mesh = mesh_data.get("conv2d_21").reshape((468, 3))

                wh = np.array(frame_debug.shape[:2])[::-1]
                mesh *= np.array([*wh / 192, 1])

                for v in mesh.astype(int):
                    cv2.circle(frame_debug, v[:2], 1, (191, 255, 0))

                left_eye, left_box = get_mini_box_frame(
                    frame_debug,
                    np.array([mesh[71], mesh[107], mesh[116], mesh[197]]),
                )

                run_nn(left_eye, eye_in, 32, 32)
                left_eye_blink.append(
                    np.argmax(toTensorResult(eye_nn.get()).get("19")))

                if (len(left_eye_blink) > 5
                        and left_eye_blink[-1] not in left_eye_blink[-5:-1]):
                    left_number_of_blinks += 1

                if len(left_eye_blink) > 20:
                    del left_eye_blink[0]

                right_eye, right_box = get_mini_box_frame(
                    frame_debug,
                    np.array([mesh[301], mesh[336], mesh[345], mesh[197]]),
                )

                run_nn(right_eye, eye_in, 32, 32)
                right_eye_blink.append(
                    np.argmax(toTensorResult(eye_nn.get()).get("19")))
                if (len(right_eye_blink) > 5
                        and right_eye_blink[-1] not in right_eye_blink[-5:-1]):
                    right_number_of_blinks += 1

                if len(right_eye_blink) > 20:
                    del right_eye_blink[0]

                cv2.drawContours(frame_debug, [right_box], -1, (0, 139, 0), 2)
                cv2.drawContours(frame_debug, [left_box], -1, (0, 139, 0), 2)
                cv2.imshow("left", left_eye)
                cv2.imshow("right", right_eye)

            drawText(
                frame_debug,
                f"NumberOfBlinksOfRightEye: {right_number_of_blinks}",
                (20, 50),
                color="red",
            )
            drawText(
                frame_debug,
                f"NumberOfBlinksOfLeftEye: {left_number_of_blinks}",
                (20, 80),
                color="red",
            )

            cv2.imshow("", frame_debug)
            if output:
                writer.write(cv2.resize(frame_debug, frame_size))

            key = cv2.waitKey(1)
            if key in [ord("q"), 27]:
                break
            elif key == ord("s"):
                cv2.imwrite(
                    "saved_%s.jpg" %
                    time.strftime("%Y%m%d_%H%M%S", time.localtime()),
                    frame_debug,
                )
        fps_handler.printStatus()
        if source:
            cap.release()
        if output:
            writer.release()
        cv2.destroyAllWindows()
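`get_mini_box_frame` is not shown in the listing. A plausible reconstruction, assuming it fits a minimum-area rotated rectangle around the selected face-mesh landmarks and returns both the deskewed eye crop and the box contour the example later draws, could look like this:

import cv2
import numpy as np


def get_mini_box_frame(frame, points):
    # Hypothetical reconstruction: fit the minimum-area rotated rectangle
    # around the given landmarks, deskew the frame around it, and return the
    # cropped patch together with the box contour used by cv2.drawContours.
    rect = cv2.minAreaRect(points[:, :2].astype(np.float32))
    box = cv2.boxPoints(rect).astype(np.int32)
    center, (w, h), angle = rect
    rotation = cv2.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv2.warpAffine(frame, rotation, frame.shape[1::-1])
    crop = cv2.getRectSubPix(rotated, (int(w), int(h)), center)
    return crop, box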
Example 3
def vehicle(source, video_path, output, fps, frame_size):
    """
    Vehicle attribute recognition and license-plate recognition
    """
    # click.echo(click.get_current_context().params)
    device_info = getDeviceInfo()  # type: dai.DeviceInfo
    with dai.Device(create_pipeline(source), device_info) as device:
        fps_handler = FPSHandler()
        if source:
            cap = cv2.VideoCapture(video_path)
            frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            frame_shape = [frame_height, frame_width]
            print("CAP_PROP_FRAME_SHAPE: %s" % frame_shape)
            cap_fps = int(cap.get(cv2.CAP_PROP_FPS))
            print("CAP_PROP_FPS: %d" % cap_fps)

            vehicle_in = device.getInputQueue("vehicle_in")
        else:
            cam_out = device.getOutputQueue("rgb")

        vehicle_nn = device.getOutputQueue("vehicle_nn")

        attr_in = device.getInputQueue("attr_in")
        attr_nn = device.getOutputQueue("attr_nn")
        license_in = device.getInputQueue("license_in")
        license_nn = device.getOutputQueue("license_nn")

        colors = ["white", "gray", "yellow", "red", "green", "blue", "black"]
        types = ["car", "bus", "truck", "van"]

        license_dict = [
            *map(chr, range(48, 58)),
            "<Anhui>",
            "<Beijing>",
            "<Chongqing>",
            "<Fujian>",
            "<Gansu>",
            "<Guangdong>",
            "<Guangxi>",
            "<Guizhou>",
            "<Hainan>",
            "<Hebei>",
            "<Heilongjiang>",
            "<Henan>",
            "<HongKong>",
            "<Hubei>",
            "<Hunan>",
            "<InnerMongolia>",
            "<Jiangsu>",
            "<Jiangxi>",
            "<Jilin>",
            "<Liaoning>",
            "<Macau>",
            "<Ningxia>",
            "<Qinghai>",
            "<Shaanxi>",
            "<Shandong>",
            "<Shanghai>",
            "<Shanxi>",
            "<Sichuan>",
            "<Tianjin>",
            "<Tibet>",
            "<Xinjiang>",
            "<Yunnan>",
            "<Zhejiang>",
            "<police>",
            *map(chr, range(65, 91)),
        ]

        def should_run():
            if source:
                return cap.isOpened()
            else:
                return True

        def get_frame():
            if source:
                return cap.read()
            else:
                return True, cam_out.get().getCvFrame()

        if output:
            output.parent.mkdir(parents=True, exist_ok=True)
            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
            writer = cv2.VideoWriter(str(output), fourcc, fps, frame_size)

        while should_run():
            read_correctly, frame = get_frame()
            if not read_correctly:
                break
            frame_debug = frame.copy()
            if source:
                run_nn(frame_debug, vehicle_in, 300, 300)
            vehicle_data = vehicle_nn.get().detections
            fps_handler.tick("vehicle")

            for bbox in vehicle_data:
                if bbox.label == 1:
                    vehicle_coord = frameNorm(
                        frame_debug,
                        [bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax])

                    cv2.rectangle(frame_debug, vehicle_coord[:2],
                                  vehicle_coord[2:], (128, 128, 0))

                    vehicle_frame = frame[vehicle_coord[1]:vehicle_coord[3],
                                          vehicle_coord[0]:vehicle_coord[2], ]
                    run_nn(vehicle_frame, attr_in, 72, 72)
                    attr_data = toTensorResult(attr_nn.get())
                    color_ = colors[attr_data.get("color").argmax()]
                    type_ = types[attr_data.get("type").argmax()]
                    drawText(
                        frame_debug,
                        color_,
                        (vehicle_coord[0] + 10, vehicle_coord[1] + 10),
                    )
                    drawText(
                        frame_debug,
                        type_,
                        (vehicle_coord[0] + 10, vehicle_coord[1] + 25),
                    )
                elif bbox.label == 2:
                    plate_coord = frameNorm(
                        frame_debug,
                        [bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax])
                    cv2.rectangle(frame_debug, plate_coord[:2],
                                  plate_coord[2:], (128, 128, 0))

                    plate_frame = frame[plate_coord[1]:plate_coord[3],
                                        plate_coord[0]:plate_coord[2], ]
                    plate_frame = pad_resize(plate_frame, (24, 94))

                    # cv2.imshow("pl",plate_frame.astype(np.uint8))
                    run_nn(plate_frame, license_in, 94, 24)
                    license_data = (toTensorResult(
                        license_nn.get()).get("d_predictions.0").squeeze())
                    plate_str = ""
                    for j in license_data:
                        if j == -1:
                            break
                        plate_str += license_dict[j]
                    drawText(
                        frame_debug,
                        plate_str,
                        (plate_coord[0] - 10, plate_coord[1] - 10),
                    )
            cv2.imshow("", frame_debug)
            if output:
                writer.write(cv2.resize(frame_debug, frame_size))

            key = cv2.waitKey(1)
            if key in [ord("q"), 27]:
                break
            elif key == ord("s"):
                cv2.imwrite(
                    "saved_%s.jpg" %
                    time.strftime("%Y%m%d_%H%M%S", time.localtime()),
                    frame_debug,
                )
        fps_handler.printStatus()
        if source:
            cap.release()
        if output:
            writer.release()
        cv2.destroyAllWindows()
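`pad_resize` is only called here, with a (height, width) target of (24, 94) for the license-plate recognizer. A minimal letterbox-style sketch of such a helper, assuming aspect-preserving scaling with zero padding, is:

import cv2
import numpy as np


def pad_resize(frame, target_shape):
    # Hypothetical helper: scale the plate crop to fit inside
    # (height, width) = target_shape without distortion, then pad the
    # remainder with black so the recognizer always receives a 24x94 input.
    target_h, target_w = target_shape
    h, w = frame.shape[:2]
    scale = min(target_h / h, target_w / w)
    resized = cv2.resize(frame, (int(w * scale), int(h * scale)))
    canvas = np.zeros((target_h, target_w, 3), dtype=resized.dtype)
    canvas[:resized.shape[0], :resized.shape[1]] = resized
    return canvas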
Example 4
def yolox(source, model_name, model_size, video_path, output, fps, frame_size):
    """
    This function is used to detect objects in a video

    :param model_name: The name of the model to use
    :param model_size: Size of the model
    :param video_path: Path to the video file
    :param output: The output file name
    :param fps: The FPS (frames per second) of the output video
    :param frame_size: The size of the frame to be saving
    """
    """
    基于 yolox 的目标检测器
    """
    model_w = model_size[0]
    model_h = model_size[1]
    # click.echo(click.get_current_context().params)
    device_info = getDeviceInfo()  # type: dai.DeviceInfo
    with dai.Device(create_pipeline(source, model_name, model_w, model_h), device_info) as device:
        print("Starting pipeline...")
        fps_handler = FPSHandler()
        if source:
            cap = cv2.VideoCapture(video_path)
            frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            frame_shape = [frame_height, frame_width]
            print("CAP_PROP_FRAME_SHAPE: %s" % frame_shape)
            cap_fps = int(cap.get(cv2.CAP_PROP_FPS))
            print("CAP_PROP_FPS: %d" % cap_fps)

            yolox_det_in = device.getInputQueue("yolox_det_in")
        else:
            cam_out = device.getOutputQueue("cam_out", 1, True)
        yolox_det_nn = device.getOutputQueue("yolox_det_nn")

        def should_run():
            if source:
                return cap.isOpened()
            else:
                return True

        def get_frame():
            """
            Get the current frame from the camera and return it
            """
            if source:
                return cap.read()
            else:
                return True, cam_out.get().getCvFrame()

        if output:
            output.parent.mkdir(parents=True, exist_ok=True)
            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
            writer = cv2.VideoWriter(str(output), fourcc, fps, frame_size)

        while should_run():
            read_correctly, frame = get_frame()
            fps_handler.tick("Frame")
            if not read_correctly:
                break

            frame_debug = frame.copy()
            if source:
                run_nn(yolox_det_in, to_planar(frame, (model_h, model_w)), model_w, model_h)
            yolox_det_data = yolox_det_nn.get()

            res = toTensorResult(yolox_det_data).get("output")
            fps_handler.tick("nn")
            predictions = demo_postprocess(res, (model_h, model_w), p6=False)[0]

            boxes = predictions[:, :4]
            scores = predictions[:, 4, None] * predictions[:, 5:]

            boxes_xyxy = np.ones_like(boxes)
            boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
            boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
            boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
            boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0

            input_shape = np.array([model_h, model_w])
            min_r = (input_shape / frame.shape[:2]).min()
            offset = (np.array(frame.shape[:2]) * min_r - input_shape) / 2
            offset = np.ravel([offset, offset])
            boxes_xyxy = (boxes_xyxy + offset[::-1]) / min_r

            dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.2)

            if dets is not None:
                final_boxes = dets[:, :4]
                final_scores, final_cls_inds = dets[:, 4], dets[:, 5]
                frame_debug = vis(
                    frame_debug,
                    final_boxes,
                    final_scores,
                    final_cls_inds,
                    conf=0.5,
                    class_names=LABELS.get(model_name),
                )
            cv2.imshow("", frame_debug)
            if output:
                writer.write(cv2.resize(frame_debug, frame_size))

            key = cv2.waitKey(1)
            if key in [ord("q"), 27]:
                break
            elif key == ord("s"):
                cv2.imwrite(
                    "saved_%s.jpg" % time.strftime("%Y%m%d_%H%M%S", time.localtime()),
                    frame_debug,
                )
        fps_handler.printStatus()
        if source:
            cap.release()
        if output:
            writer.release()
    cv2.destroyAllWindows()
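`to_planar` is the usual DepthAI host-side conversion from an interleaved BGR frame to the planar layout the on-device network expects. A sketch consistent with how it is called above (shape given as (height, width)) would be:

import cv2


def to_planar(arr, shape):
    # Hypothetical helper: resize to (height, width) = shape, then reorder
    # HWC -> CHW and flatten into the planar byte layout DepthAI NN inputs use.
    resized = cv2.resize(arr, (shape[1], shape[0]))
    return resized.transpose(2, 0, 1).flatten()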
Example 5
    action="store_true",
    help="Use DepthAI 4K RGB camera for inference (conflicts with -vid)")
parser.add_argument(
    '-vid',
    '--video',
    type=str,
    help="Path to video file to be used for inference (conflicts with -cam)")
args = parser.parse_args()

if not args.camera and not args.video:
    raise RuntimeError(
        "No source selected. Please use either \"-cam\" to use RGB camera as a source or \"-vid <path>\" to run on video"
    )

debug = not args.no_debug
device_info = getDeviceInfo()

if args.camera:
    blob_path = "models/human-pose-estimation-0001_openvino_2021.2_6shave.blob"
else:
    blob_path = "models/human-pose-estimation-0001_openvino_2021.2_8shave.blob"
    if str(args.video).startswith('https'):
        args.video = downloadYTVideo(str(args.video))
        print("Youtube video downloaded.")
    if not Path(args.video).exists():
        raise ValueError("Path {} does not exist!".format(args.video))

colors = [[0, 100, 255], [0, 100, 255], [0, 255, 255], [0, 100, 255],
          [0, 255, 255], [0, 100, 255], [0, 255, 0], [255, 200, 100],
          [255, 0, 255], [0, 255, 0], [255, 200, 100], [255, 0, 255],
          [0, 0, 255], [255, 0, 0], [200, 200, 0], [255, 0, 0], [200, 200, 0],
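`downloadYTVideo` is not part of this snippet. One plausible implementation, assuming the pytube package and a hypothetical target_dir parameter, is:

from pathlib import Path

from pytube import YouTube


def downloadYTVideo(url, target_dir="videos"):
    # Hypothetical helper, assuming pytube: download the highest-resolution
    # progressive stream and return the local file path used as args.video.
    Path(target_dir).mkdir(parents=True, exist_ok=True)
    stream = YouTube(url).streams.get_highest_resolution()
    return stream.download(output_path=target_dir)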
Example 6
def palm(source, video_path, output, fps, frame_size):
    """
    Palm detection; control the mouse with the detected palm position
    """
    # click.echo(click.get_current_context().params)
    device_info = getDeviceInfo()  # type: dai.DeviceInfo
    with dai.Device(create_pipeline(source), device_info) as device:
        fps_handler = FPSHandler()
        if source:
            cap = cv2.VideoCapture(video_path)
            frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            frame_shape = [frame_height, frame_width]
            print("CAP_PROP_FRAME_SHAPE: %s" % frame_shape)
            cap_fps = int(cap.get(cv2.CAP_PROP_FPS))
            print("CAP_PROP_FPS: %d" % cap_fps)

            palm_in = device.getInputQueue("palm_in")
        else:
            cam_out = device.getOutputQueue("rgb")

        palm_nn = device.getOutputQueue("palm_nn")

        dots = []

        def should_run():
            if source:
                return cap.isOpened()
            else:
                return True

        def get_frame():
            if source:
                return cap.read()
            else:
                return True, cam_out.get().getCvFrame()

        if output:
            output.parent.mkdir(parents=True, exist_ok=True)
            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
            writer = cv2.VideoWriter(str(output), fourcc, fps, frame_size)

        while should_run():
            read_correctly, frame = get_frame()
            if not read_correctly:
                break
            frame_debug = frame.copy()
            if source:
                run_nn(frame_debug, palm_in, 128, 128)
            results = toTensorResult(palm_nn.get())
            fps_handler.tick("palm")

            num_keypoints = 7
            min_score_thresh = 0.7
            anchors = np.load("anchors_palm.npy")

            raw_box_tensor = results.get("regressors")  # regress
            raw_score_tensor = results.get("classificators")  # classification
            detections = raw_to_detections(raw_box_tensor, raw_score_tensor,
                                           anchors, (128, 128), num_keypoints)

            palm_coords = [
                frameNorm(frame, obj[:4]) for det in detections for obj in det
                if obj[-1] > min_score_thresh
            ]

            palm_confs = [
                obj[-1] for det in detections for obj in det
                if obj[-1] > min_score_thresh
            ]

            if len(palm_coords) > 0:
                palm_coords = non_max_suppression(
                    boxes=np.concatenate(palm_coords).reshape(-1, 4),
                    probs=palm_confs,
                    overlapThresh=0.1,
                )

                for bbox in palm_coords:
                    cv2.rectangle(frame_debug, bbox[:2], bbox[2:],
                                  (10, 245, 10))
                    dot_x = (bbox[2] + bbox[0]) / 2
                    dot_y = (bbox[3] + bbox[1]) / 2
                    dots = move_mouse(dots, (dot_x, dot_y), frame.shape[:2])
            cv2.imshow("", frame_debug)
            if output:
                writer.write(cv2.resize(frame_debug, frame_size))

            key = cv2.waitKey(1)
            if key in [ord("q"), 27]:
                break
            elif key == ord("s"):
                cv2.imwrite(
                    "saved_%s.jpg" %
                    time.strftime("%Y%m%d_%H%M%S", time.localtime()),
                    frame_debug,
                )
        fps_handler.printStatus()
        if source:
            cap.release()
        if output:
            writer.release()
        cv2.destroyAllWindows()
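Two details worth noting in this example: anchors_palm.npy is reloaded from disk on every frame, so loading it once before the while loop would be cheaper, and the `move_mouse` helper is not shown. A hedged sketch of such a helper, assuming the pyautogui package for cursor control, might be:

import numpy as np
import pyautogui


def move_mouse(dots, point, frame_shape, window=5):
    # Hypothetical helper, assuming pyautogui: keep a short history of palm
    # centres, average them to damp jitter, map the frame coordinate onto the
    # screen resolution, and move the cursor there.
    dots.append(point)
    dots = dots[-window:]
    avg_x, avg_y = np.mean(dots, axis=0)
    screen_w, screen_h = pyautogui.size()
    pyautogui.moveTo(avg_x / frame_shape[1] * screen_w,
                     avg_y / frame_shape[0] * screen_h)
    return dots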
Example 7
def facial_info(source, video_path, output, fps, frame_size):
    """
    Facial information recognition (head pose, age, gender, emotion)
    """
    device_info = getDeviceInfo()  # type: dai.DeviceInfo
    with dai.Device(create_pipeline(source), device_info) as device:
        print("Starting pipeline...")
        # device.startPipeline()
        if source:
            cap = cv2.VideoCapture(video_path)
            frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            frame_shape = [frame_height, frame_width]
            print("CAP_PROP_FRAME_SHAPE: %s" % frame_shape)
            cap_fps = int(cap.get(cv2.CAP_PROP_FPS))
            print("CAP_PROP_FPS: %d" % cap_fps)

            face_in = device.getInputQueue("face_in")
        else:
            cam_out = device.getOutputQueue("cam_out", 1, True)
        face_nn = device.getOutputQueue("face_nn")
        head_pose_in = device.getInputQueue("head_pose_in")
        head_pose_nn = device.getOutputQueue("head_pose_nn")
        age_in = device.getInputQueue("age_in")
        age_nn = device.getOutputQueue("age_nn")
        emo_in = device.getInputQueue("emo_in")
        emo_nn = device.getOutputQueue("emo_nn")

        if output:
            output.parent.mkdir(parents=True, exist_ok=True)
            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
            writer = cv2.VideoWriter(str(output), fourcc, fps, frame_size)

        def should_run():
            if source:
                return cap.isOpened()
            else:
                return True

        def get_frame():
            if source:
                return cap.read()
            else:
                return True, cam_out.get().getCvFrame()

        fps_handler = FPSHandler()

        while should_run():
            read_correctly, frame = get_frame()
            if not read_correctly:
                break
            frame_debug = frame.copy()
            if source:
                run_nn(face_in, frame, 300, 300)
            face_nn_data = face_nn.get()
            fps_handler.tick("All")
            if face_nn_data is not None:
                bboxes = face_nn_data.detections

                for bbox in bboxes:
                    face_coord = frameNorm(
                        frame_debug,
                        [bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax])
                    face_frame = frame[face_coord[1]:face_coord[3],
                                       face_coord[0]:face_coord[2], ]
                    cv2.rectangle(
                        frame_debug,
                        (face_coord[0], face_coord[1]),
                        (face_coord[2], face_coord[3]),
                        (0, 0, 0),
                    )

                    run_nn(head_pose_in, face_frame, 60, 60)
                    roll_degree = toTensorResult(
                        head_pose_nn.get()).get("angle_r_fc")[0][0]
                    center = (
                        (face_coord[2] + face_coord[0]) / 2,
                        (face_coord[3] + face_coord[1]) / 2,
                    )
                    size = (
                        (face_coord[2] - face_coord[0]),
                        (face_coord[3] - face_coord[1]),
                    )
                    face_frame_corr = rotate_frame(
                        frame,
                        center,
                        size,
                        roll_degree,
                    )
                    cv2.imshow("face_frame_corr", face_frame_corr)

                    run_nn(age_in, face_frame_corr, 62, 62)
                    age_gender = toTensorResult(age_nn.get())
                    age = age_gender.get("age_conv3").squeeze() * 100
                    # 0 - female, 1 - male
                    gender = "Male" if age_gender.get(
                        "prob").argmax() else "Female"
                    drawText(
                        frame_debug,
                        f"Age: {age:0.0f}",
                        (face_coord[0] + 10, face_coord[1] + 30),
                        color="greenyellow",
                    )
                    drawText(
                        frame_debug,
                        f"Gender: {gender}",
                        (face_coord[0] + 10, face_coord[1] + 50),
                        "greenyellow",
                    )

                    run_nn(emo_in, face_frame_corr, 64, 64)
                    # 0 - 'neutral', 1 - 'happy', 2 - 'sad', 3 - 'surprise', 4 - 'anger'
                    emo = ["neutral", "happy", "sad", "surprise", "anger"]
                    emo = emo[toTensorResult(
                        emo_nn.get()).get("prob_emotion").argmax()]
                    drawText(
                        frame_debug,
                        f"emo: {emo}",
                        (face_coord[0] + 10, face_coord[1] + 70),
                        "greenyellow",
                    )

            if output:
                writer.write(cv2.resize(frame_debug, frame_size))

            cv2.imshow("debug", frame_debug)
            key = cv2.waitKey(1)
            if key in [ord("q"), 27]:
                break
            elif key == ord("s"):
                cv2.imwrite(
                    "saved_%s.jpg" %
                    time.strftime("%Y%m%d_%H%M%S", time.localtime()),
                    frame_debug,
                )
        if source:
            cap.release()
        if output:
            writer.release()
        fps_handler.printStatus()
        cv2.destroyAllWindows()
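`rotate_frame` is not included in the listing; the example uses it to deskew the detected face by the head-pose roll angle before the age/gender and emotion networks. A plausible sketch, assuming OpenCV's affine rotation plus a sub-pixel crop, is:

import cv2


def rotate_frame(frame, center, size, degree):
    # Hypothetical reconstruction: rotate the frame around the face centre by
    # the roll angle, then cut out the face-sized patch so the downstream
    # models see an upright face.
    rotation = cv2.getRotationMatrix2D(center, degree, 1.0)
    rotated = cv2.warpAffine(frame, rotation, frame.shape[1::-1])
    return cv2.getRectSubPix(rotated, (int(size[0]), int(size[1])), center)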