Example #1
                    print("#1")
                else:
                    # Play the intro clip once on a worker thread and block until it finishes.
                    if not _v['intro']:
                        a_thread = Thread(target=playFile,
                                          args=(
                                              target_ip,
                                              'test_intro',
                                          ))
                        print("Playing intro file, sleep for ", 27, "Seconds")
                        a_thread.start()
                        a_thread.join()

                        _v['intro'] = True

                    # After the intro has played and nothing else is playing, watch for hand gestures.
                    if _v['intro'] and not _v['playing']:
                        hand_detection = detect_hand(cam)
                        print("#3", hand_detection)
                        # A 'P' gesture signals a problem; trigger the "problem" audio response once.
                        if hand_detection == 'P' and not _v['problem']:
                            print('Problem')
                            result = {'flag': 'problem', 'data': True}
                            audio_player = _v['audio_player']
                            robot_speaking_thread = Thread(
                                target=audio_player.play,
                                args=(result, 'NV', 'F', ''))
                            robot_speaking_thread.start()
                            _v['problem'] = True
                            # audio_player.play(result, 'BR', '', '')

                        if hand_detection == 1:
Example #2
#     x, y = position[0] * (SCREEN_SIZE[1] / frame_shape[1]), position[1] * (SCREEN_SIZE[0] / frame_shape[0])
#     width, height = SCREEN_SIZE
#     x_diff = width / 2 - x
#     y_diff = height / 2 - y
#     scale = 1.2
#     return width / 2 - scale * x_diff, height / 2 - scale * y_diff
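
# The commented-out lines above are the body of the get_cursor_position helper
# used by the (also commented-out) pyautogui.moveTo call inside the loop below.
# A minimal sketch of that helper with the body reproduced verbatim; SCREEN_SIZE
# is not defined anywhere in this fragment, so the value chosen here is an assumption.
SCREEN_SIZE = (1920, 1080)  # assumed (width, height); not defined in this fragment


def get_cursor_position(position, frame_shape):
    # Map the hand's center of mass from camera-frame coordinates to screen coordinates.
    x, y = position[0] * (SCREEN_SIZE[1] / frame_shape[1]), position[1] * (SCREEN_SIZE[0] / frame_shape[0])
    width, height = SCREEN_SIZE
    # Move the point 1.2x further away from the screen center so small hand
    # movements can still reach the screen edges.
    x_diff = width / 2 - x
    y_diff = height / 2 - y
    scale = 1.2
    return width / 2 - scale * x_diff, height / 2 - scale * y_diff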

while True:
    if cv2.waitKey(5) == ord("q"):
        break

    ret, frame = cap.read()
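    # Mirror the frame horizontally so the on-screen view matches the user's movements.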
    frame = cv2.flip(frame, 1)
    try:
        # detection.detect_face(frame, block=True)
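        # Detect the hand in the frame; hist is a histogram prepared outside this fragment.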
        hand = detection.detect_hand(frame, hist)
        center = hand.get_center_of_mass()
        # cursor_position = get_cursor_position(center, frame.shape)
        # pyautogui.moveTo(*cursor_position, _pause=False)
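        # Classify the binary hand mask into a gesture label.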
        gesture = predict_gesture(hand.binary)
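        # Overlay the mapped event name and the raw gesture value on the hand outline image.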
        cv2.putText(
            hand.outline,
            f"{EVENT_MAPPER[gesture]} ({gesture})",
            (5, hand.outline.shape[0] - 160),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            (0, 0, 255),
            2,
            cv2.LINE_AA,
        )