Example #1
def main():
    global current_slide

    cap = cv.VideoCapture(0)

    # Eel folder setup and startup ##################################################
    eel.init('webslides')
    eel.start(
        'index.html',
        mode='chrome',
        cmdline_args=['--start-fullscreen'],
        block=False)
    eel.sleep(2.0)

    # Load model ################################################################
    sess = graph_load('model/frozen_inference_graph.pb')
    eel.go_nextslide()

    while True:
        eel.sleep(0.01)

        # Camera capture ########################################################
        ret, frame = cap.read()
        if not ret:
            continue

        # Processing according to the current slide ###############################
        draw_image = image_processing(current_slide, frame, sess)

        # Transfer to the UI ##############################################################
        _, imencode_image = cv.imencode('.jpg', draw_image)
        base64_image = base64.b64encode(imencode_image)
        eel.set_base64image("data:image/jpg;base64," +
                            base64_image.decode("ascii"))
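graph_load() is not shown in this excerpt. A plausible sketch, consistent with the inline frozen-graph loading code in Example #6 (TensorFlow 1.x), assuming the function simply returns a session bound to the imported graph:

import tensorflow as tf  # TensorFlow 1.x API, as used in Example #6

def graph_load(path):
    # Read the frozen graph definition and import it into a fresh graph
    with tf.Graph().as_default() as graph:
        graph_def = tf.GraphDef()
        with tf.gfile.FastGFile(path, 'rb') as f:
            graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
    # Return a session bound to that graph, ready for sess.run()
    return tf.Session(graph=graph)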
Example #2
def main():
    cap = cv.VideoCapture(0)

    # Eel folder setup and startup ###############################################
    eel.init('web')
    eel.start('index.html',
              mode='chrome',
              cmdline_args=['--start-fullscreen'],
              block=False)

    while True:
        start_time = time.time()

        eel.sleep(0.01)

        # Camera capture #####################################################
        ret, frame = cap.read()
        if not ret:
            continue

        # Transfer to the UI (image) #####################################################
        _, imencode_image = cv.imencode('.jpg', frame)
        base64_image = base64.b64encode(imencode_image)
        eel.set_base64image("data:image/jpg;base64," +
                            base64_image.decode("ascii"))

        # Note: cv.waitKey only delivers key events while an OpenCV window has
        # focus; this sample opens no imshow window, so this ESC check is
        # effectively vestigial.
        key = cv.waitKey(1)
        if key == 27:  # ESC
            break

        # Transfer to the UI (processing time) #################################
        elapsed_time = round((time.time() - start_time), 3)
        eel.set_elapsedtime(elapsed_time)
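The JPEG-encode / Base64 / eel.set_base64image sequence is repeated verbatim in every example; it can be factored into a small helper. A sketch (the name send_frame_to_ui and the quality parameter are illustrative, not from the original code):

def send_frame_to_ui(frame, quality=70):
    # JPEG-encode the frame, Base64-encode the bytes, and push the result to
    # the set_base64image function exposed on the JavaScript side
    ok, buf = cv.imencode('.jpg', frame, [int(cv.IMWRITE_JPEG_QUALITY), quality])
    if not ok:
        return
    eel.set_base64image("data:image/jpg;base64," +
                        base64.b64encode(buf).decode("ascii"))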
Example #3
def main():
    global current_slide

    cap = cv.VideoCapture(0)

    # Eel folder setup and startup #########################################################
    eel.init('webslides')
    eel.start('index.html',
              mode='chrome',
              cmdline_args=['--start-fullscreen'],
              block=False)

    while True:
        eel.sleep(0.01)

        # Camera capture ########################################################
        ret, frame = cap.read()
        if not ret:
            continue

        print(current_slide)

        # Transfer to the UI ##############################################################
        _, imencode_image = cv.imencode('.jpg', frame)
        base64_image = base64.b64encode(imencode_image)
        eel.set_base64image("data:image/jpg;base64," +
                            base64_image.decode("ascii"))

        key = cv.waitKey(1)
        if key == 27:  # ESC
            break
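Example #3 declares global current_slide but never assigns it here, so the value is presumably updated from the UI through a function exposed with @eel.expose. A minimal sketch of that setter (the name set_slide is hypothetical):

current_slide = 0

@eel.expose
def set_slide(number):
    # Called from the JavaScript side whenever the slide changes
    global current_slide
    current_slide = number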
Example #4
                        break
                    if (sel_cam != 999):
                        eel.sleep(0.01)
                        if (sel_cam != sel_cam_before):
                            if (sel_cam_before != 999):
                                cap.release()
                            cap = cv.VideoCapture(sel_cam)
                            cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
                            cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
                            sel_cam_before = sel_cam
                        ret, frame = cap.read()
                        if (ret is True):
                            # Transfer to the UI (image) #####################################################
                            _, imencode_image = cv.imencode('.jpg', frame)
                            base64_image = base64.b64encode(imencode_image)
                            eel.set_base64image("data:image/jpg;base64," +
                                                base64_image.decode("ascii"))
                            continue
                        else:
                            eel.alert_mess()
                            cap.release()
                            decide_cam_py(999)
                            sel_cam_before = sel_cam
                            if (decideFlg == 1):
                                decide_flg_py(0)
                            continue
                    else:
                        eel.sleep(0.01)

            else:
                cap.release()
                if (webcam_flg == 0):
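decide_cam_py() and decide_flg_py() appear only as call sites in this fragment. Judging from those calls, they are @eel.expose setters for the module-level sel_cam / decideFlg state, with 999 meaning "no camera selected"; the bodies below are an assumption:

sel_cam = 999   # 999 = no camera selected
decideFlg = 0

@eel.expose
def decide_cam_py(cam_id):
    # Assumed setter: stores the camera index chosen in the UI
    global sel_cam
    sel_cam = cam_id

@eel.expose
def decide_flg_py(flg):
    # Assumed setter for the "selection confirmed" flag
    global decideFlg
    decideFlg = flg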
Example #5
def HandTracking(cap, width, height, conf_flg=0):
    # Initialize the "close (×) button pressed" flag (flg_closePush, a variable in hand_gui.py)
    hand_gui.close_switch_py(0)
    # Parse arguments #################################################################
    args = get_args()

    flg_video = 0  # "1" means the camera is not connected
    flg_break = 0  # "1" breaks out of the outer loop -> normal termination
    name_pose = "Unknown"
    focus_flg = 1  # Show/hide toggle for index.html; "0": opened from Main.py, "1": opened from HandTracking.py
    namePose_flg = 1  # Start/end flag for complete_old.html
    position_flg = 0
    # flg_closePush = 0
    global ShortCutList
    ShortCutList = config_sys_set()
    cap_device = args.device
    cap_width = args.width
    cap_height = args.height

    use_static_image_mode = args.use_static_image_mode
    min_detection_confidence = args.min_detection_confidence
    min_tracking_confidence = args.min_tracking_confidence

    use_brect = True
    # width, height = autopy.screen.size()  # Used to set the window position when launching via Eel

    while (True):  # Loop until the camera is reconnected
        # If the "camera not connected" flag is set
        if (flg_video == 1):
            # Check whether a camera is connected
            cap = cv.VideoCapture(cap_device)
            cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
            cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
            ret, frame = cap.read()
            if (ret is True):
                flg_video = 0
                name_pose = "Unknown"
                focus_flg = 1
                namePose_flg = 1
                cap.release()
                eel.overlay_controll(True)
                eel.object_change("demo2.html", True)
                #eel.sleep(1)
                sel_cam_before = 999

                while (True):
                    if (decideFlgHT == 1):
                        if (sel_camHT != sel_cam_before):
                            cap = cv.VideoCapture(sel_camHT)
                            cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
                            cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
                            ret, frame = cap.read()
                            if (ret is False):
                                eel.alert_mess()
                                cap.release()
                                decide_camHT_py(999)
                                decide_flgHT_py(0)
                                sel_cam_before = sel_camHT
                                continue
                            else:
                                decide_flgHT_py(0)
                                break
                        decide_flgHT_py(0)
                        break
                    if (sel_camHT != 999):
                        eel.sleep(0.01)
                        if (sel_camHT != sel_cam_before):
                            if (sel_cam_before != 999):
                                cap.release()
                            cap = cv.VideoCapture(sel_camHT)
                            cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
                            cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
                            sel_cam_before = sel_camHT
                        ret, frame = cap.read()
                        if (ret is True):
                            # Transfer to the UI (image) #####################################################
                            _, imencode_image = cv.imencode('.jpg', frame)
                            base64_image = base64.b64encode(imencode_image)
                            eel.set_base64image("data:image/jpg;base64," +
                                                base64_image.decode("ascii"))
                            continue
                        else:
                            eel.alert_mess()
                            cap.release()
                            decide_camHT_py(999)
                            sel_cam_before = sel_camHT
                            if (decideFlgHT == 1):
                                decide_flgHT_py(0)
                            continue
                    else:
                        eel.sleep(0.01)
                        continue  # Back to the outer while loop
            else:
                # Camera is not connected
                # print("no webcam!!!")
                eel.sleep(0.01)
                time.sleep(0.01)
                continue  # Back to the outer while loop
        elif (flg_break == 1):
            decide_camHT_py(999)
            decide_flgHT_py(0)
            break  # Exit the outer while loop -> normal termination

        # Camera setup ###############################################################
        #cap = cv.VideoCapture(cap_device)
        #cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
        #cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)

        # Load model #############################################################
        mp_hands = mp.solutions.hands
        hands = mp_hands.Hands(
            static_image_mode=use_static_image_mode,
            max_num_hands=1,
            min_detection_confidence=min_detection_confidence,
            min_tracking_confidence=min_tracking_confidence,
        )

        keypoint_classifier = KeyPointClassifier()

        point_history_classifier = PointHistoryClassifier()

        # Load labels ###########################################################
        with open('model/keypoint_classifier/keypoint_classifier_label.csv',
                  encoding='utf-8-sig') as f:
            keypoint_classifier_labels = csv.reader(f)
            keypoint_classifier_labels = [
                row[0] for row in keypoint_classifier_labels
            ]
        with open(
                'model/point_history_classifier/point_history_classifier_label.csv',
                encoding='utf-8-sig') as f:
            point_history_classifier_labels = csv.reader(f)
            point_history_classifier_labels = [
                row[0] for row in point_history_classifier_labels
            ]

        # FPS measurement module ########################################################
        cvFpsCalc = CvFpsCalc(buffer_len=10)

        # Coordinate history #################################################################
        history_length = 16
        point_history = deque(maxlen=history_length)

        # Finger gesture history ################################################
        finger_gesture_history = deque(maxlen=history_length)

        #  ########################################################################
        mode = 0
        CountPose = [0, 0, 0, 0, 0, 0, 0]
        CountMotion = [0, 0, 0, 0]  # [Top,Right,Down,Left]
        identification = False
        while True:
            fps = cvFpsCalc.get()
            # Key handling (ESC: quit) #################################################
            key = cv.waitKey(10)
            if key == 27:  # ESC
                break
            number, mode = select_mode(key, mode)

            # Camera capture #####################################################
            ret, image = cap.read()
            if not ret:
                # Set the flags, shut the system down, and return to the outer while loop
                flg_video = 1
                focus_flg = 0

                print("【通知】WebCameraが接続されていません。")
                eel.focusSwitch(width, height, focus_flg)
                eel.overlay_controll(True)
                eel.object_change("connect.html", True)
                cap.release()
                cv.destroyAllWindows()
                break

            image = cv.flip(image, 1)  # Mirror display
            debug_image = copy.deepcopy(image)

            # Run detection #############################################################
            image = cv.cvtColor(image, cv.COLOR_BGR2RGB)

            image.flags.writeable = False
            results = hands.process(image)
            image.flags.writeable = True

            #  ####################################################################
            if results.multi_hand_landmarks is not None:
                #j=1
                for hand_landmarks, handedness in zip(
                        results.multi_hand_landmarks,
                        results.multi_handedness):
                    # Compute the bounding rectangle
                    brect = calc_bounding_rect(debug_image, hand_landmarks)
                    # Compute the landmarks
                    landmark_list = calc_landmark_list(debug_image,
                                                       hand_landmarks)

                    # Convert to relative / normalized coordinates
                    pre_processed_landmark_list = pre_process_landmark(
                        landmark_list)
                    pre_processed_point_history_list = pre_process_point_history(
                        debug_image, point_history)
                    # Save training data
                    logging_csv(number, mode, pre_processed_landmark_list,
                                pre_processed_point_history_list)

                    # Hand sign classification
                    hand_sign_id = keypoint_classifier(
                        pre_processed_landmark_list)

                    if hand_sign_id == 1:  # "Dang" sign
                        point_history.append(landmark_list[8])  # Index fingertip coordinates
                    else:
                        point_history.append([0, 0])

                    # Finger gesture classification
                    finger_gesture_id = 0
                    point_history_len = len(pre_processed_point_history_list)
                    if point_history_len == (history_length * 2):
                        finger_gesture_id = point_history_classifier(
                            pre_processed_point_history_list)

                    # Find the most frequent gesture ID among recent detections
                    finger_gesture_history.append(finger_gesture_id)
                    most_common_fg_id = Counter(
                        finger_gesture_history).most_common()

                    gesture_name = point_history_classifier_labels[
                        most_common_fg_id[0][0]]

                    # Drawing
                    debug_image = draw_bounding_rect(use_brect, debug_image,
                                                     brect)
                    debug_image = draw_landmarks(debug_image, landmark_list)
                    debug_image = draw_info_text(
                        debug_image,
                        brect,
                        handedness,
                        keypoint_classifier_labels[hand_sign_id],
                        gesture_name,
                    )

                    # Get the coordinates of the index fingertip
                    x, y = landmark_list[8]
                    # Adjust the coordinates
                    x_width = args.width * 0.05
                    x = x - x_width
                    x = x * 1.5
                    y = y * 1.5
                    # Count how many times the gesture has been recognized
                    if gesture_name == 'Stop':
                        CountMotion = [0, 0, 0, 0]
                    elif gesture_name == 'Move_Top':
                        Count_temp = CountMotion[0]
                        Count_temp += 1
                        CountMotion = [Count_temp, 0, 0, 0]
                    elif gesture_name == 'Move_Right':
                        Count_temp = CountMotion[1]
                        Count_temp += 1
                        CountMotion = [0, Count_temp, 0, 0]
                    elif gesture_name == 'Move_Down':
                        Count_temp = CountMotion[2]
                        Count_temp += 1
                        CountMotion = [0, 0, Count_temp, 0]
                    elif gesture_name == 'Move_Left':
                        Count_temp = CountMotion[3]
                        Count_temp += 1
                        CountMotion = [0, 0, 0, Count_temp]

                    # Execute the corresponding operation
                    CountPose, CountMotion = PoseAction.action(
                        hand_sign_id, x, y, CountPose, CountMotion,
                        ShortCutList)
                    name_pose = keypoint_classifier_labels[hand_sign_id]
                    eel.set_posegauge(str(name_pose))
                    identification = True

            else:
                point_history.append([0, 0])
                if identification:
                    eel.set_posegauge('None')
                    identification = False
                    eel.shortcut_overlay(False, 0)

            debug_image = draw_point_history(debug_image, point_history)
            debug_image = draw_info(debug_image, fps, mode, number)

            # Update the display #############################################################
            debug_image = cv.resize(debug_image, dsize=(520, 260))
            cv.imshow('FOCUS preview', debug_image)
            if position_flg == 0:
                cv.moveWindow('FOCUS preview', 0, 0)
                position_flg = 1

            # Eel startup #############################################################
            #cnt_gui, flg_end, flg_restart, flg_start, keep_flg = hand_gui.start_gui(cnt_gui, name_pose, flg_restart, flg_start, keep_flg)

            if (namePose_flg == 1):
                eel.object_change("complete_old.html", True)
                #eel.sleep(0.01)
                #eel.init("GUI/web")
                #eel.start("開きたい上記のフォルダ下のファイル名",~
                #eel.start("html/keeper.html",
                #            port = 0,
                #            mode='chrome',
                #            size=(4, 2),  # size (width, height)
                #            position=(width,0), # position (left, top)
                #            block=False
                #            )
                eel.sleep(0.01)
                print("【通知】準備完了")
                namePose_flg = 0
            elif (focus_flg == 1 and name_pose != 'Unknown'):
                eel.object_change("complete_old.html", False)
                eel.overlay_controll(False)
                eel.focusSwitch(width, height, focus_flg)
                print("【実行】index.html")
                eel.sleep(0.01)
                #eel.set_posegauge(name_pose,i)
                focus_flg = 0
                #i+=1

            # Eel startup #############################################################
            flg_end = hand_gui.start_gui()
            if (flg_end == 1):
                # Shut down normally (break out of the middle loop)
                flg_break = 1
                eel.endSwitch()  # Function that resets flg_end to its original value
                cap.release()
                cv.destroyAllWindows()
                eel.overlay_controll(False)
                eel.object_change("endpage.html", False)
                #eel.windowclose_keeper()
                break
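HandTracking() reads device, width, height, use_static_image_mode, min_detection_confidence, and min_tracking_confidence off the result of get_args(). A sketch matching those call sites (the defaults are illustrative assumptions):

import argparse

def get_args():
    # Command-line arguments consumed by HandTracking(); defaults are illustrative
    parser = argparse.ArgumentParser()
    parser.add_argument("--device", type=int, default=0)
    parser.add_argument("--width", type=int, default=960)
    parser.add_argument("--height", type=int, default=540)
    parser.add_argument("--use_static_image_mode", action="store_true")
    parser.add_argument("--min_detection_confidence", type=float, default=0.7)
    parser.add_argument("--min_tracking_confidence", type=float, default=0.5)
    return parser.parse_args()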
Example #6
def main():
    # Eel folder setup and startup #########################################################
    eel.init('web')
    eel.start(
        'index.html',
        mode='chrome',
        # cmdline_args=['--start-fullscreen', '--browser-startup-dialog'])
        cmdline_args=['--start-fullscreen'],
        block=False)

    ##### ADD START #####
    print("Hand Detection Start...\n")

    # Camera setup ##############################################################
    cap = cv.VideoCapture(0)
    cap.set(cv.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv.CAP_PROP_FRAME_HEIGHT, 720)

    # Start the FPS calculation class #######################################################
    fpsWithTick = FpsCalc.fpsWithTick()

    # Allocate only as much GPU memory as needed
    # (by default, TensorFlow allocates all of it unless told otherwise)
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))

    # Load the hand detection model #######################################################
    with tf.Graph().as_default() as net1_graph:
        graph_data = tf.gfile.FastGFile('frozen_inference_graph1.pb',
                                        'rb').read()
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(graph_data)
        tf.import_graph_def(graph_def, name='')

    sess1 = tf.Session(graph=net1_graph, config=config)
    sess1.graph.as_default()

    animation_counter = 0
    ##### ADD END #####

    # cap = cv.VideoCapture(0)
    # cap.set(cv.CAP_PROP_FRAME_WIDTH, 1280)
    # cap.set(cv.CAP_PROP_FRAME_HEIGHT, 720)

    while True:
        eel.sleep(0.01)

        # # Camera capture ########################################################
        # ret, frame = cap.read()
        # if not ret:
        #     continue

        animation_counter += 10
        # Compute FPS ####################################################
        display_fps = fpsWithTick.get()
        if display_fps == 0:
            display_fps = 0.1

        # Camera capture ###################################################
        ret, frame = cap.read()
        if not ret:
            continue
        debug_image = copy.deepcopy(frame)

        # Run detection ####################################################
        inp = cv.resize(frame, (512, 512))
        inp = inp[:, :, [2, 1, 0]]  # BGR2RGB

        out = session_run(sess1, inp)

        rows = frame.shape[0]
        cols = frame.shape[1]

        # Visualize detection results ###############################################
        num_detections = int(out[0][0])
        for i in range(num_detections):
            class_id = int(out[3][0][i])
            score = float(out[1][0][i])
            bbox = [float(v) for v in out[2][0][i]]

            if score < 0.8:
                continue

            x = bbox[1] * cols
            y = bbox[0] * rows
            right = bbox[3] * cols
            bottom = bbox[2] * rows

            center = (int((x + right) / 2), int((y + bottom) / 2))
            color = (255, 255, 205)

            # Outer ring (rotates forward)
            radius = int((bottom - y) * (5 / 10))
            thickness = int(radius / 20)
            for start_angle, sweep in [(0, 50), (80, 50), (150, 30),
                                       (200, 10), (230, 10), (260, 60),
                                       (337, 5)]:
                cv.ellipse(debug_image, center, (radius, radius),
                           start_angle + animation_counter, 0, sweep, color,
                           thickness)

            # Middle ring (rotates backward)
            radius = int((bottom - y) * (4.5 / 10))
            thickness = int(radius / 10)
            for start_angle, sweep in [(0, 50), (80, 50), (150, 30),
                                       (200, 30), (260, 60), (337, 5)]:
                cv.ellipse(debug_image, center, (radius, radius),
                           start_angle - animation_counter, 0, sweep, color,
                           thickness)

            # Inner ring (rotates forward at two-thirds speed)
            radius = int((bottom - y) * (4 / 10))
            thickness = int(radius / 15)
            for start_angle, sweep in [(30, 50), (110, 50), (180, 30),
                                       (230, 10), (260, 10), (290, 60),
                                       (367, 5)]:
                cv.ellipse(debug_image, center, (radius, radius),
                           start_angle + int(animation_counter / 3 * 2), 0,
                           sweep, color, thickness)

        # Transfer to the UI ##############################################################
        # _, imencode_image = cv.imencode('.jpg', frame)
        _, imencode_image = cv.imencode('.jpg', debug_image)
        base64_image = base64.b64encode(imencode_image)
        eel.set_base64image("data:image/jpg;base64," +
                            base64_image.decode("ascii"))

        key = cv.waitKey(1)
        if key == 27:  # ESC
            break
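session_run() is not shown in this excerpt. Given how out[0]..out[3] are indexed above (num_detections, scores, boxes, classes), it is presumably a thin wrapper around sess.run() using the standard tensor names of a TensorFlow 1.x Object Detection API frozen graph; a sketch under that assumption:

def session_run(sess, inp):
    # Single inference pass; the output order matches the indexing above:
    # out[0] = num_detections, out[1] = scores, out[2] = boxes, out[3] = classes
    out = sess.run(
        [sess.graph.get_tensor_by_name('num_detections:0'),
         sess.graph.get_tensor_by_name('detection_scores:0'),
         sess.graph.get_tensor_by_name('detection_boxes:0'),
         sess.graph.get_tensor_by_name('detection_classes:0')],
        feed_dict={
            'image_tensor:0': inp.reshape(1, inp.shape[0], inp.shape[1], 3)
        })
    return out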