def run_inference_on_image():
    answer = None

    if not tf.gfile.Exists(imagePath):
        tf.logging.fatal('File does not exist %s', imagePath)
        return answer

    # image_data is read fresh on every loop iteration below.
    # Create the graph from the saved GraphDef file.
    create_graph()

    with tf.Session() as sess:
        with open(labelsFullPath, 'r') as f:
            lines = f.readlines()
        labels = [line.rstrip('\n') for line in lines]

        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
        cap = cv2.VideoCapture(0)

        while True:
            start = time.time()
            ret, frame = cap.read()
            frame = hs.handsegment(frame)
            # Write the segmented frame to disk and re-read it as JPEG bytes;
            # imagePath is assumed to point at './test.jpg'.
            cv2.imwrite('./test.jpg', frame)
            image_data = tf.gfile.FastGFile(imagePath, 'rb').read()
            predictions = sess.run(softmax_tensor,
                                   {'DecodeJpeg/contents:0': image_data})
            predictions = np.squeeze(predictions)

            # Get the five predictions with the highest probabilities.
            top_k = predictions.argsort()[-5:][::-1]
            for node_id in top_k:
                human_string = labels[node_id]
                score = predictions[node_id]
                print('%s (score = %.5f)' % (human_string, score))
            answer = labels[top_k[0]]
            print("ans : " + str(answer))
            print("time : ", time.time() - start)
Example #2
        if len(frames) < FRAMES_PER_VIDEO:
            print('[WARNING] Not enough frames; at least %d are required.' %
                  FRAMES_PER_VIDEO)
            state = 0
        else:
            frames = np.array(frames)
            # Sample FRAMES_PER_VIDEO evenly spaced indices across the clip.
            ind = np.arange(0, len(frames),
                            len(frames) / FRAMES_PER_VIDEO).astype(int)
            frames = frames[ind]
            print('index', ind)
            print('No of frames', frames.shape)

            for i in range(len(frames)):
                f = frames[i]
                f = cv2.resize(f, SIZE)
                f = handsegment.handsegment(f)
                reprocessed_frames.append(f)
                cv2.imwrite(folder + '/unknown_frame_' + str(i) + '.jpeg', f)

            state = 4
            start_time = time.time()

    elif state == 4:
        # Optional debug view of the reprocessed frames:
        # for f in reprocessed_frames:
        #     cv2.imshow('after reprocessing', f)
        #     cv2.waitKey(1)

        # PREDICT_SPATIAL: run the spatial model over the reprocessed frames.
        from predict_spatial import *
        print('[] PREDICT SPATIAL ')
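# The even-frame sampling above is the heart of this branch: it picks
# FRAMES_PER_VIDEO roughly evenly spaced frames regardless of clip length.
# A self-contained sketch of just that step; every value is illustrative.
import numpy as np

FRAMES_PER_VIDEO = 30  # assumed value; the snippet's constant is not shown

# Pretend we captured 45 frames of 240x320 BGR video.
frames = np.zeros((45, 240, 320, 3), dtype=np.uint8)

# Evenly spaced float positions across the clip, truncated to integer indices.
ind = np.arange(0, len(frames), len(frames) / FRAMES_PER_VIDEO).astype(int)
frames = frames[ind]

print(ind)           # 30 indices spread over 0..44: [ 0  1  3  4  6 ...]
print(frames.shape)  # (30, 240, 320, 3)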
Example #3
def convert(dataset):
    fcount = 0
    rootPath = os.getcwd()
    majorData = os.path.join(os.getcwd(), "majorData")
    if not exists(majorData):
        os.makedirs(majorData)
    dataset = os.path.join(os.getcwd(), dataset)
    os.chdir(dataset)
    x = os.listdir(os.getcwd())

    # Keep only the "test" split when it is present.
    if "test" in x:
        x = ["test"]

    if ".DS_Store" in x:
        x.remove(".DS_Store")
    print(x)
    for gesture in x:
        adhyan = gesture  # keep the bare gesture name; gesture becomes a full path below
        gesture = os.path.join(dataset, gesture)
        os.chdir(gesture)
        frames = os.path.join(majorData, adhyan)
        if not os.path.exists(frames):
            os.makedirs(frames)
        videos = os.listdir(os.getcwd())
        videos = [video for video in videos if os.path.isfile(video)]

        for video in videos:
            name = os.path.abspath(video)
            fcount = fcount + 1
            print(fcount, " : ", name)
            cap = cv2.VideoCapture(name)  # capturing input video
            frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            print(frameCount)
            count = 0
            os.chdir(frames)
            lastFrame = None
            while True:
                ret, frame = cap.read()  # extract frame
                if not ret:
                    break
                framename = os.path.splitext(video)[0]
                framename = framename + "_frame_" + str(count) + ".jpeg"
                # hc is a module-level list of [frame path, label, frame count] rows.
                hc.append([join(frames, framename), adhyan, frameCount])

                if not os.path.exists(framename):
                    frame = hs.handsegment(frame)
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    lastFrame = frame
                    cv2.imwrite(framename, frame)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
                count += 1
            # Pad with the last frame so every video yields 201 frames (0-200).
            while count < 201:
                framename = os.path.splitext(video)[0]
                framename = framename + "_frame_" + str(count) + ".jpeg"
                hc.append([join(frames, framename), adhyan, frameCount])
                if not os.path.exists(framename):
                    cv2.imwrite(framename, lastFrame)
                count += 1

            os.chdir(gesture)
            cap.release()
            cv2.destroyAllWindows()
    print(hc)

    os.chdir(rootPath)
    with open('data/labeled-frames-1.pkl', 'wb') as handle:
        # with open('data/labeled-frames-2.pkl', 'wb') as handle:
        pickle.dump(hc, handle, protocol=pickle.HIGHEST_PROTOCOL)
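# The function above relies on module-level imports and a shared list hc that
# the listing omits. A minimal sketch of how it might be wired up; the
# directory layout is inferred from the code paths, not confirmed.
import os
import pickle
from os.path import exists, join

import cv2

import handsegment as hs

hc = []  # accumulates [frame path, gesture label, source frame count] rows

# Expects ./dataset/<gesture>/<videos> (optionally narrowed to a "test"
# split); writes frames under ./majorData and the index to
# data/labeled-frames-1.pkl.
convert("dataset")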
Example #4
def convert(gesture_folder, target_folder):
    rootPath = os.getcwd()
    majorData = os.path.abspath(target_folder)

    if not exists(majorData):
        os.makedirs(majorData)

    gesture_folder = os.path.abspath(gesture_folder)

    os.chdir(gesture_folder)
    gestures = os.listdir(os.getcwd())

    print("Source Directory containing gestures: %s" % (gesture_folder))
    print("Destination Directory containing frames: %s\n" % (majorData))

    for gesture in tqdm(gestures, unit='actions', ascii=True):
        gesture_path = os.path.join(gesture_folder, gesture)
        os.chdir(gesture_path)

        gesture_frames_path = os.path.join(majorData, gesture)
        if not os.path.exists(gesture_frames_path):
            os.makedirs(gesture_frames_path)

        videos = os.listdir(os.getcwd())
        videos = [video for video in videos if os.path.isfile(video)]

        for video in tqdm(videos, unit='videos', ascii=True):
            name = os.path.abspath(video)
            cap = cv2.VideoCapture(name)  # capturing input video
            frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            lastFrame = None

            os.chdir(gesture_frames_path)
            count = 1

            # Assumption: only the first 150 frames are important.
            while count < 151:
                ret, frame = cap.read()  # extract frame
                if ret is False:
                    break
                framename = os.path.splitext(video)[0]
                framename = framename + "_frame_" + str(count) + ".jpeg"
                hc.append([join(gesture_frames_path, framename), gesture, frameCount])

                if not os.path.exists(framename):
                    frame = hs.handsegment(frame)
                    lastFrame = frame
                    cv2.imwrite(framename, frame)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
                count += 1

            # Repeat the last frame until we have 150 frames.
            while count < 151:
                framename = os.path.splitext(video)[0]
                framename = framename + "_frame_" + str(count) + ".jpeg"
                hc.append([join(gesture_frames_path, framename), gesture, frameCount])

                if not os.path.exists(framename):
                    cv2.imwrite(framename, lastFrame)
                count += 1

            os.chdir(gesture_path)
            cap.release()
            cv2.destroyAllWindows()

    os.chdir(rootPath)
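# As in Example #3, the imports and the shared hc list live at module level.
# A minimal sketch of a command-line driver; the argparse interface is
# invented for illustration and may not match the repo's real entry point.
import argparse
import os
from os.path import exists, join

import cv2
from tqdm import tqdm

import handsegment as hs

hc = []  # shared index of [frame path, gesture label, source frame count]

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Extract hand-segmented frames from gesture videos.')
    parser.add_argument('gesture_folder',
                        help='directory with one subfolder per gesture')
    parser.add_argument('target_folder',
                        help='destination directory for extracted frames')
    args = parser.parse_args()
    convert(args.gesture_folder, args.target_folder)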