# Example 1
def run_object_detection(config):
    """Run a live object-detection demo on webcam frames.

    Frames are read continuously from camera 0 while inference runs
    asynchronously in a one-process pool; once a result is ready it is
    drawn onto the frames buffered during that inference. Press ESC in
    the demo window to quit.

    Args:
        config: experiment config; ``CLASSES`` and ``IMAGE_SIZE``
            (height, width) are read here.
    """
    # Set variables
    camera_width = 320
    camera_height = 240
    window_name = "Object Detection Demo"
    input_width = config.IMAGE_SIZE[1]
    input_height = config.IMAGE_SIZE[0]

    vc = cv2.VideoCapture(0)
    # Fix: the legacy cv2.cv.CV_CAP_PROP_* namespace was removed in
    # OpenCV 3; the capture-property constants live directly on cv2.
    vc.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
    vc.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
    vc.set(cv2.CAP_PROP_FPS, 15)

    pool = Pool(processes=1)
    result = False
    fps = 1.0

    # Two frame queues: q_save collects frames grabbed while the current
    # inference runs, q_show holds the frames to display for the previous
    # one; they are swapped at the end of each outer iteration.
    q_save = Queue()
    q_show = Queue()

    grabbed, camera_img = vc.read()

    q_show.put(camera_img.copy())
    input_img = camera_img.copy()

    #  ----------- Beginning of Main Loop ---------------
    while True:
        m1 = MyTime("1 loop of while(1) of main()")
        pool_result = pool.apply_async(run_inference, (input_img, ))
        is_first = True
        while True:
            sleep(0.01)
            grabbed, camera_img = vc.read()
            if is_first:
                # The first frame grabbed after dispatch becomes the
                # input of the next inference round.
                input_img = camera_img.copy()
                is_first = False
            q_save.put(camera_img.copy())
            if not q_show.empty():
                window_img = q_show.get()
                if result:
                    window_img = add_rectangle(config.CLASSES, window_img,
                                               result,
                                               (input_height, input_width))
                    window_img = add_fps(window_img, fps)
                # ---------- END of if result != False -----------------

                cv2.imshow(window_name, window_img)
                key = cv2.waitKey(2)  # Wait for 2ms
                if key == 27:  # ESC to quit
                    return

            if pool_result.ready():
                break
        # -------------- END of wait loop ----------------------
        q_show = clear_queue(q_show)
        q_save, q_show = swap_queue(q_save, q_show)
        result, fps = pool_result.get()
        m1.show()
# Example 2
def run_classification(config):
    """Show a live image-classification demo until ESC is pressed.

    Reads frames from the camera, runs inference asynchronously in a
    one-process pool, and overlays the top class, its score and the FPS
    on the displayed frame.
    """
    global nn

    camera_width = 320
    camera_height = 240
    window_width = 320
    window_height = 240
    window_name = "Classification Demo"

    vc = init_camera(camera_width, camera_height)
    pool = Pool(processes=1, initializer=nn.init)

    grabbed, camera_img = vc.read()
    pool_result = pool.apply_async(_run_inference, (camera_img, ))

    result = None
    fps = 1.0
    loop_count = 0

    while True:
        loop_timer = MyTime("1 loop of while(1) of main()")

        # ESC aborts the demo loop.
        key = cv2.waitKey(2)    # Wait for 2ms
        if key == 27:           # ESC to quit
            break

        read_timer = MyTime("vc.read()")
        grabbed, camera_img = vc.read()
        read_timer.show()

        # As soon as the previous inference finishes, collect its result
        # and immediately dispatch the next one on the freshest frame.
        if pool_result.ready():
            result, fps = pool_result.get()
            pool_result = pool.apply_async(_run_inference, (camera_img, ))

        same_size = (window_width == camera_width
                     and window_height == camera_height)
        window_img = camera_img if same_size else cv2.resize(
            camera_img, (window_width, window_height))

        if result is not None:
            result_class = np.argmax(result, axis=1)
            add_class_label(window_img,
                            text=str(result[0, result_class][0]),
                            font_scale=0.52,
                            dl_corner=(230, 230))
            add_class_label(window_img,
                            text=config.CLASSES[result_class[0]],
                            font_scale=0.52,
                            dl_corner=(230, 210))
            window_img = add_fps(window_img, fps)
            loop_count += 1
            print("loop_count:", loop_count)

        show_timer = MyTime("cv2.imshow()")
        cv2.imshow(window_name, window_img)
        show_timer.show()

        loop_timer.show()
        sleep(0.05)

    cv2.destroyAllWindows()
# Example 3
def run_keypoint_detection(config):
    """Show a live keypoint-detection demo until ESC is pressed.

    While one inference runs asynchronously, freshly grabbed frames are
    buffered in one queue and frames from the previous round are drawn
    and displayed from the other; the queues swap every outer iteration.
    """
    global nn

    camera_width = 320
    camera_height = 240
    window_name = "Keypoint Detection Demo"

    input_width = config.IMAGE_SIZE[1]
    input_height = config.IMAGE_SIZE[0]

    vc = init_camera(camera_width, camera_height)
    pool = Pool(processes=1, initializer=nn.init)

    result = None
    fps = 1.0

    q_save = Queue()
    q_show = Queue()

    grabbed, camera_img = vc.read()
    q_show.put(camera_img.copy())
    input_img = camera_img.copy()

    while True:
        loop_timer = MyTime("1 loop of while(1) of main()")
        pool_result = pool.apply_async(_run_inference, (input_img, ))
        need_next_input = True
        while True:
            grabbed, camera_img = vc.read()
            if need_next_input:
                # The first frame after dispatch feeds the next round.
                input_img = camera_img.copy()
                need_next_input = False
            q_save.put(camera_img.copy())
            if not q_show.empty():
                window_img = q_show.get()
                if result is None:
                    drawed_img = window_img
                else:
                    drawed_img = visualize_keypoint_detection(
                        window_img, result[0], (input_height, input_width))
                    drawed_img = add_fps(drawed_img, fps)

                cv2.imshow(window_name, drawed_img)
                key = cv2.waitKey(2)  # Wait for 2ms
                # TODO(yang): Consider using another key for abort.
                if key == 27:  # ESC to quit
                    return

            # TODO(yang): Busy loop is not efficient here. Improve it and change them in other tasks.
            if pool_result.ready():
                break

        q_show = clear_queue(q_show)
        q_save, q_show = swap_queue(q_save, q_show)
        result, fps = pool_result.get()
        loop_timer.show()
# Example 4
def show_keypoint_detection(img, result, fps, window_height, window_width,
                            config):
    """Draw keypoint-detection results and FPS on *img* and display it.

    Args:
        img: source frame to display.
        result: model output; ``result[0]`` is visualized.
        fps: frames-per-second value overlaid on the image.
        window_height / window_width: display size of the window.
        config: experiment config providing ``IMAGE_SIZE`` (height, width).
    """
    window_img = resize(img, size=[window_height, window_width])

    # Fix: input_width/input_height were previously undefined here
    # (NameError at runtime); derive them from the config, matching the
    # sibling show_* helpers.
    input_width = config.IMAGE_SIZE[1]
    input_height = config.IMAGE_SIZE[0]
    window_img = visualize_keypoint_detection(window_img, result[0],
                                              (input_height, input_width))
    window_img = add_fps(window_img, fps)

    window_name = "Keypoint Detection Demo"
    cv2.imshow(window_name, window_img)
# Example 5
def run_sementic_segmentation(config):
    """Show a live semantic-segmentation demo until ESC is pressed.

    The color-mapped segmentation result of the previous round is
    alpha-blended over the buffered camera frames while the next
    inference runs asynchronously.

    Args:
        config: experiment config; ``config['CLASSES']`` supplies the
            class list for the color map.
    """
    global nn
    camera_width = 320
    camera_height = 240
    window_name = "Segmentation Demo"

    vc = init_camera(camera_width, camera_height)

    pool = Pool(processes=1, initializer=nn.init)
    result = None
    fps = 1.0

    # q_save buffers frames grabbed during the current inference;
    # q_show holds frames to display for the previous one.
    q_save = Queue()
    q_show = Queue()

    grabbed, camera_img = vc.read()
    if not grabbed:
        # Fix: previously execution continued with camera_img == None and
        # crashed on .copy(); bail out cleanly instead.
        print("Frame is empty")
        return

    q_show.put(camera_img.copy())
    input_img = camera_img.copy()

    # One fixed color per class, computed once outside the loop.
    colormap = np.array(get_color_map(len(config['CLASSES'])), dtype=np.uint8)

    while True:
        m1 = MyTime("1 loop of while(1) of main()")
        pool_result = pool.apply_async(_run_inference, (input_img, ))
        is_first = True
        while True:
            grabbed, camera_img = vc.read()
            if is_first:
                # First frame after dispatch feeds the next inference.
                input_img = camera_img.copy()
                is_first = False
            q_save.put(camera_img.copy())
            if not q_show.empty():
                window_img = q_show.get()
                overlay_img = window_img
                if result is not None:
                    seg_img = label_to_color_image(result, colormap)
                    seg_img = cv2.resize(seg_img,
                                         dsize=(camera_width, camera_height))
                    overlay_img = cv2.addWeighted(window_img, 1, seg_img, 0.8,
                                                  0)
                    overlay_img = add_fps(overlay_img, fps)

                cv2.imshow(window_name, overlay_img)
                key = cv2.waitKey(2)  # Wait for 2ms
                if key == 27:  # ESC to quit
                    return
            if pool_result.ready():
                break
        q_show = clear_queue(q_show)
        q_save, q_show = swap_queue(q_save, q_show)
        result, fps = pool_result.get()
        m1.show()
# Example 6
def show_semantic_segmentation(img, result, fps, window_height, window_width,
                               config):
    """Blend the color-mapped segmentation over *img* and display it.

    Args:
        img: source frame to display.
        result: per-pixel label output to colorize.
        fps: frames-per-second value overlaid on the image.
        window_height / window_width: display size of the window.
        config: experiment config providing ``CLASSES``.
    """
    orig_img = resize(img, size=[window_height, window_width])

    # Fix: colormap was previously undefined here (NameError at runtime);
    # build it from the class list as the other segmentation demo does.
    colormap = np.array(get_color_map(len(config.CLASSES)), dtype=np.uint8)
    seg_img = label_to_color_image(result, colormap)
    seg_img = cv2.resize(seg_img, dsize=(window_width, window_height))
    window_img = cv2.addWeighted(orig_img, 1, seg_img, 0.8, 0)
    window_img = add_fps(window_img, fps)

    window_name = "Semantic Segmentation Demo"
    cv2.imshow(window_name, window_img)
# Example 7
def show_object_detection(img, result, fps, window_height, window_width,
                          config):
    """Draw detection boxes and FPS on *img* and display it.

    Args:
        img: source frame to display.
        result: detection output passed to add_rectangle.
        fps: frames-per-second value overlaid on the image.
        window_height / window_width: display size of the window.
        config: experiment config providing CLASSES and IMAGE_SIZE
            (height, width).
    """
    window_img = resize(img, size=[window_height, window_width])

    input_width = config.IMAGE_SIZE[1]
    input_height = config.IMAGE_SIZE[0]
    window_img = add_rectangle(config.CLASSES, window_img, result,
                               (input_height, input_width))
    # Fix: the FPS-annotated image was previously assigned to an unused
    # variable (img), so the overlay could be lost; keep it in window_img,
    # the image that is actually shown, matching the sibling show_* helpers.
    window_img = add_fps(window_img, fps)

    window_name = "Object Detection Demo"
    cv2.imshow(window_name, window_img)
# Example 8
def show_semantic_segmentation(img, result, fps, window_height, window_width,
                               config):
    """Blend the color-mapped segmentation result over *img* and show it."""
    base_img = resize(img, size=[window_height, window_width])

    # One fixed color per class for the label-to-color mapping.
    class_colors = np.array(get_color_map(len(config.CLASSES)),
                            dtype=np.uint8)
    seg_img = label_to_color_image(result, class_colors)
    seg_img = cv2.resize(seg_img, dsize=(window_width, window_height))

    blended = cv2.addWeighted(base_img, 1, seg_img, 0.8, 0)
    blended = add_fps(blended, fps)

    window_name = "Semantic Segmentation Demo"
    cv2.imshow(window_name, blended)
# Example 9
def show_classification(img, result, fps, window_height, window_width, config):
    """Overlay the top class, its score and the FPS on *img* and show it."""
    window_img = resize(img, size=[window_height, window_width])

    top_class = np.argmax(result, axis=1)
    score_text = str(result[0, top_class][0])
    label_text = config.CLASSES[top_class[0]]

    # Score below, class name above, both bottom-right anchored.
    add_class_label(window_img,
                    text=score_text,
                    font_scale=0.52,
                    dl_corner=(230, 230))
    add_class_label(window_img,
                    text=label_text,
                    font_scale=0.52,
                    dl_corner=(230, 210))
    window_img = add_fps(window_img, fps)

    window_name = "Classification Demo"
    cv2.imshow(window_name, window_img)