def waitUntilMoveCompletes():
    """Block until a piece move on the board has settled, then return the
    last warped frame together with its cleaned-up foreground mask."""
    # Phase 1: spin until the move-subtractor's foreground activity dies down.
    while True:
        frame = perspective_transform(video_capture_thread.get_frame(), pts1)
        fgmask = move_fgbg.apply(normalize_illumination(frame))
        motion_fgbg.apply(frame)  # keep the motion model updated in lockstep
        if fgmask.mean() < MOVE_END_THRESHOLD:
            break

    # Phase 2: while residual activity is still above the start threshold,
    # keep sampling as long as consecutive masks stay similar; a sudden jump
    # in mean activity ends the wait.
    while fgmask.mean() > MOVE_START_THRESHOLD:
        frame = perspective_transform(video_capture_thread.get_frame(), pts1)
        candidate = move_fgbg.apply(normalize_illumination(frame))
        motion_fgbg.apply(frame)
        if abs(candidate.mean() - fgmask.mean()) > MOVE_MEAN_DIFFERENCE_THRESHOLD:
            break
        fgmask = candidate

    # Close small holes in the mask before it is used for move detection.
    closing_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, closing_kernel)

    return frame, fgmask
def waitUntilMotionCompletes():
    """Spin until global scene motion drops below MOTION_END_THRESHOLD."""
    settled = False
    while not settled:
        frame = perspective_transform(video_capture_thread.get_frame(), pts1)
        settled = motion_fgbg.apply(frame).mean() < MOTION_END_THRESHOLD
def initialize_background_subtractors():
    """Warm up both background-subtractor models until the scene is static.

    Feeds frames to ``move_fgbg`` (on illumination-normalized input) and to
    ``motion_fgbg`` (raw warped frame) until the motion mask's mean activity
    falls below MOTION_END_THRESHOLD, i.e. both models have absorbed the
    current board state as background.
    """
    while True:
        frame = video_capture_thread.get_frame()
        # Warp to the board-aligned view before feeding the models.
        frame = perspective_transform(frame, pts1)
        move_fgbg.apply(normalize_illumination(frame))
        fgmask = motion_fgbg.apply(frame)
        mean = fgmask.mean()
        if mean < MOTION_END_THRESHOLD:
            break
            # NOTE(review): the lines below are a spliced fragment from a
            # different function (chessboard corner extrapolation / board
            # calibration). As pasted they sit after a `break` (unreachable)
            # and reference names not defined in this file view (corner1,
            # corner2, row, augmented_corners, gray, cap). The enclosing
            # loop header is also missing, so the `break` at module level is
            # a syntax error. Needs reconstruction from the original source
            # — TODO confirm provenance before fixing.
            x = corner1[0][0] + (corner1[0][0] - corner2[0][0])
            y = corner1[0][1] + (corner1[0][1] - corner2[0][1])
            row.append((x, y))

        augmented_corners.append(row)
        print(augmented_corners)

        # Rotate the corner grid until its [0][0] corner is top-left
        # (presumably a 9x9 grid of chessboard corners — TODO confirm).
        while augmented_corners[0][0][0] > augmented_corners[8][8][0] or augmented_corners[0][0][1] > \
                augmented_corners[8][8][1]:
            rotateMatrix(augmented_corners)

        print(augmented_corners)
        # The four outer corners define the perspective warp source quad.
        pts1 = np.float32([list(augmented_corners[0][0]), list(augmented_corners[8][0]), list(augmented_corners[0][8]),
                           list(augmented_corners[8][8])])

        empty_board = perspective_transform(gray, pts1)
        #cv2.imwrite("empty_board.jpg", empty_board)

        # Overlay each grid coordinate label on the frame for visual debugging.
        for i in range(len(augmented_corners)):
            for j in range(len(augmented_corners[i])):
                frame = cv2.putText(frame, str(i) + "," + str(j), augmented_corners[i][j], cv2.FONT_HERSHEY_SIMPLEX,
                                    0.5, (255, 0, 0), 1, cv2.LINE_AA)
        cv2.imshow('frame', frame)
        cv2.waitKey(0)
        break

    cv2.imshow('frame', frame)
    if cv2.waitKey(3) & 0xFF == ord('q'):
        break

cap.release()
# Exemplo n.º 5   (scrape artifact: stray example-number label, not Python code)
# 0               (scrape artifact)
def lanefinding_pipeline(image, left_lane_line, right_lane_line):
    """Run the full lane-detection pipeline on a single road image.

    Steps: undistort -> binary threshold -> perspective warp ->
    sliding-window lane fit -> per-line curvature -> draw lane overlay.

    Args:
        image: raw road frame as read from the camera/video.
        left_lane_line, right_lane_line: Line state objects; replaced by the
            updated objects returned from sliding_window().

    Returns:
        The input image annotated with the detected lane region, curvature
        and vehicle-offset information.

    NOTE(review): relies on module-level calibration globals (mtx, dist,
    src, dest) — confirm they are initialized before the first call.
    """
    undist = camera_cal(image, mtx, dist)
    thresholded = threshold(undist)
    warped, M, M_inv = perspective_transform(thresholded, mtx, dist, src, dest)

    # Locate lane pixels from scratch each frame (no prior-frame search).
    left_lane_line, right_lane_line, car_center, lane_center, y, out_img = sliding_window(
        warped, left_lane_line, right_lane_line)

    # Radius of curvature per lane line, from the fitted polynomials.
    left_curve = curvature(left_lane_line.fit_x, left_lane_line.fit_y)
    right_curve = curvature(right_lane_line.fit_x, right_lane_line.fit_y)

    # Project the detected lane back onto the original (undistorted) view.
    result = draw_lane_lines(image, undist, warped, left_lane_line.fit_x,
                             right_lane_line.fit_x, y, M_inv, car_center,
                             lane_center, left_curve, right_curve)

    return result
        # NOTE(review): orphaned fragment — duplicates the warm-up loop body
        # of initialize_background_subtractors() above, but has no enclosing
        # def/while here (the `break` has no loop). Appears to be a
        # paste/splice artifact of the scrape — TODO reconcile with the
        # original source and delete or re-home this block.
        frame = video_capture_thread.get_frame()
        frame = perspective_transform(frame, pts1)
        move_fgbg.apply(normalize_illumination(frame))
        fgmask = motion_fgbg.apply(frame)
        mean = fgmask.mean()
        if mean < MOTION_END_THRESHOLD:
            break


initialize_background_subtractors()

speech_thread.put_text("Game started")

# Main game loop: watch the board until the chess game reaches a terminal
# state, detecting and registering one physical move per motion episode.
while not game.board.is_game_over():
    frame = video_capture_thread.get_frame()
    frame = perspective_transform(frame, pts1)
    fgmask = motion_fgbg.apply(frame)
    mean = fgmask.mean()
    # A spike in motion activity signals a hand entering the scene.
    if mean > MOTION_START_THRESHOLD:
        # cv2.imwrite("prev_frame.jpg", frame)
        waitUntilMotionCompletes()  # wait for the hand to leave the frame
        frame, fgmask = waitUntilMoveCompletes()  # settle the move mask
        # learningRate=1.0 makes the subtractor absorb the post-move board
        # as the new background in a single step (OpenCV apply() semantics).
        move_fgbg.apply(normalize_illumination(frame), learningRate=1.0)
        # Second call discards its result — presumably just lets the model
        # settle after the hard background reset; TODO confirm intent.
        waitUntilMoveCompletes()
        if game.register_move(fgmask):
            pass
            # cv2.imwrite(game.executed_moves[-1] + " frame.jpg", frame)
            # cv2.imwrite(game.executed_moves[-1] + " mask.jpg", fgmask)
        else:
            pass
            # cv2.imwrite("frame_fail.jpg", frame)