Beispiel #1
0
def test(img):
    """
    Crop a BGR image down to the area around its biggest red region.

    ASSUMES:
        -img: numpy.array, cv2 image in BGR,
    RETURNS:
        -img_ROI: numpy.array, cv2 image in BGR, image that shows ONLY around
            the biggest red region in img
        -centerX: int, x coordinate of the center of the ROI in img_ROI
        -centerY: int, y coordinate of the center of the ROI in img_ROI
        -(h, w): tuple, ROI height and width as reported by mask4ROI
    """
    # Keep only the red pixels, then clean speckle noise out of the mask.
    _, red_only = color_filter(img)
    denoised = noise_cancel(red_only)

    # Work on a fixed 200x100 downscale of both the mask and the source.
    denoised_small = cv2.resize(denoised, (200, 100))
    source_small = cv2.resize(img, (200, 100))

    # Build the ROI mask around the biggest colored blob and apply it.
    mask, centerX, centerY, (h, w) = mask4ROI(denoised_small, isColoredPixel)
    img_ROI = region_of_interest(source_small, mask)

    return img_ROI, centerX, centerY, (h, w)
def final_viz(undist, left_fit, right_fit, m_inv, left_curve, right_curve,
              vehicle_offset):
    """
    Compose the final annotated lane-detection frame.

    ASSUMES:
        -undist: numpy.array, undistorted BGR frame; the overlay canvas below
            is hard-coded to 1080x1920, so the frame is expected to match
        -left_fit, right_fit: 2nd-order polynomial coefficients (x = f(y))
            for the left/right lane boundary
        -m_inv: inverse perspective matrix (bird's-eye -> camera view)
        -left_curve, right_curve: curvature radii in meters
        -vehicle_offset: lateral offset from lane center in meters
    RETURNS:
        -result: numpy.array, frame with the lane polygon, curvature/offset
            text, and three debug thumbnails drawn on it
    """
    ploty = np.linspace(0, undist.shape[0] - 1, undist.shape[0])
    left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]

    # Blank canvas for the lane polygon (NOTE: hard-coded to 1920x1080).
    color_warp = np.zeros((1080, 1920, 3), dtype='uint8')

    # Lane boundary points; the right side is flipped so the polygon closes.
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array(
        [np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp the lane polygon back into camera view and blend it in.
    newwarp = cv2.warpPerspective(color_warp, m_inv,
                                  (undist.shape[1], undist.shape[0]))
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)

    # Mean curvature radius and center offset, shown at the top right.
    avg_curve = (left_curve + right_curve) / 2
    string1 = 'R_mean : %.1f m' % avg_curve
    if left_fit[0] > 0 and avg_curve > 500:
        string2 = "gentle right"
    elif left_fit[0] > 0 and avg_curve <= 500:
        string2 = "hard right"
    elif left_fit[0] < 0 and avg_curve > 500:
        string2 = "gentle left"
    elif left_fit[0] < 0 and avg_curve <= 500:
        string2 = "hard left"
    else:
        # BUGFIX: left_fit[0] == 0 previously left string2 unbound, and the
        # cv2.putText call below raised UnboundLocalError. Treat a zero
        # curvature coefficient as driving straight.
        string2 = "straight"
    string3 = 'central offset: %.1f m' % vehicle_offset

    font = cv2.FONT_HERSHEY_SIMPLEX
    # Note the y-coordinates: string3 (offset) sits between string1 and
    # string2 on screen.
    cv2.putText(result, string1, (1500, 100), font, 0.9, (0, 0, 0), 4,
                cv2.LINE_AA)
    cv2.putText(result, string2, (1500, 300), font, 0.9, (0, 0, 0), 4,
                cv2.LINE_AA)
    cv2.putText(result, string3, (1500, 200), font, 0.9, (0, 0, 0), 4,
                cv2.LINE_AA)

    # Debug thumbnails along the top: undistorted view, bird's-eye view,
    # and the lane-line search visualization.
    small_undist = cv2.resize(undist, (0, 0), fx=0.2, fy=0.2)
    # The bird's-eye view is simply the perspective transform of the frame.
    bird_eye, _, _, _ = perspective_transform(undist)
    small_bird_eye = cv2.resize(bird_eye, (0, 0), fx=0.2, fy=0.2)

    # Re-run the detection pipeline on this frame to produce the
    # line-search thumbnail (ROI mask -> thresholds -> warp -> fit).
    img = cv2.cvtColor(undist, cv2.COLOR_BGR2RGB)
    vertices = np.int32([[(50, 1080), (700, 760), (920, 760), (1400, 1080)]])
    masked_image = region_of_interest(img, vertices)
    # Paint over the ROI borders so they do not register as edges.
    cv2.line(masked_image, (50, 1080), (700, 760), (0, 0, 0), 10)
    cv2.line(masked_image, (920, 760), (1400, 1080), (0, 0, 0), 25)
    img, abs_bin, mag_bin, dir_bin, hls_bin = combined_thresh(masked_image)
    # NOTE: this rebinds the m_inv parameter; the original m_inv was already
    # consumed by the warpPerspective call above.
    binary_warped, binary_unwarped, m, m_inv = perspective_transform(img)
    ret = line_fit(binary_warped)
    out_image = viz2(binary_warped, ret)
    small_out_image = cv2.resize(out_image, (0, 0), fx=0.2, fy=0.2)

    # Paste the three thumbnails side by side along the top of the frame.
    x1 = 0
    y1 = 100
    x2 = small_out_image.shape[0]
    y2 = small_out_image.shape[1]
    y3 = small_out_image.shape[1] * 2
    y4 = small_out_image.shape[1] * 3
    result[x1 + 100:x2 + 100, y1:y2 + 100, :] = small_undist
    result[x1 + 100:x2 + 100, y2 + 200:y3 + 200, :] = small_bird_eye
    result[x1 + 100:x2 + 100, y3 + 300:y4 + 300, :] = small_out_image
    cv2.putText(result, "Undist", (100, 80), font, 0.9, (0, 0, 0), 4,
                cv2.LINE_AA)
    cv2.putText(result, "Bird's Eye", (580, 80), font, 0.9, (0, 0, 0), 4,
                cv2.LINE_AA)
    cv2.putText(result, "Line Search", (1080, 80), font, 0.9, (0, 0, 0), 4,
                cv2.LINE_AA)

    return result
Beispiel #3
0
            result = False
            return result
    return result


# Main capture loop: difference each frame against the first frame to
# detect motion inside a road-shaped region of interest.
while True:
    (ret, frame) = camera.read()
    if not ret:
        print("faile to get frame")  # NOTE(review): typo — "failed to get frame"
        break

    h, w = frame.shape[:2]
    # Trapezoid over the lower middle of the frame; restricts processing
    # to the region of interest.
    vertices = np.array([[30, h - 1], [w / 5, h / 2 - h / 10 + 15],
                         [w - w / 5, h / 2 - h / 10 + 15], [w - 30, h - 1]],
                        np.int32)  #for set the Roi
    Roi_frame = region_of_interest.region_of_interest(frame, [vertices])

    gray = cv2.cvtColor(Roi_frame, cv2.COLOR_BGR2GRAY)

    # The first captured frame becomes the static reference for differencing.
    if first_frame is None:
        first_frame = gray
        continue
    # Pixel-wise difference against the reference highlights changed pixels.
    delta_frame = cv2.absdiff(first_frame, gray)

    blur_delta = cv2.GaussianBlur(delta_frame, (3, 3), 0)
    # Inverted adaptive mean threshold turns the blurred difference into a
    # binary mask that tolerates uneven lighting (block size 21, C=10).
    ad_th_delta = cv2.adaptiveThreshold(blur_delta, 255,
                                        cv2.ADAPTIVE_THRESH_MEAN_C,
                                        cv2.THRESH_BINARY_INV, 21, 10)
    # Small sample windows cut from the thresholded mask; the second slice
    # continues beyond this excerpt.
    Roi_sub_frame0 = ad_th_delta[(h // 2 + h // 10) + 10:(h // 2 + h // 10) +
                                 40, w // 2 - 15:w // 2 + 15]
    Roi_sub_frame1 = ad_th_delta[(h // 4) * 3 - 10:(h // 4 * 3) + 20,
Beispiel #4
0
    Driver main for module_orientation

    To test module_orientation, use
    "python module_orientation.py -i {depthimage.npy}"

    Also note that region_of_interest should be in the same folder as module_orientation
    """
    import argparse
    from region_of_interest import region_of_interest

    # Parse command-line options; the only argument is the .npy input path.
    parser = argparse.ArgumentParser(description="Read .npy file and test for get_module_depth.\
                                            To read a .npy file, type \"python get_module_depth.py --i (image name).npy)\"")
    # Path to the .npy depth image to load.
    parser.add_argument("-i", "--input", type=str, help="Path to the .npy file")
    # Parse the command line arguments to an object
    args = parser.parse_args()

    # The input file is mandatory; fail fast when it is missing.
    if args.input:
        depthNpy = args.input
    else:
        raise FileNotFoundError("No input parameter has been given. For help type --help")

    depthImage = np.load(depthNpy)

    # Hard-coded coordinates for the test depth image. The pixel lookup is
    # depthImage[row][col], so center presumably holds (x, y) = (650, 560)
    # — TODO(review): confirm the expected order against region_of_interest.
    center = (650, 560)
    roi = region_of_interest(depthImage, depthImage[560][650], center)

    get_module_orientation(roi)
# Batch-read the test images and run each through the lane pipeline,
# saving an image of every intermediate stage.
image_files = os.listdir('test_images')
for image_file in image_files:
    out_image_file = image_file.split('.')[0] + '.png'  # write to png format
    img = mpimg.imread('test_images/' + image_file)

    # Undistort the image with the camera calibration (mtx, dist).
    img = cv2.undistort(img, mtx, dist, None, mtx)
    plt.imshow(img)
    plt.savefig('example_images/undistort' + out_image_file)

    # ROI masking
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Polygon corner points (bottom-left, top-left, top-right, bottom-right)
    vertices = np.int32([[(50, 1080), (700, 760), (920, 760), (1400, 1080)]])
    masked_image = region_of_interest(img, vertices)
    # Paint over the ROI borders to suppress boundary artifacts; line
    # widths tuned as appropriate.
    cv2.line(masked_image, (50, 1080), (700, 760), (0, 0, 0), 10)
    cv2.line(masked_image, (920, 760), (1400, 1080), (0, 0, 0), 25)

    # Binarization (combined gradient / magnitude / direction / HLS thresholds)
    img, abs_bin, mag_bin, dir_bin, hls_bin = combined_thresh(masked_image)
    plt.imshow(img, cmap='gray', vmin=0, vmax=1)
    plt.savefig('example_images/binary' + out_image_file)

    # Perspective transform to a bird's-eye view
    img, binary_unwarped, m, m_inv = perspective_transform(img)
    plt.imshow(img, cmap='gray', vmin=0, vmax=1)
    plt.savefig('example_images/warped' + out_image_file)

    # Polynomial fit (loop body continues beyond this excerpt)