Example #1

import cv2
import numpy as np

# Lane, find_edges, find_perspective_points, get_perspective_transform,
# histogram_base_points, sliding_window, LEFT_LANE_BASE_PT and
# RIGHT_LANE_BASE_PT are assumed to be defined elsewhere in the project.


def frame_pipeline(frame, camera_properties, cache_length=20):

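    # Per-frame state is cached on the function object itself; the first call
    # builds the Lane trackers and stores the camera calibration data.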
    if getattr(frame_pipeline, "cache", None) is None:

        left_lane = Lane(LEFT_LANE_BASE_PT * frame.shape[0],
                         frame.shape[:2],
                         cache_length=cache_length)

        right_lane = Lane(RIGHT_LANE_BASE_PT * frame.shape[0],
                          frame.shape[:2],
                          cache_length=cache_length)

        cache = dict(cam_mtx=camera_properties["mtx"],
                     cam_dist=camera_properties["dist"],
                     warp_m=None,
                     left=left_lane,
                     right=right_lane,
                     base_pts=None)

    else:
        cache = frame_pipeline.cache

    left_lane, right_lane = cache['left'], cache['right']

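    # Remove lens distortion using the calibrated camera matrix and
    # distortion coefficients.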
    mtx = camera_properties["mtx"]
    dist = camera_properties["dist"]
    undist = cv2.undistort(frame, mtx, dist, None, mtx)

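    # Estimate the perspective transform to the bird's-eye view if it has not
    # been cached yet; later frames reuse the cached matrices.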
    if cache['warp_m'] is None:
        src = find_perspective_points(undist)
        warp_m, warp_minv = get_perspective_transform(frame, src_in=src)

        if src is not None:
            # Cache the transform only when frame-specific source points were found
            cache['warp_m'] = warp_m
            cache['warp_minv'] = warp_minv
    else:
        warp_m, warp_minv = cache['warp_m'], cache['warp_minv']

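    # Extract an edge map from the undistorted frame and warp it into the
    # top-down (bird's-eye) view used for lane fitting.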
    edges = find_edges(undist)
    warp_edges = cv2.warpPerspective(edges,
                                     warp_m, (frame.shape[1], frame.shape[0]),
                                     flags=cv2.INTER_LINEAR)

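    # Starting x-positions of the two lane lines, estimated from a histogram
    # of the warped edge image; if they cannot be found, return the
    # undistorted frame unannotated.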
    base_pts = cache['base_pts']
    if base_pts is None:
        new_base_pts = histogram_base_points(warp_edges)

        if new_base_pts is not None:
            base_pts = new_base_pts
        else:
            # Could not find new base points
            return undist

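    # Fall back to a full sliding-window search when either lane has no
    # current fit or has been dropped for too many frames; otherwise refine
    # the previous fit directly from the warped edge mask.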
    left_lost = (left_lane.current_xfit is None
                 or left_lane.dropped_frames > 16)
    right_lost = (right_lane.current_xfit is None
                  or right_lane.dropped_frames > 16)
    if left_lost or right_lost:
        left_lane.radius_of_curvature = None
        right_lane.radius_of_curvature = None
        sliding_window(warp_edges, left_lane, right_lane, base_pts)
    else:
        left_lane.detect_from_mask(warp_edges)
        right_lane.detect_from_mask(warp_edges)

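    # Persist the updated state for the next frame.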
    cache['base_pts'] = base_pts
    frame_pipeline.cache = cache

    # Create an image to draw the lines on
    color_warp = np.zeros_like(frame).astype(np.uint8)

    yvals = left_lane.yvals
    left_fitx = left_lane.current_xfit
    right_fitx = right_lane.current_xfit

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, yvals]))])
    pts_right = np.array(
        [np.flipud(np.transpose(np.vstack([right_fitx, yvals])))])
    pts = np.hstack((pts_left, pts_right))

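    # Fill the region between the two lane fits in green.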
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Draw lane markers
    pts = np.transpose(np.vstack([left_lane.current_xfit, left_lane.yvals]))
    pts = pts.reshape((-1, 1, 2)).astype(np.int32)
    cv2.drawContours(color_warp, pts, -1, (255, 0, 0), thickness=30)
    pts = np.transpose(np.vstack([right_lane.current_xfit, right_lane.yvals]))
    pts = pts.reshape((-1, 1, 2)).astype(np.int32)
    cv2.drawContours(color_warp, pts, -1, (0, 0, 255), thickness=30)

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, warp_minv,
                                  (frame.shape[1], frame.shape[0]))

    # Combine the result with the original image
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)

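    # Radius of curvature for each lane and the vehicle's lateral offset from
    # the lane center, assuming the camera is mounted at the car's midline.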
    left_r = left_lane.radius_of_curvature
    right_r = right_lane.radius_of_curvature
    middle = (left_fitx[-1] + right_fitx[-1]) // 2
    veh_pos = frame.shape[1] // 2

    # Positive if the vehicle is right of lane center, negative if left
    dx = (veh_pos - middle) * Lane.X_MTS_PER_PIX

    results_str = '{} radius of curvature = {:.2f} m'

    font = cv2.FONT_HERSHEY_TRIPLEX
    cv2.putText(result, results_str.format("Left", left_r), (80, 50), font,
                1, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(result, results_str.format("Right", right_r), (80, 80), font,
                1, (255, 255, 255), 2, cv2.LINE_AA)

    cv2.putText(
        result, "Vehicle position: {:.2f} m {} of center".format(
            abs(dx), 'left' if dx < 0 else 'right'), (80, 110), font, 1,
        (255, 255, 255), 2, cv2.LINE_AA)
    is_tracking = left_lane.detected or right_lane.detected

    cv2.putText(result, 'Tracking Locked' if is_tracking else 'Tracking Lost',
                (80, 140), font, 1, (0, 255, 0) if is_tracking else
                (255, 0, 0), 3, cv2.LINE_AA)

    cache['left'] = left_lane
    cache['right'] = right_lane

    return result
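
The pipeline keeps its state on the function object, so frame_pipeline.cache should be reset to None before processing a new clip. The sketch below is a minimal driver loop for illustration only; the calibration file name and video path are placeholders, and the pickle file is assumed to hold the "mtx" and "dist" values produced by a prior cv2.calibrateCamera() run.

import pickle

import cv2

# Placeholder path; "calibration.p" is assumed to contain {"mtx": ..., "dist": ...}
with open("calibration.p", "rb") as f:
    camera_properties = pickle.load(f)

frame_pipeline.cache = None  # reset the per-clip state before a new video

cap = cv2.VideoCapture("project_video.mp4")  # placeholder input path
while True:
    ok, frame = cap.read()
    if not ok:
        break
    annotated = frame_pipeline(frame, camera_properties)
    cv2.imshow("lane detection", annotated)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()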