def renderLaneLines(image):
    # Build the geometry of the detected lines
    laneLines = buildLines(image)
    h = image.shape[0]
    w = image.shape[1]

    # Make a mask on which to draw just the lines
    mask = np.zeros(shape=(h, w), dtype=np.uint8)
    cv2.line(mask, (laneLines[0][0], laneLines[0][1]),
             (laneLines[0][2], laneLines[0][3]), [255, 0, 0], 4)
    cv2.line(mask, (laneLines[1][0], laneLines[1][1]),
             (laneLines[1][2], laneLines[1][3]), [255, 0, 0], 4)

    # Cull the mask with a region of interest
    # We want roughly the bottom 40% of the image
    p1 = (10, h - 10)
    p2 = (10, int(h * 0.6))
    p3 = (w - 10, int(h * 0.6))
    p4 = (w - 10, h - 10)
    cullVerts = np.array([[p1, p2, p3, p4]], dtype=np.int32)
    mask = region_of_interest(mask, cullVerts)
    rgbMask = np.uint8(mask)

    # Make the mask 3-channel, with the line pixels in the red channel
    if len(rgbMask.shape) == 2:
        rgbMask = np.dstack(
            (rgbMask, np.zeros_like(rgbMask), np.zeros_like(rgbMask)))

    return weighted_img(rgbMask, image, 0.6, 1.0, 0)
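# renderLaneLines leans on region_of_interest and weighted_img, which are not
# shown in this excerpt. A minimal sketch of the usual OpenCV implementations
# follows; this is an assumption about how these helpers are defined here, not
# the authoritative versions.
import numpy as np
import cv2


def region_of_interest(img, vertices):
    # Keep only the pixels inside the polygon defined by `vertices`;
    # everything outside the polygon is blacked out.
    mask = np.zeros_like(img)
    if len(img.shape) > 2:
        ignore_mask_color = (255,) * img.shape[2]
    else:
        ignore_mask_color = 255
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    return cv2.bitwise_and(img, mask)


def weighted_img(img, initial_img, α=0.8, β=1.0, γ=0.0):
    # Blend the line overlay `img` onto `initial_img`:
    # output = initial_img * α + img * β + γ
    return cv2.addWeighted(initial_img, α, img, β, γ)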
def process_image(image):
    # Pull out the x and y sizes and make a copy of the image
    ysize = image.shape[0]
    xsize = image.shape[1]
    region_select = np.copy(image)

    # Convert to grayscale
    image_gray = helpers.grayscale(image)

    # Blurring
    blurred_image = helpers.gaussian_blur(image_gray, parameters.Blurring.kernel_size)

    # Canny transform
    edges = helpers.canny(blurred_image, parameters.Canny.low_threshold,
                          parameters.Canny.high_threshold)

    # Four-sided polygon to mask
    imshape = image.shape
    lower_left = (50, imshape[0])
    upper_left = (400, 320)
    upper_right = (524, 320)
    lower_right = (916, imshape[0])
    parameters.Masking.vertices = np.array(
        [[lower_left, upper_left, upper_right, lower_right]], dtype=np.int32)

    # Masking
    masked_edges = helpers.region_of_interest(edges, parameters.Masking.vertices)

    # Run Hough on the edge-detected image
    hough_lines, raw_hough_lines_img = helpers.hough_lines(
        masked_edges, parameters.Hough.rho, parameters.Hough.theta,
        parameters.Hough.threshold, parameters.Hough.min_line_length,
        parameters.Hough.max_line_gap)

    # Classify left and right lane lines
    left_lane_lines, right_lane_lines = helpers.classify_left_right_lanes(hough_lines)

    # Raw hough_lines image
    helpers.draw_lines(raw_hough_lines_img, hough_lines, color=[255, 0, 0], thickness=2)

    # RANSAC-fit left and right lane lines
    fitted_left_lane_points = helpers.ransac_fit_hough_lines(left_lane_lines)
    fitted_right_lane_points = helpers.ransac_fit_hough_lines(right_lane_lines)
    helpers.draw_model(image, fitted_left_lane_points, color=[255, 0, 0], thickness=2)
    helpers.draw_model(image, fitted_right_lane_points, color=[255, 0, 0], thickness=2)

    # 1D interpolator - does not work as well as RANSAC, so it's commented out
    # interpolated_left_lane_line = helpers.interpolate_hough_lines(left_lane_lines)
    # interpolated_right_lane_line = helpers.interpolate_hough_lines(right_lane_lines)
    # helpers.draw_model(image, interpolated_left_lane_line, color=[255, 0, 0], thickness=2)
    # helpers.draw_model(image, interpolated_right_lane_line, color=[255, 0, 0], thickness=2)

    # Superpose images
    # superposed_image = helpers.weighted_img(image, raw_hough_lines_img, α=0.8, β=1., λ=0.)

    return image
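# classify_left_right_lanes is called above but not defined in this excerpt.
# A plausible minimal sketch, assuming it splits Hough segments by slope sign
# (with the image origin at the top-left, the left marker slopes negatively and
# the right marker positively); the min_abs_slope parameter is a hypothetical
# addition for filtering near-horizontal segments, not part of the original API.
def classify_left_right_lanes(hough_lines, min_abs_slope=0.3):
    # Split Hough segments into left/right lane candidates by slope sign.
    left_lane_lines, right_lane_lines = [], []
    for line in hough_lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # skip vertical segments to avoid division by zero
            slope = (y2 - y1) / (x2 - x1)
            if abs(slope) < min_abs_slope:
                continue  # skip near-horizontal segments (shadows, car hoods)
            if slope < 0:
                left_lane_lines.append(line)
            else:
                right_lane_lines.append(line)
    return left_lane_lines, right_lane_lines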
def process_image(image):
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    # TODO: put your pipeline here,
    # you should return the final output (image where lines are drawn on lanes)
    hsvMasked = helpers.hsvMaskConv(image)
    gray = helpers.grayscale(hsvMasked)

    # Define a kernel size and apply Gaussian smoothing
    kernel_size = helpers.kernel_size
    blur_gray = helpers.gaussian_blur(gray, kernel_size)

    # Define our parameters for Canny and apply
    low_threshold = helpers.low_threshold
    high_threshold = helpers.high_threshold
    edges = helpers.canny(blur_gray, low_threshold, high_threshold)

    # Next we'll isolate the region of interest to apply the Hough transform upon
    mask = np.zeros_like(edges)
    ignore_mask_color = 255
    (imHeight, imWidth, __) = image.shape
    vertices = np.array([[(0.10 * imWidth, imHeight),
                          (0.45 * imWidth, 0.60 * imHeight),
                          (0.55 * imWidth, 0.60 * imHeight),
                          (0.90 * imWidth, imHeight)]], dtype=np.int32)
    masked_edges = helpers.region_of_interest(edges, vertices)

    # Define the Hough transform parameters
    # Make a blank the same size as our image to draw on
    rho = helpers.rho                          # distance resolution in pixels of the Hough grid
    theta = helpers.theta                      # angular resolution in radians of the Hough grid
    threshold = helpers.threshold              # minimum number of votes (intersections in Hough grid cell)
    min_line_length = helpers.min_line_length  # minimum number of pixels making up a line
    max_line_gap = helpers.max_line_gap        # maximum gap in pixels between connectable line segments

    # Run Hough on edge detected image
    # Output "lines" is an array containing endpoints of detected line segments
    line_img, lines = helpers.hough_lines(masked_edges, rho, theta, threshold,
                                          min_line_length, max_line_gap)

    # Draw the lines on the edge image
    result = helpers.weighted_img(line_img, image, α=0.8, β=1., γ=0.)
    return result
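# hsvMaskConv is not defined in this snippet. A rough sketch of what such a
# helper typically does is shown below: masking white and yellow lane pixels in
# HSV space. The function body and the threshold values are illustrative
# assumptions, not the author's implementation, and would need tuning.
import numpy as np
import cv2


def hsvMaskConv(image):
    # Keep only pixels that look white or yellow (typical lane-marking
    # colors) and black out everything else.
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    white_mask = cv2.inRange(hsv, np.array([0, 0, 200]), np.array([180, 30, 255]))
    yellow_mask = cv2.inRange(hsv, np.array([15, 80, 120]), np.array([35, 255, 255]))
    lane_mask = cv2.bitwise_or(white_mask, yellow_mask)
    return cv2.bitwise_and(image, image, mask=lane_mask)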
def process_image(image):
    # CONVERT TO GRAYSCALE
    gray = helpers.grayscale(image)

    # APPLY GAUSSIAN BLUR
    kernel_size = 7  # Must be an odd number.
    blur_gray = helpers.gaussian_blur(gray, kernel_size)

    # APPLY CANNY EDGE DETECTOR
    low_threshold = 70
    high_threshold = 140
    edges = helpers.canny(blur_gray, low_threshold, high_threshold)

    # DEFINE REGION OF INTEREST
    imshape = image.shape
    line_height = 330
    vertices = np.array([[(0, imshape[0]), (435, line_height),
                          (540, line_height), (imshape[1], imshape[0])]],
                        dtype=np.int32)
    masked_edges = helpers.region_of_interest(edges, vertices)

    # APPLY HOUGH TRANSFORMATION
    rho = 2                # distance resolution in pixels of the Hough grid
    theta = np.pi / 180    # angular resolution in radians of the Hough grid
    threshold = 50         # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 100  # minimum number of pixels making up a line
    max_line_gap = 100     # maximum gap in pixels between connectable line segments

    # Run Hough on edge detected image
    # Output "lines" is an array containing endpoints of detected line segments
    lines = helpers.hough_lines(masked_edges, rho, theta, threshold,
                                min_line_length, max_line_gap, line_height)

    # Draw the lines on the edge image
    lines_edges = helpers.weighted_img(lines, image, 0.8, 1, 0)
    plt.imshow(lines_edges, cmap='gray')
    return lines_edges
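# These process_image variants are written to be dropped into a video pipeline
# (see the "3 channel ... for processing video" note above). A typical way to
# run one over a clip, assuming moviepy is available and using an illustrative
# file name that is not taken from the original code:
from moviepy.editor import VideoFileClip

clip = VideoFileClip("test_video.mp4")            # hypothetical input clip
annotated = clip.fl_image(process_image)          # apply the pipeline frame by frame
annotated.write_videofile("test_video_out.mp4",   # hypothetical output name
                          audio=False)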
# Define our parameters for Canny and apply
low_threshold = helpers.low_threshold
high_threshold = helpers.high_threshold
edges = helpers.canny(blur_gray, low_threshold, high_threshold)

# Next we'll isolate the region of interest to apply the Hough transform upon
mask = np.zeros_like(edges)
ignore_mask_color = 255
(imHeight, imWidth, __) = currentImage.shape
vertices = np.array([[(0.10 * imWidth, imHeight),
                      (0.45 * imWidth, 0.60 * imHeight),
                      (0.55 * imWidth, 0.60 * imHeight),
                      (0.90 * imWidth, imHeight)]], dtype=np.int32)
masked_edges = helpers.region_of_interest(edges, vertices)

# Define the Hough transform parameters
# Make a blank the same size as our image to draw on
rho = helpers.rho                          # distance resolution in pixels of the Hough grid
theta = helpers.theta                      # angular resolution in radians of the Hough grid
threshold = helpers.threshold              # minimum number of votes (intersections in Hough grid cell)
min_line_length = helpers.min_line_length  # minimum number of pixels making up a line
max_line_gap = helpers.max_line_gap        # maximum gap in pixels between connectable line segments

line_image = np.copy(currentImage) * 0  # creating a blank to draw lines on

# Run Hough on edge detected image
# Output "lines" is an array containing endpoints of detected line segments
line_img, lines = helpers.hough_lines(masked_edges, rho, theta, threshold,
                                      min_line_length, max_line_gap)

color_edges = np.dstack((masked_edges, masked_edges, masked_edges))
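# This fragment stops after building color_edges. A typical final step, based
# on the other pipelines in this file rather than on the missing part of the
# original fragment, would blend the Hough-line image over the original frame
# (or over color_edges for a debug view of the edge mask):
result = cv2.addWeighted(currentImage, 0.8, line_img, 1.0, 0.0)
# debug_view = cv2.addWeighted(color_edges, 0.8, line_img, 1.0, 0.0)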
# Canny Transform
edges = helpers.canny(blurred_image, parameters.Canny.low_threshold,
                      parameters.Canny.high_threshold)

# Four sided polygon to mask
imshape = image.shape
lower_left = (50, imshape[0])
upper_left = (400, 320)
upper_right = (524, 320)
lower_right = (916, imshape[0])
parameters.Masking.vertices = np.array(
    [[lower_left, upper_left, upper_right, lower_right]], dtype=np.int32)

# masking
masked_edges = helpers.region_of_interest(edges, parameters.Masking.vertices)

# Run Hough on edge detected image
hough_lines, hough_image = helpers.hough_lines(
    masked_edges, parameters.Hough.rho, parameters.Hough.theta,
    parameters.Hough.threshold, parameters.Hough.min_line_length,
    parameters.Hough.max_line_gap)

# classify left and right lane lines
left_lane_lines, right_lane_lines = helpers.classify_left_right_lanes(
    hough_lines)

# RANSAC fit left and right lane lines
fitted_left_lane_points = helpers.ransac_fit_hough_lines(left_lane_lines)
fitted_right_lane_points = helpers.ransac_fit_hough_lines(right_lane_lines)
helpers.draw_model(image, fitted_left_lane_points, color=[255, 0, 0],
                   thickness=2)
helpers.draw_model(image, fitted_right_lane_points, color=[255, 0, 0],
                   thickness=2)
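# ransac_fit_hough_lines appears in both RANSAC pipelines but is not defined in
# this excerpt. A minimal sketch of the idea, assuming scikit-learn's
# RANSACRegressor and assuming the helper returns two endpoints of the fitted
# line (the return format and the fit-against-y choice are assumptions):
import numpy as np
from sklearn.linear_model import RANSACRegressor


def ransac_fit_hough_lines(lane_lines):
    # Collect the endpoints of every Hough segment for one lane, robustly fit
    # x = f(y) with RANSAC (fitting against y avoids trouble with steep lanes),
    # and return two endpoints spanning the observed y-range.
    xs, ys = [], []
    for line in lane_lines:
        for x1, y1, x2, y2 in line:
            xs.extend([x1, x2])
            ys.extend([y1, y2])
    if not xs:
        return None
    model = RANSACRegressor()
    model.fit(np.array(ys).reshape(-1, 1), np.array(xs))
    y_top, y_bottom = min(ys), max(ys)
    x_top, x_bottom = model.predict(np.array([[y_top], [y_bottom]]))
    return [(int(x_top), int(y_top)), (int(x_bottom), int(y_bottom))]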
def buildLines(image):
    # Apply a gray scale
    grayScaleImage = grayscale(image)

    # Apply the Gaussian blur
    blurredImage = gaussian_blur(grayScaleImage, 15)

    # Apply Canny edge detection
    cannyImage = canny(blurredImage, 70, 100)

    # Make a ROI mask
    h = image.shape[0]
    w = image.shape[1]
    p1 = (int(w / 10), h - 10)
    p2 = (int(w * 3 / 7), int(h * 0.6))
    p3 = (int(w * 4 / 7), int(h * 0.6))
    p4 = (int(w * 9 / 10), h - 10)

    # Mask canny edges with ROI
    cullVerts = np.array([[p1, p2, p3, p4]], dtype=np.int32)
    cannyImage = region_of_interest(cannyImage, cullVerts)

    # Get lines from Hough space
    rho = 2
    theta = np.pi / 180
    threshold = 1
    minLineLength = 15
    maxLineGap = 5
    lines = cv2.HoughLinesP(cannyImage, rho, theta, threshold, np.array([]),
                            minLineLength, maxLineGap)

    # Transform into numpy arrays for easy processing
    # Each array represents [x1, y1, x2, y2]
    nplines = []
    for l in lines:
        nplines.append(
            np.array([
                np.float32(l[0][0]),
                np.float32(l[0][1]),
                np.float32(l[0][2]),
                np.float32(l[0][3])
            ]))

    # Keep only segments whose absolute slope is plausible for a lane line
    nplines = [
        l for l in nplines
        if 0.5 <= np.abs((l[3] - l[1]) / (l[2] - l[0])) <= 2
    ]

    # Sort the lines based on whether they are likely to be the left or right lane marker
    leftLaneLines = []
    rightLaneLines = []
    for l in nplines:
        sortLine(l, leftLaneLines, rightLaneLines)

    # Calculate the left lane line
    # We use the median here because it gives us better performance for video
    leftLaneOffset = np.median([(l[1] - (l[3] - l[1]) * l[0] / (l[2] - l[0]))
                                for l in leftLaneLines]).astype(int)
    leftLaneSlope = np.median([((l[3] - l[1]) / (l[2] - l[0]))
                               for l in leftLaneLines])
    # We use basic line algebra here, y_n = slope * x_n + offset
    leftLaneLine = np.array(
        [0, leftLaneOffset, -int(np.round(leftLaneOffset / leftLaneSlope)), 0])

    # Calculate the right lane line
    rightLaneOffset = np.median([(l[1] - (l[3] - l[1]) * l[0] / (l[2] - l[0]))
                                 for l in rightLaneLines]).astype(int)
    rightLaneSlope = np.median([((l[3] - l[1]) / (l[2] - l[0]))
                                for l in rightLaneLines])
    # Must account for the image origin being at the top-left corner here
    rightLaneLine = np.array([
        0, rightLaneOffset,
        int(np.round((cannyImage.shape[0] - rightLaneOffset) / rightLaneSlope)),
        cannyImage.shape[0]
    ])

    return leftLaneLine, rightLaneLine
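# sortLine is used by buildLines but not shown in this excerpt. A minimal
# sketch, assuming it routes each [x1, y1, x2, y2] segment to the left or right
# candidate list by slope sign (image origin at the top-left, so the left lane
# marker has negative slope and the right one positive):
def sortLine(line, leftLaneLines, rightLaneLines):
    # Route a segment [x1, y1, x2, y2] to the left or right candidate list.
    x1, y1, x2, y2 = line
    if x2 == x1:
        return  # ignore vertical segments; slope is undefined
    slope = (y2 - y1) / (x2 - x1)
    if slope < 0:
        leftLaneLines.append(line)
    else:
        rightLaneLines.append(line)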