def draw_window_centroids(img, window_centroids):
    # If we found any window centers
    if len(window_centroids) > 0:
        # Points used to draw all the left and right windows
        l_points = np.zeros_like(img)
        r_points = np.zeros_like(img)
        # Go through each level and draw the windows
        for level in range(0, len(window_centroids)):
            # window_mask is a function to draw window areas
            l_mask = window_mask(window_width, window_height, img,
                                 window_centroids[level][0], level)
            r_mask = window_mask(window_width, window_height, img,
                                 window_centroids[level][1], level)
            # Add graphic points from window mask here to total pixels found
            l_points[(l_points == 255) | (l_mask == 1)] = 255
            r_points[(r_points == 255) | (r_mask == 1)] = 255
        # Draw the results
        template = np.array(r_points + l_points, np.uint8)  # add both left and right window pixels together
        zero_channel = np.zeros_like(template)  # create a zero color channel
        template = np.array(cv2.merge((zero_channel, template, zero_channel)),
                            np.uint8)  # make window pixels green
        warpage = np.array(to_RGB(img), np.uint8)  # make the original road pixels 3 color channels
        output = cv2.addWeighted(warpage, 1.0, template, 0.5,
                                 0.0)  # overlay the original road image with window results
    # If no window centers found, just display the original road image
    else:
        output = np.array(cv2.merge((img, img, img)), np.uint8)
    return output
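# window_mask is called above but not defined in this section. A minimal sketch
# consistent with those calls, assuming the usual convolution-search layout in
# which level 0 is the bottom horizontal band of the image:
def window_mask(width, height, img_ref, center, level):
    mask = np.zeros_like(img_ref)
    # Mark the rectangular window for this vertical level, clipped to the image.
    mask[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0] - level * height),
         max(0, int(center - width / 2)):min(int(center + width / 2), img_ref.shape[1])] = 1
    return mask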
elif first_round:
    test_image = cv2.imread('test_images/straight_lines1.jpg')
    gray = cv2.cvtColor(test_image, cv2.COLOR_BGR2GRAY)
    undistort = rmv_distortion.undistort(test_image)
    # works for straight road
    #sample_bound_box = np.array([317, 640, 990, 646, 490, 522, 801, 524])  # curved road
    sample_bound_box = np.array([390, 600, 940, 590, 560, 480, 745, 470], np.float32)
    shape = (1280, 720)
    phi = horizontal_shift(sample_bound_box[:4], sample_bound_box[4:8])
    transformed_lower, transformed_upper = center_shift_points(
        sample_bound_box[:4], sample_bound_box[4:8], shape, phi)
    transformed = transform_perspective(
        to_RGB(bit_and_transform(undistort)),
        np.float32(sample_bound_box.reshape((4, 2))),
        np.float32(np.stack([transformed_lower, transformed_upper]).reshape((4, 2))),
        shape)
    plt.imshow(transformed, cmap='gray')
    plt.show()
elif second_round:
    test_image = mpimg.imread('test_images/test2.jpg')
    gray = cv2.cvtColor(test_image, cv2.COLOR_RGB2GRAY)
    undistort = rmv_distortion.undistort(test_image)
    transformed = transform_perspective2(undistort)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()
    ax1.imshow(undistort, cmap='gray')
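# transform_perspective is defined elsewhere in the repo; a minimal sketch of
# what it could look like, assuming it wraps OpenCV's homography utilities
# (src and dst are 4x2 float32 point arrays, shape is a (width, height) tuple):
def transform_perspective(img, src, dst, shape):
    M = cv2.getPerspectiveTransform(src, dst)  # 3x3 homography mapping src -> dst
    return cv2.warpPerspective(img, M, shape, flags=cv2.INTER_LINEAR)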
def pipeline(img):
    global left_line
    global right_line
    global counter
    rmv_distortion = RemoveDistortion()
    rmv_distortion.load_pickle()
    undistort = rmv_distortion.undistort(img)
    # transform into bird's-eye perspective
    src = np.array([390, 600, 940, 590, 560, 480, 745, 470], np.float32).reshape((4, 2))
    dst = np.array([455, 650, 870, 670, 460, 350, 905, 360], np.float32).reshape((4, 2))
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    boolean = np.logical_or(bit_and_transform(undistort), hls_decision_rule(undistort))
    warped = cv2.warpPerspective(to_RGB(boolean), M, (img.shape[1], img.shape[0]),
                                 flags=cv2.INTER_LINEAR)[:, :, 0]
    # remove irrelevant regions
    warped[-200:, :370] = False   # lower left
    warped[:550, :300] = False    # top left
    warped[-300:, -300:] = False  # lower right
    warped[:450, -300:] = False   # top right
    warped[-20:, :] = False       # remove bottom 20 pixels
    # find centroids: do a full window search on the first frame or after 5
    # consecutive local searches, otherwise search near the previous fit
    if (left_line.current_fit is None and right_line.current_fit is None) or counter > 5:
        centroids = find_window_centroids(warped, window_width, window_height, margin)
        counter = 0
    else:
        try:
            centroids = Line.local_search(warped, left_line.centroid, right_line.centroid,
                                          left_line.extrap.predict_xfitted(),
                                          right_line.extrap.predict_xfitted())
            counter += 1
        except ValueError:
            print('exception used')
            centroids = find_window_centroids(warped, window_width, window_height, margin)
            counter = 0
    # add in new values
    left_centroid = centroids[:, 0]
    right_centroid = centroids[:, 1]
    # update dependent values
    offset = distance_from_center_lane(centroids, img, Minv)  # initial attempt
    left_line.next_fit(left_centroid, right_centroid, offset)
    right_line.next_fit(right_centroid, left_centroid, offset)
    # Fit a second-order polynomial to each lane line and warp the bounded region back
    bounded = bound_lanes(np.uint8(warped), left_line.get_best_fit(), right_line.get_best_fit())
    bounds_img = cv2.warpPerspective(bounded, Minv, (img.shape[1], img.shape[0]),
                                     flags=cv2.INTER_LINEAR)
    bounded_lanes = cv2.addWeighted(np.uint8(img), 1, to_RGB(bounds_img), 0.8, 0)
    # find radius of curvature and offset
    left_curverad, right_curverad = radius_curvature(img, centroids)
    left_line.set_curverad(left_curverad)
    right_line.set_curverad(right_curverad)
    offset = distance_from_center_lane(centroids, img, Minv)
    # draw the convolutional process for diagnostic purposes
    convolutional_process = draw_window_centroids(np.float32(warped), centroids)
    return bounded_lanes, warped, convolutional_process, (left_curverad,
                                                          right_curverad), offset
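# A sketch of how pipeline could be driven over a video with moviepy (an
# assumption; the file names are placeholders, and left_line, right_line, and
# counter are assumed to be initialized as in the surrounding module):
from moviepy.editor import VideoFileClip

def annotate_frame(img):
    bounded_lanes, warped, conv_process, curverads, offset = pipeline(img)
    return bounded_lanes  # keep only the annotated frame for the output video

clip = VideoFileClip('project_video.mp4')  # placeholder input path
annotated = clip.fl_image(annotate_frame)  # apply the pipeline frame by frame
annotated.write_videofile('output_video.mp4', audio=False)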
def insert_diag_into(frame, diag, x_slice, y_slice):
    # x_slice and y_slice give the row and column extents of the target region
    # (upper-left to lower-right); diag is resized to fit that region exactly
    x_shape = x_slice.stop - x_slice.start
    y_shape = y_slice.stop - y_slice.start
    frame[x_slice, y_slice] = cv2.resize(to_RGB(diag), (y_shape, x_shape),
                                         interpolation=cv2.INTER_AREA)
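# Example use of insert_diag_into (a hypothetical layout, not from the repo):
# the annotated frame on top, two shrunken diagnostic views along the bottom.
def compose_diag_frame(bounded_lanes, warped, conv_process):
    frame = np.zeros((1080, 1280, 3), dtype=np.uint8)
    frame[:720, :1280] = bounded_lanes  # full-size annotated result
    insert_diag_into(frame, warped, slice(720, 1080), slice(0, 640))  # bird's-eye binary
    insert_diag_into(frame, conv_process, slice(720, 1080), slice(640, 1280))  # window search
    return frame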
        bounding_box[0][1]:bounding_box[1][1],
        bounding_box[0][0]:bounding_box[1][0]]
    print("mean BGR is: ")
    print(np.mean(pixels_of_interest.reshape((-1, 3)), axis=0))
    print("mean HLS is: ")
    print(np.mean(hls_pixels_of_interest.reshape((-1, 3)), axis=0))
    print(np.std(hls_pixels_of_interest.reshape((-1, 3)), axis=0))
elif test_lane_colors:
    cv2.namedWindow('original')
    cv2.imshow('original', img)
    cv2.namedWindow('image Hue')
    cv2.imshow('image Hue', to_RGB(threshold_hue(img, (70, 180))))
    cv2.namedWindow('img saturation')
    cv2.imshow('img saturation',
               to_RGB(threshold_saturation(img, (80, 255))))  # saturation is great for yellow, and for white lines too
    cv2.namedWindow('combined rule')
    cv2.imshow('combined rule', to_RGB(hls_decision_rule(img)))
    cv2.namedWindow('combined and sobel')
    cv2.imshow('combined and sobel', to_RGB((img[:, :, 2] > 230) & (img[:, :, 1] > 200)))
    cv2.namedWindow('sobel_mag')
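# threshold_saturation is called above but defined elsewhere; a minimal sketch
# of the saturation rule, assuming BGR input from cv2.imread (OpenCV's HLS
# channel ordering puts S at index 2):
def threshold_saturation(img, thresh=(80, 255)):
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    s_channel = hls[:, :, 2]
    return (s_channel > thresh[0]) & (s_channel <= thresh[1])  # boolean mask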
first procedure I tested -- to be deprecated
"""
test = cv2.imread('test_images/straight_lines2.jpg')
rmv_distortion = RemoveDistortion()
rmv_distortion.load_pickle()
undistort = rmv_distortion.undistort(test)
binary_img = or_decision_rule(undistort)  # a more robust version using the HLS color space will be added
cv2.namedWindow('original img')
cv2.imshow('original img', undistort)
cv2.namedWindow('binary img')
cv2.imshow('binary img', to_RGB(binary_img))
cv2.namedWindow('binary img transform')
cv2.imshow('binary img transform', default_transform_perspective(to_RGB(binary_img)))
shifted_perspective = default_transform_perspective(to_RGB(binary_img))
#plt.imshow(shifted_perspective)
#plt.show()
centroids = find_window_centroids(shifted_perspective, window_width,
                                  window_height, margin)  # find centroids using defaults
#print(centroids)
#draw_window_centroids(shifted_perspective[:, :, 0], centroids)
corrected_centroids = np.array([[358., 972.], [359.,