def pipeline_heat(image, return_images=False):
    global heat
    hot_windows_all = []

    # Cool down the heat map: cap it at max_heat, then decay every hot pixel by one
    heat[heat > pars.max_heat] = pars.max_heat
    heat[heat > 0] -= 1
    heat[heat < 0] = 0

    for ii in pars.combinations['num_window']:
        window_size = pars.combinations['window_size'][ii]
        color = pars.combinations['color'][ii]
        overlap = pars.combinations['overlap'][ii]
        y_limit = pars.combinations['y_limit'][ii]
        hot_windows, _ = pipeline_search(image, y_limit, window_size, overlap, color)

        # Add heat for each box in the box list
        heat = hm.add_heat(heat, hot_windows)
        hot_windows_all = hot_windows_all + hot_windows

    # Apply a threshold to help remove false positives
    heat = hm.apply_threshold(heat, pars.heatmap_threshold)

    # Clip the heat map for visualization
    heatmap = np.clip(heat, 0, 255)

    # Find the final boxes from the heat map using the label function
    labels = label(heatmap)
    image_labeled = hm.draw_labeled_bboxes(np.copy(image), labels)

    # Alternative: restart the heat map every cycle
    # heat = np.zeros_like(heat)

    image_with_bboxes = draw_boxes(image, hot_windows_all, color=color, thick=6)

    if return_images:
        return image_labeled, image_with_bboxes, heatmap
    else:
        return image_labeled
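# A minimal sketch of the heat-map helpers assumed above; the real hm module
# (hm.add_heat, hm.apply_threshold, hm.draw_labeled_bboxes) is not shown in
# this file, so the implementations below only illustrate the usual approach:
# every detection box adds +1 heat, low heat is zeroed out, and each labeled
# region is boxed by its pixel extents.
import numpy as np
import cv2


def add_heat(heatmap, bbox_list):
    # Each box is ((x1, y1), (x2, y2)); add 1 heat inside every box
    for box in bbox_list:
        heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
    return heatmap


def apply_threshold(heatmap, threshold):
    # Zero out pixels at or below the threshold to suppress false positives
    heatmap[heatmap <= threshold] = 0
    return heatmap


def draw_labeled_bboxes(img, labels):
    # `labels` comes from scipy.ndimage.label(): (label_array, num_features)
    for car_number in range(1, labels[1] + 1):
        nonzeroy, nonzerox = (labels[0] == car_number).nonzero()
        bbox = ((np.min(nonzerox), np.min(nonzeroy)),
                (np.max(nonzerox), np.max(nonzeroy)))
        cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)
    return img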
def process_image_vedet(image):
    dst = process_image_lane_detect(image)
    draw_image = np.copy(dst)

    # Scale to 0-1 because the training data was extracted from .png images
    # (scaled 0 to 1 by mpimg) while the image being searched is a .jpg
    # (scaled 0 to 255)
    image = image.astype(np.float32) / 255

    windows = slide_window(image, x_start_stop=[None, None], y_start_stop=y_start_stop,
                           xy_window=(96, 96), xy_overlap=(0.8, 0.8))

    hot_windows = search_windows(image, windows, svc, X_scaler,
                                 color_space=color_space,
                                 spatial_size=spatial_size, hist_bins=hist_bins,
                                 orient=orient, pix_per_cell=pix_per_cell,
                                 cell_per_block=cell_per_block,
                                 hog_channel=hog_channel,
                                 spatial_feat=spatial_feat,
                                 hist_feat=hist_feat, hog_feat=hog_feat)

    # Add this frame's hot windows to the detection history
    if len(hot_windows) > 0:
        memory.add_rect(hot_windows)

    window_img = draw_boxes(draw_image, hot_windows, color=(0, 255, 255), thick=1)

    # Accumulate heat over all frames kept in the history
    heat = np.zeros_like(image[:, :, 0]).astype(np.float64)
    for rect in memory.previous_rects:
        heat = add_heat(heat, rect)

    # Apply a threshold to help remove false positives
    heat = apply_threshold(heat, len(memory.previous_rects))

    # Clip the heat map for visualization
    heatmap = np.clip(heat, 0, 255)

    # Find the final boxes from the heat map using the label function
    labels = label(heatmap)
    draw_img = draw_labeled_bboxes(window_img, labels)
    return draw_img
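# A minimal sketch of the frame-history object used above. The real `memory`
# instance is created elsewhere; from the call sites it only needs add_rect()
# and a bounded previous_rects collection of per-frame hot windows.
# `DetectionMemory` is a hypothetical name.
from collections import deque


class DetectionMemory:
    def __init__(self, max_frames=15):
        # Keep at most max_frames frames worth of hot windows
        self.previous_rects = deque(maxlen=max_frames)

    def add_rect(self, rects):
        self.previous_rects.append(rects)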
def handle_image(self, img):
    # Exponential decay factor for the running heat map
    alpha = 0.3

    if self.heat is None:
        self.heat = np.zeros_like(img[:, :, 0]).astype(np.float64)
    else:
        self.heat = self.heat * (1.0 - alpha)

    heat = np.zeros_like(img[:, :, 0]).astype(np.float64)
    image = img.astype(np.float32) / 255.

    boxes = sliding_window.search_classify(image, clf)
    draw_image = np.copy(image)
    window_img = sliding_window.draw_boxes(draw_image, boxes, color=(0, 0, 1.0), thick=6)
    scipy.misc.imsave('output_images/boxes_' + str(self.count) + '.jpg', window_img)

    # Blend this frame's heat into the running heat map
    heat = heatmap.add_heat(heat, boxes)
    self.heat += heat * alpha

    # Apply a threshold to help remove false positives
    hm = heatmap.apply_threshold(np.copy(self.heat), 0.6)

    # Clip the heat map for visualization
    hm = np.clip(hm, 0, 255)
    scipy.misc.imsave('output_images/map_' + str(self.count) + '.jpg', self.heat)

    # Find the final boxes from the heat map using the label function
    labels = label(hm)
    draw_img = heatmap.draw_labeled_bboxes(image, labels)
    scipy.misc.imsave('output_images/' + str(self.count) + '.jpg', draw_img)

    self.count += 1
    return draw_img * 255.0
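# A small check of the running-average behaviour above: with alpha = 0.3 the
# contribution of a detection decays geometrically, so a box seen K frames ago
# adds roughly alpha * (1 - alpha)**K to the current heat map. This is only an
# illustration of the decay, not part of the pipeline.
demo_alpha = 0.3
weights = [round(demo_alpha * (1 - demo_alpha) ** k, 3) for k in range(5)]
print(weights)  # [0.3, 0.21, 0.147, 0.103, 0.072]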
def process_single_image(self, image):
    # 1. Extract all potential hits
    hot_windows = search_windows(image, self.windows, self.classifier, self.scaler)

    # 2. Combine duplicate detections by creating a heatmap
    current_heatmap = np.zeros_like(image[:, :, 0]).astype(np.float64)
    current_heatmap = add_heat(current_heatmap, hot_windows)

    # 3. Threshold the heatmap to remove false positives and duplicate detections
    current_heatmap_thresh = apply_threshold(current_heatmap, self.threshold_single)

    # 4. Determine the number of vehicles and their positions by labeling
    #    connected regions in the heatmap
    if self.heatmap is None:
        # There is no previous-frame heat map, so start from the current frame only
        current_heatmap_combined = np.zeros_like(image[:, :, 0]).astype(np.float64)
        current_heatmap_combined_thresh = current_heatmap_combined
        labels = label(current_heatmap_thresh)
        self.heatmap = current_heatmap_thresh
    else:
        # Use a smoothing factor to combine the current and previous frame heat maps
        current_heatmap_combined = self.heatmap * self.smoothing + \
            current_heatmap_thresh * (1 - self.smoothing)
        # Apply a different threshold to the combined heatmap
        current_heatmap_combined_thresh = apply_threshold(
            current_heatmap_combined, self.threshold_combined)
        labels = label(current_heatmap_combined_thresh)
        self.heatmap = current_heatmap_combined_thresh

    # 5. Draw the bounding boxes of the detected regions on the original image/frame
    window_hot = draw_labeled_bboxes(np.copy(image), labels)
    return window_hot
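# A hedged usage sketch showing how process_single_image() could be wired into
# a moviepy video pipeline. `VehicleDetector` is a hypothetical wrapper name
# for the object that carries self.windows, self.classifier, self.scaler,
# self.smoothing and the two thresholds; it is not defined in this file.
from moviepy.editor import VideoFileClip

detector = VehicleDetector()  # hypothetical class holding the state used above
clip = VideoFileClip('project_video.mp4')
annotated = clip.fl_image(detector.process_single_image)
annotated.write_videofile('project_video_output.mp4', audio=False)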
                             color=(0, 0, 255), thick=1)

    heat = np.zeros_like(image[:, :, 0]).astype(np.float64)
    heat = add_heat(heat, hot_windows)

    # Apply a threshold to help remove false positives
    heat = apply_threshold(heat, 3)

    # Clip the heat map for visualization
    heatmap = np.clip(heat, 0, 255)

    # Find the final boxes from the heat map using the label function
    labels = label(heatmap)
    print(labels[1], 'cars found')

    # jpg write is not possible here, use png
    mpimg.imsave(outimgpath + "heatmap_" + os.path.splitext(os.path.basename(filename))[0] + ".png",
                 labels[0])

    draw_img = draw_labeled_bboxes(window_img, labels)
    mpimg.imsave(outimgpath + "candidate_" + os.path.splitext(os.path.basename(filename))[0] + ".png",
                 draw_img)

    print('processed example img ', filename)
else:
    video_input00 = 'test_video.mp4'
    video_input01 = 'project_video.mp4'
    video_output00 = 'output_videos/test_video_output.mp4'
    video_output01 = 'output_videos/project_video_output.mp4'

    videoclip00 = VideoFileClip(video_input00)
    # videoclip01 = VideoFileClip(video_input01).subclip(40, 51)
    videoclip01 = VideoFileClip(video_input01)  # .subclip(0, 10)

    processed_video00 = videoclip00.fl_image(process_image_vedet)
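# A quick illustration of what label() returns here (assuming it is
# scipy.ndimage.label, as in the lesson code): a labelled array plus the
# number of connected regions, which is why labels[1] is the car count.
import numpy as np
from scipy.ndimage import label

toy_heat = np.array([[0, 2, 2, 0, 0],
                     [0, 3, 1, 0, 4],
                     [0, 0, 0, 0, 4]])
toy_labels = label(toy_heat)
print(toy_labels[1], 'cars found')  # 2 cars found
print(toy_labels[0])                # region ids 1 and 2 mark the two blobs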
def videopipe(video_image):
    # Process a single video frame
    global nnn  # frame counter for debug output; must precede any use of nnn below

    video_image_png = video_image.astype(np.float32) / 255
    draw_image = np.copy(video_image)

    '''
    # For the output of a single image:
    test_file_name = 'test1.jpg'
    test_file_folder = '../test_images/'
    test_file = test_file_folder + test_file_name
    test_image_name = test_file_name + '_LUVonlySpat.jpg'
    test_image_png = mpimg.imread(test_file)
    test_image = test_image_png.astype(np.float32) / 255
    draw_image = np.copy(test_image)
    '''

    # Search for matches in the image with the three single-feature classifiers
    hot_windows_spat = detect.search_windows(video_image_png, windows, svc_spat, X_scaler_spat,
                                             color_space=color_space,
                                             spatial_size=spatial_size, hist_bins=hist_bins,
                                             orient=orient, pix_per_cell=pix_per_cell,
                                             cell_per_block=cell_per_block,
                                             hog_channel=hog_channel,
                                             spatial_feat=spatial_feat,
                                             hist_feat=False, hog_feat=False)

    hot_windows_hist = detect.search_windows(video_image_png, windows, svc_hist, X_scaler_hist,
                                             color_space=color_space,
                                             spatial_size=spatial_size, hist_bins=hist_bins,
                                             orient=orient, pix_per_cell=pix_per_cell,
                                             cell_per_block=cell_per_block,
                                             hog_channel=hog_channel,
                                             spatial_feat=spatial_feat,
                                             hist_feat=hist_feat, hog_feat=False)

    hot_windows_hog = detect.search_windows(video_image_png, windows, svc_hog, X_scaler_hog,
                                            color_space=color_space,
                                            spatial_size=spatial_size, hist_bins=hist_bins,
                                            orient=orient, pix_per_cell=pix_per_cell,
                                            cell_per_block=cell_per_block,
                                            hog_channel=hog_channel,
                                            spatial_feat=False,
                                            hist_feat=False, hog_feat=hog_feat)

    hot_windows = []
    hot_windows.extend(hot_windows_spat)
    hot_windows.extend(hot_windows_hist)
    hot_windows.extend(hot_windows_hog)
    burning_windows = len(hot_windows)

    font = cv2.FONT_HERSHEY_SIMPLEX

    # '''
    # For debugging: draw the rectangles found by each classifier on the image
    window_img = lesson_functions.draw_boxes(draw_image, hot_windows_spat,
                                             color=(255, 0, 255), thick=2)
    cv2.putText(window_img, str(nnn), (50, 100), font, 1.5, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(window_img, "hot windows found: " + str(len(hot_windows_spat)),
                (150, 100), font, 1.5, (255, 0, 255), 2, cv2.LINE_AA)
    cv2.putText(window_img, "spatial bins: " + str(spatial_size),
                (100, 150), font, 1, (255, 0, 255), 2, cv2.LINE_AA)
    cv2.imwrite(folder + '/spat/spat_' + str(nnn) + '.jpg', window_img)

    window_img = lesson_functions.draw_boxes(draw_image, hot_windows_hist,
                                             color=(0, 255, 255), thick=2)
    cv2.putText(window_img, str(nnn), (50, 100), font, 1.5, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(window_img, "hot windows found: " + str(len(hot_windows_hist)),
                (150, 100), font, 1.5, (0, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(window_img, "hist_bins: " + str(hist_bins),
                (100, 150), font, 1, (0, 255, 255), 2, cv2.LINE_AA)
    cv2.imwrite(folder + '/hist/hist_' + str(nnn) + '.jpg', window_img)

    window_img = lesson_functions.draw_boxes(draw_image, hot_windows_hog,
                                             color=(255, 255, 0), thick=2)
    cv2.putText(window_img, str(nnn), (50, 100), font, 1.5, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(window_img, "hot windows found: " + str(len(hot_windows_hog)),
                (150, 100), font, 1.5, (255, 255, 0), 2, cv2.LINE_AA)
    cv2.putText(window_img,
                "orient: " + str(orient) + " pix_per_cell: " + str(pix_per_cell) +
                " cell_per_block: " + str(cell_per_block) + " hog_channel: " + str(hog_channel),
                (100, 150), font, 1, (255, 255, 0), 2, cv2.LINE_AA)
    cv2.imwrite(folder + '/hog/hog_' + str(nnn) + '.jpg', window_img)

    window_img = lesson_functions.draw_boxes(draw_image, hot_windows,
                                             color=(0, 0, 255), thick=2)
    cv2.putText(window_img, str(nnn), (50, 100), font, 1.5, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(window_img, "hot windows found: " + str(len(hot_windows)),
                (150, 100), font, 1.5, (0, 0, 255), 2, cv2.LINE_AA)
    cv2.putText(window_img,
                "abs_threshold: " + str(abs_threshold) + " dyn_lim: " + str(dyn_lim) +
                " = " + str(int(dyn_lim * len(hot_windows))),
                (100, 150), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
    cv2.putText(window_img,
                "spat: " + str(len(hot_windows_spat)) + " hist: " + str(len(hot_windows_hist)) +
                " hog: " + str(len(hot_windows_hog)),
                (100, 200), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
    # Document single-image output
    # '''

    nnn = nnn + 1

    # Consolidate the heat map: a share (dyn_lim) of all hot windows must overlap
    # an object before it counts as a detection
    dyn_threshold = int(burning_windows * dyn_lim)
    labels, heat_max = heatmap.heathot(hot_windows, draw_image, dyn_threshold,
                                       abs_threshold, max_threshold)

    ##############################
    # Another filter function added with love to the Udacity reviewers
    ##############################
    # Generate the list of bounding boxes and append it to the frame history
    bbox_list = heatmap.draw_labeled_bboxes(video_image_png, labels)[1]
    history.append(bbox_list)

    # Flatten the history into a single list of boxes
    tmp = []
    for n in history:
        tmp.extend(n)

    # Once enough frames have been seen, re-label using the accumulated history
    if nnn > history_length + 1:
        labels = heatmap.heathot(tmp, draw_image, history_threshold, -1, 100)[0]

    # There have to be at least some detections from each classifier
    if (len(hot_windows_spat) > abs_limit) and (len(hot_windows_hist) > abs_limit) and \
            (len(hot_windows_hog) > abs_limit):
        draw_image = heatmap.draw_labeled_bboxes(draw_image, labels)[0]

    # ''' For documentation purposes, add the results to the debug output image
    if (len(hot_windows_spat) > abs_limit) and (len(hot_windows_hist) > abs_limit) and \
            (len(hot_windows_hog) > abs_limit):
        docu_img = heatmap.draw_labeled_bboxes(window_img, labels)[0]
    else:
        docu_img = window_img
    cv2.putText(docu_img, "heat_max: " + str(heat_max), (100, 250), font, 1,
                (255, 255, 255), 2, cv2.LINE_AA)
    cv2.imwrite(folder + '/all/all_' + str(nnn) + '.jpg', docu_img)
    # '''

    # Label each frame with a counter for debugging / threshold tuning
    cv2.putText(draw_image, str(nnn), (50, 100), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.putText(draw_image, "hot windows found: " + str(len(hot_windows)),
                (150, 100), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
    cv2.imwrite(folder + '/output/out_' + str(nnn) + '.jpg', draw_image)

    return draw_image
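# A heavily hedged sketch of the heatmap.heathot() helper used above -- its
# real implementation is not part of this file. From the call sites it appears
# to build a heat map from the hot windows, apply a dynamic and an absolute
# threshold, cap the heat at max_threshold, and return scipy labels plus the
# maximum heat value; how the two thresholds combine is an assumption here.
import numpy as np
from scipy.ndimage import label


def heathot_sketch(hot_windows, image, dyn_threshold, abs_threshold, max_threshold):
    heat = np.zeros_like(image[:, :, 0]).astype(np.float64)
    for box in hot_windows:
        heat[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
    heat_max = int(np.max(heat)) if heat.size else 0
    heat[heat > max_threshold] = max_threshold
    # Assumed: a pixel must clear the larger of the two thresholds to survive
    heat[heat <= max(dyn_threshold, abs_threshold)] = 0
    return label(heat), heat_max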
                                         color=(0, 0, 255), thick=2)
cv2.imwrite(folder + '/testpic/' + test_file_name + '_All_criteria.jpg', window_img)
# Document single-image output

# Consolidate the heat map: a share (dyn_lim) of all hot windows must overlap
# an object before it counts as a detection
dyn_threshold = int(burning_windows * dyn_lim)
labels, heat_max = heatmap.heathot(hot_windows, test_image, dyn_threshold,
                                   abs_threshold, max_threshold)
draw_img = heatmap.draw_labeled_bboxes(test_image_png, labels)[0]
cv2.imwrite(folder + '/testpic/' + test_file_name + '_heatmap.jpg', draw_img)

from moviepy.editor import VideoFileClip
from IPython.display import HTML
from PIL import Image
from collections import deque

# Frame counter and bounding-box history shared with the videopipe() frame pipeline
nnn = 0
history = deque(maxlen=history_length)
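# A quick illustration of why a deque with maxlen is used for the history:
# once it is full, appending a new frame's boxes silently drops the oldest
# entry, so no manual pruning is needed (history_length = 3 only for the demo).
from collections import deque

demo_history = deque(maxlen=3)
for frame_boxes in (['f0'], ['f1'], ['f2'], ['f3']):
    demo_history.append(frame_boxes)
print(demo_history)  # deque([['f1'], ['f2'], ['f3']], maxlen=3)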
def process_frame(frame, clf, norm_scaler, hog_parameters, spatial_size, hist_bins):
    """
    Process a single frame.

    :param frame: A single image
    :param clf: A classifier that calculates the probability for each sliding window
                to be a car or not a car
    :param norm_scaler: A normalization scaler for each feature vector of a sliding window
    :param hog_parameters: HOG parameters to be used
    :param spatial_size: The parameters for the spatial colour binning
                         (a pair containing the sample size in each dimension)
    :param hist_bins: Number of colour histogram bins
    :return: The original frame with blue bounding boxes drawn where cars are detected
    """
    # Convert to the right colour space
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)

    # Construct sliding window area definitions and calculate candidate "car" sliding windows
    boxes = []

    window_area_def1 = SlidingWindowAreaDefinition(x_start=550, x_stop=1024,
                                                   y_start=370, y_stop=498,
                                                   scaleX=1.0, scaleY=1.0)
    found_cars = find_cars(hsv_frame, clf=clf, feature_scaler=norm_scaler,
                           window_area_def=window_area_def1,
                           hog_parameters=hog_parameters,
                           spatial_size=spatial_size, hist_bins=hist_bins)
    boxes.append(found_cars)

    window_area_def2 = SlidingWindowAreaDefinition(x_start=530, x_stop=1144,
                                                   y_start=390, y_stop=534,
                                                   scaleX=1.5, scaleY=1.5)
    found_cars = find_cars(hsv_frame, clf=clf, feature_scaler=norm_scaler,
                           window_area_def=window_area_def2,
                           hog_parameters=hog_parameters,
                           spatial_size=spatial_size, hist_bins=hist_bins)
    boxes.append(found_cars)

    window_area_def3 = SlidingWindowAreaDefinition(x_start=480, x_stop=1280,
                                                   y_start=400, y_stop=592,
                                                   scaleX=2.0, scaleY=2.0)
    found_cars = find_cars(hsv_frame, clf=clf, feature_scaler=norm_scaler,
                           window_area_def=window_area_def3,
                           hog_parameters=hog_parameters,
                           spatial_size=spatial_size, hist_bins=hist_bins)
    boxes.append(found_cars)

    window_area_def4 = SlidingWindowAreaDefinition(x_start=944, x_stop=1280,
                                                   y_start=380, y_stop=620,
                                                   scaleX=3.0, scaleY=2.5)
    found_cars = find_cars(hsv_frame, clf=clf, feature_scaler=norm_scaler,
                           window_area_def=window_area_def4,
                           hog_parameters=hog_parameters,
                           spatial_size=spatial_size, hist_bins=hist_bins)
    boxes.append(found_cars)

    window_area_def5 = SlidingWindowAreaDefinition(x_start=896, x_stop=1280,
                                                   y_start=396, y_stop=636,
                                                   scaleX=4.0, scaleY=3.0)
    found_cars = find_cars(hsv_frame, clf=clf, feature_scaler=norm_scaler,
                           window_area_def=window_area_def5,
                           hog_parameters=hog_parameters,
                           spatial_size=spatial_size, hist_bins=hist_bins)
    boxes.append(found_cars)

    # Flatten the per-area lists into a single list of boxes
    boxes = [item for sublist in boxes for item in sublist]

    # Create a heatmap to prune false detections
    heat = np.zeros_like(frame[:, :, 0]).astype(np.float64)
    heat, single_frame_heat = add_heat(heat, boxes, heatmap_history)
    heatmap_history.append(single_frame_heat)
    heat = apply_threshold(heat, 5.5)

    # Label the detections in the heatmap (based on neighbouring strictly positive values)
    labels = label(heat)

    # Draw blue bounding boxes around the pixels sharing the same label
    result_img = np.copy(frame)
    result_img = draw_labeled_bboxes(result_img, labels)

    return result_img
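# A minimal sketch of the SlidingWindowAreaDefinition container used above.
# The real class is defined elsewhere; from the call sites it only needs to
# carry the search-region limits and the window scale in each direction.
from dataclasses import dataclass


@dataclass
class SlidingWindowAreaDefinition:
    x_start: int
    x_stop: int
    y_start: int
    y_stop: int
    scaleX: float
    scaleY: float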