def get_frame(self):
    '''Gets a frame from the camera.'''
    self.frame_count += 1
    if not self.capture and not self.image and not self.dc1394_capture:
        self.open_capture()

    if self.image:
        return cv.CloneImage(self.image)

    if self.dc1394_capture:
        frame = self.dc1394_capture.get_frame()
    else:
        frame = cv.QueryFrame(self.capture)

    # If the capture device doesn't work, OpenCV might not complain, but
    # just returns None when grabbing a frame.  Here we check for errors,
    # and see if it was actually an image, not a video, that we're opening.
    if not frame:

        # See if the file is an image, not a video
        if isinstance(self.identifier, basestring):
            try:
                self.image = cv.LoadImage(self.identifier)
            except IOError:
                if self.frame_count > 1:
                    raise self.CaptureError(
                        "The video has run out of frames.")
                else:
                    raise self.CaptureError(
                        "Either a read error occurred "
                        "with the file, or the file is not a valid video "
                        "or image file.")
            if self.image:
                return cv.CloneImage(self.image)
            else:
                raise self.CaptureError(
                    "This shouldn't happen.  Please "
                    "report a bug!  Include this traceback!!")
        else:
            raise self.CaptureError('Could not capture frame from '
                                    'identifier "%s"' % self.identifier)
        # TODO: Weird GStreamer error occurs when a folder is incorrectly
        #       specified as a video file, or when the user doesn't have
        #       permissions for the file.

    # TODO: doesn't work.  svr.debug doesn't accept debug request?
    if self.display:
        #cv.ShowImage(self.get_window_name(), frame)
        svr.debug(self.get_window_name(), frame)

    if self.record_path:
        filename = os.path.join(self.record_path,
                                "%s.jpg" % self.frame_count)
        cv.SaveImage(filename, frame)

    return frame
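# A minimal usage sketch for the capture method above.  The Camera class
# name, its constructor arguments, and the identifier value are assumptions
# for illustration only; CaptureError is raised by get_frame() as shown in
# the method itself.
#
#     camera = Camera(identifier=0, display=True, record_path=None)
#     try:
#         while True:
#             frame = camera.get_frame()  # IplImage, or raises CaptureError
#             # ... hand the frame to a vision entity here ...
#     except camera.CaptureError as e:
#         print "Capture ended:", e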
def process_frame(self, frame):
    # Preprocess: grayscale plus histogram equalization improves cascade hits
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)

    # Run the Haar cascade over the equalized image
    self.candidates = self.cascade.detectMultiScale(
        gray,
        scaleFactor=1.3,
        minNeighbors=4,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE)

    vis = frame.copy()
    self.draw_rects(vis, self.candidates, (0, 255, 0))
    svr.debug('facedetect', vis)
def print_frame(self, name, frame, on_svr=False):
    """Prints out the given frame locally, or offers to stream the
    image via svr."""
    name = "print{}: {}".format(self.step, name)

    # print using svr
    if on_svr:
        svr.debug(name, frame)
    # print using OpenCV
    else:
        self.debug_stream(name, frame)

    # increment step counter
    self.step += 1
def show(self):
    # adding positive noise to the frame
    noise = np.random.normal(loc=0, scale=4, size=self.frame.shape)
    noise = np.array(np.array([50, 50, 20]) * np.ones(self.frameSize) + noise,
                     self.frame.dtype)
    self.frame = cv2.add(self.frame, noise)
    #cv2.imshow(self.name, self.frame)
    #cv2.imwrite("./PICS/" + self.name + str(self.count) + ".png", self.frame)
    self.count += 1

    # converting image to type desired by svr
    container = cv2.cv.fromarray(np.copy(self.frame))
    cv_image = cv2.cv.GetImage(container)
    svr.debug(self.name, cv_image)
def process_frame(self, frame, debug=True):

    # Scale image to reduce processing
    #scale_in_place(frame, (frame.width*0.7, frame.height*0.7))

    if debug:
        debug_frame = cv.CloneImage(frame)
    else:
        debug_frame = False

    # Searching State
    # Search for buoys, then move to tracking when they are found
    if self.state == "searching":
        trackers = self.initial_search(frame, debug_frame)
        if trackers:
            self.trackers = trackers
            self.state = "tracking"
            try:
                svr.debug_close("Saturation")
                svr.debug_close("Red")
                svr.debug_close("AdaptiveThreshold")
            except:
                pass

    # Tracking State
    num_buoys_found = 0
    if self.state == "tracking":
        num_buoys_found, locations = self.buoy_track(
            frame, self.trackers, debug_frame)
        if num_buoys_found > 0:
            self.buoy_locations = locations

    if debug:
        svr.debug("Buoy2011", debug_frame)

    # Convert to output format
    self.output.buoys = []
    for location in self.buoy_locations:
        buoy = Container()
        # Use float constants so Python 2 integer division doesn't
        # truncate the field-of-view scale factor to zero
        buoy.theta = location[0] * (34.0 / (frame.width / 2))
        buoy.phi = location[1] * (30.0 / (frame.height / 2))  # Complete guess on vertical fov
        buoy.id = 1
        self.output.buoys.append(buoy)
    self.return_output()
def process_frame(self, frame):
    buoy_locations = []

    # Scale image to reduce processing
    '''
    scale = 0.7
    frame_scaled = cv.CreateImage((int(frame.width*scale), int(frame.height*scale)), 8, 3)
    cv.Resize(frame, frame_scaled)
    cv.SetImageROI(frame, (0, 0, int(frame.width*scale), int(frame.height*scale)))
    '''

    # Create debug_frame
    if self.debug:
        debug_frame = cv.CloneImage(frame)
    else:
        debug_frame = False

    # Look for buoys if there aren't any trackers yet
    if not self.trackers:
        self.trackers = self.initial_search(frame, debug_frame)
        if self.trackers:
            buoy_locations = map(lambda x: adjust_location(x.object_center,
                                                           frame.width,
                                                           frame.height),
                                 self.trackers)
        if debug_frame:
            svr.debug("Buoy", debug_frame)
        return

    # Tracking
    else:
        num_buoys_found, buoy_locations = self.buoy_track(frame,
                                                          self.trackers,
                                                          debug_frame)

    if debug_frame:
        svr.debug("Buoy", debug_frame)

    # Convert to output format
    self.output.buoys = []
    for location in buoy_locations:
        buoy = Container()
        buoy.theta = location[0]
        buoy.phi = location[1]
        buoy.id = 1
        self.output.buoys.append(buoy)

    if self.output.buoys:
        self.return_output()

    return self.output
def process_frame(self, frame):
    buoy_locations = []

    # Create debug_frame
    if self.debug:
        debug_frame = cv.CloneImage(frame)
    else:
        debug_frame = False

    # Look for buoys if there aren't any trackers yet
    if not self.trackers:
        self.trackers = self.initial_search(frame, debug_frame)
        if self.trackers:
            buoy_locations = map(lambda x: adjust_location(x.object_center,
                                                           frame.width,
                                                           frame.height),
                                 self.trackers)
        if debug_frame:
            svr.debug("Buoy", debug_frame)
        return

    # Tracking
    else:
        num_buoys_found, buoy_locations = self.buoy_track(frame,
                                                          self.trackers,
                                                          debug_frame)

    # Debug info
    if debug_frame:
        for tracker in self.trackers:
            print "Drawing Circle!!!!", tracker.object_center
            cv.Circle(debug_frame,
                      (int(tracker.object_center[0]),
                       int(tracker.object_center[1])),
                      tracker.size[0], (0, 0, 255), 2)

    if debug_frame:
        svr.debug("Buoy", debug_frame)

    # Convert to output format
    self.output.buoys = []
    for location in buoy_locations:
        buoy = Container()
        buoy.theta = location[0]
        buoy.phi = location[1]
        buoy.id = 1
        self.output.buoys.append(buoy)

    if self.output.buoys:
        self.return_output()

    return self.output
def process_frame(self, frame):
    '''Process the frame and search for path markers.

    Keyword Arguments:
    frame -- image in cv format to process

    '''
    binary_img = self.preprocess(frame)
    grad = self.get_gradient(binary_img)
    hough_p = self.hough_p(grad)

    # Do not continue if hough_p is None; it will cause errors.
    if hough_p is None:
        self.output = []
        svr.debug("Paths", frame)
        return

    groups = self.group_lines(hough_p)
    self.output = groups
    self.do_debug(frame, groups)
    return
def do_debug(self, frame, groups):
    '''Create some feedback for people to look at for debugging.

    Input is really whatever we happen to need or want to test.  First
    this function finds the location of the center of the frame.  Then it
    uses sin and cos to produce endpoints for lines showing each angle in
    'groups'.

    Keyword Arguments:
    frame -- original image

    '''
    width, height = cv.GetSize(frame)
    center_x = width // 2
    center_y = height // 2
    radius = (center_x ** 2 + center_y ** 2) ** .5

    for theta in groups:
        x_off = np.int32(radius * math.cos(theta))
        y_off = np.int32(radius * math.sin(theta))
        # Draw a full-diameter line through the image center at angle theta
        cv.Line(frame,
                (center_x - x_off, center_y - y_off),
                (center_x + x_off, center_y + y_off),
                (255, 255, 255))

    svr.debug("Paths", frame)
def process_frame(self, frame):
    # ---------- BLOCK 1: FIND NEW BUOYS ---------- #

    # find new buoys
    new_buoys = self.find_buoys(frame)

    # send found buoys back to manager
    buoy_data = WorkerData()
    buoy_data.new_buoys = new_buoys
    self.send_message(buoy_data)

    # ---------- BLOCK 2: DEBUG ---------- #
    if self.debug:
        # wait for debug information from parent
        debug_info = self.wait_for_parent(None)

        # display confirmed buoys
        for confirmed in debug_info.confirmed:
            if debug_info.camera == LEFT:
                x = confirmed.lx
                y = confirmed.ly
            if debug_info.camera == RIGHT:
                x = confirmed.rx
                y = confirmed.ry
            w = confirmed.width
            x = int(x)
            y = int(y)

            # draw rectangles on frame
            cv.Rectangle(frame, (x, y), (x + w, y + w),
                         confirmed.debug_color, thickness=6)

        # show debug frame
        svr.debug("Buoy" + self.camera_name, frame)
def find_blobs(self, frame, debug_image):
    '''Find blobs in an image.

    Hopefully this gets blobs that correspond with buoys, but any
    intelligent checking is done outside of this function.

    '''
    # Get Channels
    hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
    saturation = libvision.misc.get_channel(hsv, 1)
    red = libvision.misc.get_channel(frame, 2)

    # Adaptive Threshold
    cv.AdaptiveThreshold(
        saturation, saturation,
        255,
        cv.CV_ADAPTIVE_THRESH_MEAN_C,
        cv.CV_THRESH_BINARY_INV,
        self.saturation_adaptive_thresh_blocksize -
            self.saturation_adaptive_thresh_blocksize % 2 + 1,
        self.saturation_adaptive_thresh,
    )
    cv.AdaptiveThreshold(
        red, red,
        255,
        cv.CV_ADAPTIVE_THRESH_MEAN_C,
        cv.CV_THRESH_BINARY,
        self.red_adaptive_thresh_blocksize -
            self.red_adaptive_thresh_blocksize % 2 + 1,
        -1 * self.red_adaptive_thresh,
    )

    kernel = cv.CreateStructuringElementEx(9, 9, 4, 4, cv.CV_SHAPE_ELLIPSE)
    cv.Erode(saturation, saturation, kernel, 1)
    cv.Dilate(saturation, saturation, kernel, 1)
    cv.Erode(red, red, kernel, 1)
    cv.Dilate(red, red, kernel, 1)

    buoys_filter = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.And(saturation, red, buoys_filter)

    if debug_image:
        svr.debug("Saturation", saturation)
        svr.debug("Red", red)
        svr.debug("AdaptiveThreshold", buoys_filter)

    # Get blobs
    labeled_image = cv.CreateImage(cv.GetSize(buoys_filter), 8, 1)
    blobs = libvision.blob.find_blobs(buoys_filter, labeled_image,
                                      MIN_BLOB_SIZE, 10)

    return blobs, labeled_image
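# A minimal cv2/NumPy sketch of the same masking idea used above: threshold
# the HSV saturation channel and the BGR red channel adaptively, then AND
# them together.  The block size and C constants here are illustrative
# assumptions, not the entity's tuned values.
def buoy_mask(bgr, block=21, sat_c=4, red_c=4):
    import cv2
    import numpy as np
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    sat = hsv[:, :, 1]
    red = bgr[:, :, 2]
    block |= 1  # adaptiveThreshold requires an odd block size
    sat_m = cv2.adaptiveThreshold(sat, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                  cv2.THRESH_BINARY_INV, block, sat_c)
    red_m = cv2.adaptiveThreshold(red, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                  cv2.THRESH_BINARY, block, -red_c)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))
    sat_m = cv2.morphologyEx(sat_m, cv2.MORPH_OPEN, kernel)  # erode+dilate
    red_m = cv2.morphologyEx(red_m, cv2.MORPH_OPEN, kernel)
    return cv2.bitwise_and(sat_m, red_m)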
def process_frame(self, frame):
    found_path = False
    cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

    # use RGB color finder
    binary = libvision.cmodules.target_color_rgb.find_target_color_rgb(
        frame, self.R, self.G, self.B, self.min_blob_size,
        self.dev_thresh, self.precision / 1000.0)
    if self.debug:
        color_filtered = cv.CloneImage(binary)
        svr.debug("color_filtered", color_filtered)

    # Get Edges
    cv.Canny(binary, binary, 30, 40)

    # Hough Transform
    line_storage = cv.CreateMemStorage()
    lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD,
                           rho=1,
                           theta=math.pi / 180,
                           threshold=self.hough_threshold,
                           param1=0,
                           param2=0)
    lines = lines[:self.lines_to_consider]  # Limit number of lines

    # If there are at least 2 lines and they are close to parallel...
    # There's a path!
    #print lines
    if len(lines) >= 2:

        # Find: min, max, average
        theta_max = lines[0][1]
        theta_min = lines[0][1]
        total_theta = 0
        for rho, theta in lines:
            total_theta += theta
            if theta_max < theta:
                theta_max = theta
            if theta_min > theta:
                theta_min = theta

        theta_range = theta_max - theta_min
        # Near vertical angles will wrap around from pi to 0.  If the range
        # crosses this vertical line, the range will be way too large.  To
        # correct for this, we always take the smallest angle between the
        # min and max.
        if theta_range > math.pi / 2:
            theta_range = math.pi - theta_range

        if theta_range < self.theta_threshold:
            found_path = True
            angles = map(lambda line: line[1], lines)
            self.theta = circular_average(angles, math.pi)

    print found_path
    if found_path:
        self.seen_in_a_row += 1
    else:
        self.seen_in_a_row = 0

    # stores whether or not we are confident about the path's presence
    object_present = False

    print self.seen_in_a_row
    if self.seen_in_a_row >= self.seen_in_a_row_threshold:
        object_present = True
        self.image_coordinate_center = self.find_centroid(binary)
        # Move the origin to the center of the image
        self.center = (
            self.image_coordinate_center[0] - frame.width / 2,
            self.image_coordinate_center[1] * -1 + frame.height / 2)

    if self.debug:

        # Show color filtered
        color_filtered_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(color_filtered, color_filtered_rgb, cv.CV_GRAY2RGB)
        cv.SubS(color_filtered_rgb, (255, 0, 0), color_filtered_rgb)
        cv.Sub(frame, color_filtered_rgb, frame)

        # Show edges
        binary_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(binary, binary_rgb, cv.CV_GRAY2RGB)
        cv.Add(frame, binary_rgb, frame)  # Add white to edge pixels
        cv.SubS(binary_rgb, (0, 0, 255), binary_rgb)
        cv.Sub(frame, binary_rgb, frame)  # Remove all but Red

        # Show lines
        if self.seen_in_a_row >= self.seen_in_a_row_threshold:
            rounded_center = (
                int(round(self.image_coordinate_center[0])),
                int(round(self.image_coordinate_center[1])),
            )
            cv.Circle(frame, rounded_center, 5, (0, 255, 0))
            libvision.misc.draw_lines(frame,
                                      [(frame.width / 2, self.theta)])
        else:
            libvision.misc.draw_lines(frame, lines)

        #cv.ShowImage("Path", frame)
        svr.debug("Path", frame)

    # populate self.output with infos
    self.output.found = object_present
    self.output.theta = self.theta

    if self.center:
        # scale center coordinates of path based on frame size
        self.output.x = self.center[0] / (frame.width / 2)
        self.output.y = self.center[1] / (frame.height / 2)
        libvision.misc.draw_linesC(frame,
                                   [(frame.width / 2, self.output.theta)],
                                   [255, 0, 255])
        print "Output Returned!!! ", self.output.theta
    else:
        self.output.x = None
        self.output.y = None
        print "No output..."

    if self.output.found and self.center:
        print self.output
    self.return_output()
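# circular_average() is used here and in several functions below but is not
# defined in this section.  A plausible sketch, assuming it averages angles
# that wrap with the given period (math.pi for undirected lines) via unit
# vectors:
def circular_average(angles, period=2 * math.pi):
    # Map each angle onto the unit circle, scaled so one period = 2*pi,
    # average the vectors, then map the mean angle back into [0, period).
    scale = 2 * math.pi / period
    x = sum(math.cos(a * scale) for a in angles)
    y = sum(math.sin(a * scale) for a in angles)
    return (math.atan2(y, x) / scale) % period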
def process_frame(self, frame):

    # Resize image to 320x240
    #copy = cv.CreateImage(cv.GetSize(frame), 8, 3)
    #cv.Copy(frame, copy)
    #cv.SetImageROI(frame, (0, 0, 320, 240))
    #cv.Resize(copy, frame, cv.CV_INTER_NN)

    found_hedge = False

    test_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.Copy(frame, test_frame)

    cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

    # Set binary image to have value channel
    hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
    cv.SetImageCOI(hsv, 2)
    cv.Copy(hsv, binary)
    cv.SetImageCOI(hsv, 0)

    cv.AdaptiveThreshold(
        binary, binary,
        255,
        cv.CV_ADAPTIVE_THRESH_MEAN_C,
        cv.CV_THRESH_BINARY_INV,
        self.adaptive_thresh_blocksize,
        self.adaptive_thresh,
    )

    # Morphology
    kernel = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_ELLIPSE)
    #cv.Erode(binary, binary, kernel, 1)
    cv.Dilate(binary, binary, kernel, 4)

    if self.debug:
        color_filtered = cv.CloneImage(binary)

    # Get Edges
    #cv.Canny(binary, binary, 30, 40)

    # Hough Transform
    '''
    line_storage = cv.CreateMemStorage()
    raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD,
                               rho=1,
                               theta=math.pi/180,
                               threshold=self.hough_threshold,
                               param1=0,
                               param2=0
                               )
    '''

    # Hough Transform
    line_storage = cv.CreateMemStorage()
    raw_lines = cv.HoughLines2(binary, line_storage,
                               cv.CV_HOUGH_PROBABILISTIC,
                               rho=1,
                               theta=math.pi / 180,
                               threshold=self.hough_threshold,
                               param1=self.min_length,
                               param2=self.max_gap)

    self.hor_lines = []

    for line in raw_lines:
        slope = line_slope(line[0], line[1])
        if slope is None:
            continue
        if math.fabs(slope) < self.hor_threshold:
            self.hor_lines.append(line)

    # Keep the longest near-horizontal segment as the crossbar candidate
    max_length = 0
    for line in self.hor_lines:
        print line
        if math.fabs(line_distance(line[0], line[1])) > max_length:
            max_length = math.fabs(line_distance(line[0], line[1]))
            crossbar_seg = line

    '''
    # Get vertical lines
    vertical_lines = []
    for line in raw_lines:
        if line[1] < self.vertical_threshold or \
           line[1] > math.pi - self.vertical_threshold:
            vertical_lines.append((abs(line[0]), line[1]))

    # Group vertical lines
    vertical_line_groups = []  # A list of line groups which are each a line list
    for line in vertical_lines:
        group_found = False
        for line_group in vertical_line_groups:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            vertical_line_groups.append([line])

    # Average line groups into lines
    vertical_lines = []
    for line_group in vertical_line_groups:
        rhos = map(lambda line: line[0], line_group)
        angles = map(lambda line: line[1], line_group)
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        vertical_lines.append(line)

    # Get horizontal lines
    horizontal_lines = []
    for line in raw_lines:
        dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi
        if dist_from_horizontal < self.horizontal_threshold or \
           dist_from_horizontal > math.pi - self.horizontal_threshold:
            horizontal_lines.append((abs(line[0]), line[1]))

    # Group horizontal lines
    horizontal_line_groups = []  # A list of line groups which are each a line list
    for line in horizontal_lines:
        group_found = False
        for line_group in horizontal_line_groups:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            horizontal_line_groups.append([line])

    if len(horizontal_line_groups) == 1:
        self.seen_crossbar = True
        rhos = map(lambda line: line[0], horizontal_line_groups[0])
        angles = map(lambda line: line[1], horizontal_line_groups[0])
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        horizontal_lines = [line]
    else:
        self.seen_crossbar = False
        horizontal_lines = []

    self.left_pole = None
    self.right_pole = None
    if len(vertical_lines) == 2:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        self.left_pole = round(min(vertical_lines[0][0],
                                   vertical_lines[1][0]), 2) - width / 2
        self.right_pole = round(max(vertical_lines[0][0],
                                    vertical_lines[1][0]), 2) - width / 2
    #TODO: If one pole is seen, is it left or right pole?

    # Calculate planar distance r (assuming we are moving perpendicular to
    # the hedge)
    if self.left_pole and self.right_pole:
        theta = abs(self.left_pole - self.right_pole)
        self.r = 3 / tan(radians(theta / 2))
    else:
        self.r = None

    if self.r and self.seen_crossbar:
        bar_phi = (-1 * horizontal_lines[0][0] + frame.height / 2) / (frame.height / 2) * 32
        self.crossbar_depth = self.r * atan(radians(bar_phi))
    else:
        self.crossbar_depth = None
    '''

    self.left_pole = None
    self.right_pole = None
    self.seen_crossbar = False
    self.crossbar_depth = None

    if self.debug and max_length != 0:
        cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)
        #libvision.misc.draw_lines(frame, vertical_lines)
        #libvision.misc.draw_lines(frame, horizontal_lines)
        # for line in raw_lines:
        #     cv.Line(frame, line[0], line[1], (255, 255, 0), 10, cv.CV_AA, 0)
        #     cv.Circle(frame, line[1], 15, (255, 0, 0), 2, 8, 0)
        # print len(raw_lines)
        #cv.ShowImage("Hedge", cv.CloneImage(frame))

        # Assign the left/right pole angles from the crossbar endpoints
        if (crossbar_seg[0][0] - frame.width / 2) * 37 / (frame.width / 2) < \
           (crossbar_seg[1][0] - frame.width / 2) * 37 / (frame.width / 2):
            self.left_pole = round((crossbar_seg[0][0] - frame.width / 2) *
                                   37 / (frame.width / 2))
            self.right_pole = round((crossbar_seg[1][0] - frame.width / 2) *
                                    37 / (frame.width / 2))
        else:
            self.left_pole = round((crossbar_seg[1][0] - frame.width / 2) *
                                   37 / (frame.width / 2))
            self.right_pole = round((crossbar_seg[0][0] - frame.width / 2) *
                                    37 / (frame.width / 2))

        self.crossbar_depth = round(
            -1 * (crossbar_seg[1][1] - frame.height / 2) *
            36 / (frame.height / 2))

        if math.fabs(self.left_pole) <= 37 and \
           math.fabs(self.left_pole) >= self.frame_boundary_thresh:
            self.left_pole = None
        if math.fabs(self.right_pole) <= 37 and \
           math.fabs(self.right_pole) >= self.frame_boundary_thresh:
            self.right_pole = None

        self.seen_crossbar = True

        if self.left_pole and self.right_pole:
            self.returning = (self.left_pole + self.right_pole) / 2
            print "Returning ", self.returning

            if self.last_seen < 0:
                self.last_center = None
                self.last_seen = 0
            if self.last_center is None:
                self.last_center = self.returning
                self.seen_count = 1
            elif math.fabs(self.last_center - self.returning) < self.center_trans_thresh:
                self.seen_count += 1
                self.last_seen += 2
            else:
                self.last_seen -= 1

            if self.seen_count < self.seen_count_thresh:
                self.left_pole = None
                self.right_pole = None
            else:
                print "FOUND CENTER AND RETURNED IT"
                self.found = True
        else:
            self.returning = 0
            if self.last_seen < 0:
                self.last_center = None
                self.last_seen = 0
            self.last_seen -= 1
            self.left_pole = None
            self.right_pole = None

        cv.Line(frame, crossbar_seg[0], crossbar_seg[1],
                (255, 255, 0), 10, cv.CV_AA, 0)
        if self.left_pole and crossbar_seg[0][0] < crossbar_seg[1][0]:
            cv.Line(frame, crossbar_seg[0],
                    (crossbar_seg[0][0], crossbar_seg[0][1] - 500),
                    (255, 0, 0), 10, cv.CV_AA, 0)
        elif self.left_pole:
            cv.Line(frame, crossbar_seg[1],
                    (crossbar_seg[1][0], crossbar_seg[1][1] - 500),
                    (255, 0, 0), 10, cv.CV_AA, 0)
        if self.right_pole and crossbar_seg[0][0] > crossbar_seg[1][0]:
            cv.Line(frame, crossbar_seg[0],
                    (crossbar_seg[0][0], crossbar_seg[0][1] - 500),
                    (255, 0, 0), 10, cv.CV_AA, 0)
        elif self.right_pole:
            cv.Line(frame, crossbar_seg[1],
                    (crossbar_seg[1][0], crossbar_seg[1][1] - 500),
                    (255, 0, 0), 10, cv.CV_AA, 0)

        # populate self.output with infos
        self.output.seen_crossbar = self.seen_crossbar
        self.output.left_pole = self.left_pole
        self.output.right_pole = self.right_pole
        #self.output.r = self.r
        self.output.crossbar_depth = self.crossbar_depth

        self.return_output()
        print self
    else:
        cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)

    svr.debug("Hedge", cv.CloneImage(frame))
    svr.debug("Hedge2", test_frame)
def process_frame(self, frame):
    ################
    # setup CV     #
    ################
    print "processing frame"
    (w, h) = cv.GetSize(frame)

    # generate hue selection frames
    ones = np.ones((h, w, 1), dtype='uint8')
    a = ones * (180 - self.target_hue)
    b = ones * (180 - self.target_hue + 20)
    a_array = cv.fromarray(a)
    b_array = cv.fromarray(b)

    # create locations for the test frame and binary frame
    frametest = cv.CreateImage(cv.GetSize(frame), 8, 3)
    binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1)

    # use the red channel for the binary frame (just for debugging purposes)
    cv.Copy(frame, frametest)
    cv.SetImageCOI(frametest, 3)
    cv.Copy(frametest, binarytest)

    # reset the COI for test frame to RGB.
    cv.SetImageCOI(frametest, 0)

    # Resize image to 320x240
    #copy = cv.CreateImage(cv.GetSize(frame), 8, 3)
    #cv.Copy(frame, copy)
    #cv.SetImageROI(frame, (0, 0, 320, 240))
    #cv.Resize(copy, frame, cv.CV_INTER_NN)

    found_gate = False

    # create a new frame for comparison purposes
    unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.Copy(frame, unchanged_frame)

    # apply noise filter #1
    cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

    # Set binary image to have saturation channel
    hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
    cv.SetImageCOI(hsv, 1)
    cv.Copy(hsv, binary)

    # spin the color wheel (pseudo-code for later if necessary)
    #   truncate spectrum marked as end
    #   shift all values up based on truncating value (mask out 0 regions)
    #   take truncated bits, and flip them (180->0, 179->1...)
    #   now that truncated bits are flipped, add them back in to final image

    # Reset hsv COI
    cv.SetImageCOI(hsv, 0)

    # correct for wraparound on red spectrum
    cv.InRange(binary, a_array, b_array, binarytest)  # generate mask
    cv.Add(binary, cv.fromarray(ones * 180), binary,
           mask=binarytest)  # use mask to selectively add values

    # run adaptive threshold for edge detection
    cv.AdaptiveThreshold(
        binary, binary,
        255,
        cv.CV_ADAPTIVE_THRESH_MEAN_C,
        cv.CV_THRESH_BINARY_INV,
        self.adaptive_thresh_blocksize,
        self.adaptive_thresh,
    )

    # Morphology
    kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
    cv.Erode(binary, binary, kernel, 1)
    cv.Dilate(binary, binary, kernel, 1)

    if self.debug:
        color_filtered = cv.CloneImage(binary)

    # Get Edges
    cv.Canny(binary, binary, 30, 40)

    # Hough Transform
    line_storage = cv.CreateMemStorage()
    raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD,
                               rho=1,
                               theta=math.pi / 180,
                               threshold=self.hough_threshold,
                               param1=0,
                               param2=0)

    # Get vertical lines
    vertical_lines = []
    i = 0
    for line in raw_lines:
        if line[1] < self.vertical_threshold or \
           line[1] > (math.pi - self.vertical_threshold):
            # absolute value does better grouping currently
            vertical_lines.append((abs(line[0]), line[1]))
        i += 1

    # print message to user for performance purposes
    logging.debug("{} possibilities reduced to {} lines".format(
        i, len(vertical_lines)))

    # Group vertical lines
    vertical_line_groups = []  # A list of line groups which are each a line list
    i = 0
    for line in vertical_lines:
        group_found = False
        for line_group in vertical_line_groups:
            i += 1
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            vertical_line_groups.append([line])

    # quick debugging statement
    logging.debug("{} internal iterations for {} groups".format(
        i, len(vertical_line_groups)))

    # Average line groups into lines
    vertical_lines = []
    for line_group in vertical_line_groups:
        rhos = map(lambda line: line[0], line_group)  # get rho of each line
        angles = map(lambda line: line[1], line_group)
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        vertical_lines.append(line)

    self.left_pole = None
    self.right_pole = None
    self.returning = 0
    self.found = False

    if len(vertical_lines) == 2:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        self.left_pole = round(
            min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
        self.right_pole = round(
            max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
        self.returning = (self.left_pole + self.right_pole) / 2
        logging.info("Returning {}".format(self.returning))

        # If this is the first iteration, count this as seeing the gate
        if self.last_seen < 0:
            self.last_center = None
            self.last_seen = 0

        # increment a counter if result is good.
        if self.last_center is None:
            self.last_center = self.returning
            self.seen_count = 1
        elif math.fabs(self.last_center - self.returning) < self.center_trans_thresh:
            self.seen_count += 1
            self.last_seen += 2
        else:
            self.last_seen -= 1

        # if not convinced, forget left/right pole.  Else, proclaim success.
        if self.seen_count < self.seen_count_thresh:
            self.left_pole = None
            self.right_pole = None
        else:
            print "FOUND CENTER AND RETURNED IT"
            self.found = True
    else:
        self.returning = 0
        if self.last_seen < 0:
            self.last_center = None
            self.last_seen = 0
        self.last_seen -= 1
        self.left_pole = None
        self.right_pole = None

    # extra debugging stuff
    if self.debug:
        cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)
        libvision.misc.draw_lines(frame, vertical_lines)
        if self.found:
            cv.Circle(frame,
                      (int(frame.width / 2 + self.returning),
                       int(frame.height / 2)),
                      15, (0, 255, 0), 2, 8, 0)
            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3)
            cv.PutText(frame, "Gate Sent to Mission Control", (100, 400),
                       font, (255, 255, 0))
        #print frame.width

    #cv.ShowImage("Gate", cv.CloneImage(frame))
    svr.debug("Gate", cv.CloneImage(frame))
    svr.debug("Unchanged", cv.CloneImage(unchanged_frame))

    self.return_output()
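# line_group_accept_test() is called by every grouping loop in this section
# but is defined elsewhere.  A plausible sketch, assuming a (rho, theta)
# line joins a group when its rho is within max_range of every member
# (theta is pre-filtered by the caller's vertical/horizontal tests):
def line_group_accept_test(line_group, line, max_range):
    # Accept the line only if it is close in rho to every line already
    # accepted into the group.
    rho, theta = line
    return all(abs(rho - g_rho) <= max_range for g_rho, g_theta in line_group)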
def process_frame(self, frame):
    (w, h) = cv.GetSize(frame)

    # generate hue selection frames
    # create locations for a pair of test frames
    frametest = cv.CreateImage(cv.GetSize(frame), 8, 3)
    binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1)

    # use the red channel for the binary frame (just for debugging purposes)
    cv.Copy(frame, frametest)
    cv.SetImageCOI(frametest, 3)
    cv.Copy(frametest, binarytest)
    cv.SetImageCOI(frametest, 0)  # reset COI
    #svr.debug("R?", binarytest)

    # Resize image to 320x240
    #copy = cv.CreateImage(cv.GetSize(frame), 8, 3)
    #cv.Copy(frame, copy)
    #cv.SetImageROI(frame, (0, 0, 320, 240))
    #cv.Resize(copy, frame, cv.CV_INTER_NN)

    found_gate = False

    # create a new frame just for comparison purposes
    unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.Copy(frame, unchanged_frame)

    # apply a coarse noise filter
    cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

    # Set binary image to have saturation channel
    hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
    cv.SetImageCOI(hsv, 1)
    cv.Copy(hsv, binary)
    cv.SetImageCOI(hsv, 0)  # reset COI

    # shift hue of image such that orange->red are at top of spectrum
    '''
    binary = libvision.misc.cv_to_cv2(binary)
    binary = libvision.misc.shift_hueCV2(binary, self.target_shift)
    binary = libvision.misc.cv2_to_cv(binary)
    '''

    # correct for wraparound on red spectrum
    #cv.InRange(binary, a_array, b_array, binarytest)  # generate mask
    #cv.Add(binary, cv.fromarray(ones*180), binary, mask=binarytest)  # use mask to selectively add values

    svr.debug("R2?", binary)

    # run adaptive threshold for edge detection and more noise filtering
    cv.AdaptiveThreshold(
        binary, binary,
        255,
        cv.CV_ADAPTIVE_THRESH_MEAN_C,
        cv.CV_THRESH_BINARY_INV,
        self.adaptive_thresh_blocksize,
        self.adaptive_thresh,
    )

    # Morphology
    kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
    cv.Erode(binary, binary, kernel, 1)
    cv.Dilate(binary, binary, kernel, 1)

    if self.debug:
        color_filtered = cv.CloneImage(binary)

    # Get Edges
    cv.Canny(binary, binary, 30, 40)

    # Hough Transform
    line_storage = cv.CreateMemStorage()
    raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD,
                               rho=1,
                               theta=math.pi / 180,
                               threshold=self.hough_threshold,
                               param1=0,
                               param2=0)

    # Get vertical lines
    vertical_lines = []
    for line in raw_lines:
        if line[1] < self.vertical_threshold or \
           line[1] > math.pi - self.vertical_threshold:
            # absolute value does better grouping currently
            vertical_lines.append((abs(line[0]), line[1]))

    # print message to user for performance purposes
    logging.debug("{} possibilities reduced to {} lines".format(
        len(raw_lines), len(vertical_lines)))

    # Group vertical lines
    vertical_line_groups = []  # A list of line groups which are each a line list
    i = 0
    for line in vertical_lines:
        group_found = False
        for line_group in vertical_line_groups:
            i += 1
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            vertical_line_groups.append([line])

    # quick debugging statement
    logging.debug("{} internal iterations for {} groups".format(
        i, len(vertical_line_groups)))

    # Average line groups into lines
    vertical_lines = []
    for line_group in vertical_line_groups:
        rhos = map(lambda line: line[0], line_group)
        angles = map(lambda line: line[1], line_group)
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        vertical_lines.append(line)

    ####################################################
    # vvvv Horizontal line code isn't used for anything

    # Get horizontal lines
    horizontal_lines = []
    for line in raw_lines:
        dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi
        if dist_from_horizontal < self.horizontal_threshold or \
           dist_from_horizontal > math.pi - self.horizontal_threshold:
            horizontal_lines.append((abs(line[0]), line[1]))

    # Group horizontal lines
    horizontal_line_groups = []  # A list of line groups which are each a line list
    for line in horizontal_lines:
        group_found = False
        for line_group in horizontal_line_groups:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            horizontal_line_groups.append([line])

    if len(horizontal_line_groups) == 1:
        self.seen_crossbar = True
        if self.debug:
            rhos = map(lambda line: line[0], horizontal_line_groups[0])
            angles = map(lambda line: line[1], horizontal_line_groups[0])
            line = (sum(rhos) / len(rhos),
                    circular_average(angles, math.pi))
            horizontal_lines = [line]
    else:
        self.seen_crossbar = False
        horizontal_lines = []

    # ^^^ Horizontal line code isn't used for anything
    ###################################################

    self.left_pole = None
    self.right_pole = None
    #print vertical_lines
    self.returning = 0
    self.found = False

    if len(vertical_lines) == 2:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        self.left_pole = round(
            min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
        self.right_pole = round(
            max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
        self.returning = (self.left_pole + self.right_pole) / 2
        logging.info("Returning {} as gate center delta.".format(
            self.returning))

        # initialize first iteration with 2 known poles
        if self.last_seen < 0:
            self.last_center = None
            self.last_seen = 0

        # increment a counter if result is good.
        if self.last_center is None:
            self.last_center = self.returning
            self.seen_count = 1
        elif math.fabs(self.last_center - self.returning) < self.center_trans_thresh:
            self.seen_count += 1
            self.last_seen += 2
        else:
            self.last_seen -= 1

        # if not convinced, forget left/right pole.  Else proclaim success.
        if self.seen_count < self.seen_count_thresh:
            self.left_pole = None
            self.right_pole = None
        else:
            print "FOUND CENTER AND RETURNED IT"
            self.found = True
    else:
        self.returning = 0
        if self.last_seen < 0:
            self.last_center = None
            self.last_seen = 0
        self.last_seen -= 1
        self.left_pole = None
        self.right_pole = None

    # TODO: If one pole is seen, is it left or right pole?

    if self.debug:
        cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)
        libvision.misc.draw_lines(frame, vertical_lines)
        libvision.misc.draw_lines(frame, horizontal_lines)
        if self.found:
            cv.Circle(frame,
                      (int(frame.width / 2 + self.returning),
                       int(frame.height / 2)),
                      15, (0, 255, 0), 2, 8, 0)
            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3)
            cv.PutText(frame, "Gate Sent to Mission Control", (100, 400),
                       font, (255, 255, 0))
        #print frame.width

    #cv.ShowImage("Gate", cv.CloneImage(frame))
    svr.debug("Gate", cv.CloneImage(frame))
    svr.debug("Unchanged", cv.CloneImage(unchanged_frame))

    # populate self.output with infos
    self.output.seen_crossbar = self.seen_crossbar
    self.output.left_pole = self.left_pole
    self.output.right_pole = self.right_pole

    self.return_output()
def process_frame(self, frame, debug=True):
    """ process this frame, then place output in self.output """

    if debug:
        # display frame
        svr.debug("Frame", frame)

    # create a new image, the size of frame
    gray = cv.CreateImage(cv.GetSize(frame), 8, 1)
    edge = cv.CreateImage(cv.GetSize(frame), 8, 1)
    lines = cv.CloneImage(frame)

    # copy BW frame into binary
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)

    # Get Edges
    cv.Canny(gray, edge, 60, 80)

    # Create a Binary Image
    binary = cv.CreateImage(cv.GetSize(frame), 8, 1)

    # Run Adaptive Threshold
    cv.AdaptiveThreshold(gray, binary,
                         255,
                         cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         cv.CV_THRESH_BINARY_INV,
                         19,
                         4,
                         )

    # display adaptive threshold
    svr.debug("Thresh", binary)

    # Hough Transform
    line_storage = cv.CreateMemStorage()
    raw_lines = cv.HoughLines2(edge, line_storage, cv.CV_HOUGH_STANDARD,
                               rho=1,
                               theta=math.pi / 180,
                               threshold=50,
                               param1=0,
                               param2=0
                               )

    # process line data
    found_lines = []
    for line in raw_lines[:10]:
        found_lines.append((abs(line[0]), line[1]))

    print found_lines

    # display our transformed image
    svr.debug("Edges", edge)

    # draw found lines
    libvision.misc.draw_lines(lines, found_lines)

    # display image with lines
    svr.debug("Lines", lines)

    cv.WaitKey(10)

    self.output = "test data"
def process_frame(self, frame):
    self.numpy_frame = libvision.cv_to_cv2(frame)
    self.debug_frame = self.numpy_frame.copy()
    self.test_frame = self.numpy_frame.copy()

    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 7)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (rf1, rf2, rf3) = cv2.split(self.numpy_frame)

    Rbinary = rf3
    Gbinary = rf1

    # Adaptive Threshold
    Rbinary = cv2.adaptiveThreshold(Rbinary, 255,
                                    cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY_INV,
                                    self.adaptive_thresh_blocksize,
                                    self.adaptive_thresh)
    Gbinary = cv2.adaptiveThreshold(Gbinary, 255,
                                    cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY_INV,
                                    self.Gadaptive_thresh_blocksize,
                                    self.Gadaptive_thresh)

    # Morphology
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    Rbinary = cv2.erode(Rbinary, kernel)
    Rbinary = cv2.dilate(Rbinary, kernel)
    Gbinary = cv2.erode(Gbinary, kernel)
    Gbinary = cv2.dilate(Gbinary, kernel)

    Rframe = cv2.cvtColor(Rbinary, cv2.COLOR_GRAY2RGB)
    Gframe = cv2.cvtColor(Gbinary, cv2.COLOR_GRAY2RGB)

    # Hough Transform
    raw_linesG = cv2.HoughLines(Gbinary,
                                rho=1,
                                theta=math.pi / 180,
                                threshold=self.hough_thresholdG)

    # Get vertical lines
    vertical_linesG = []
    for line in raw_linesG[0]:
        rho = line[0]
        theta = line[1]
        if theta < self.vertical_thresholdG or \
           theta > math.pi - self.vertical_thresholdG:
            vertical_linesG.append((abs(rho), theta))

    # Group vertical lines
    vertical_line_groupsG = []  # A list of line groups which are each a line list
    for line in vertical_linesG:
        group_found = False
        for line_group in vertical_line_groupsG:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            vertical_line_groupsG.append([line])

    # Average line groups into lines
    vertical_linesG = []
    for line_group in vertical_line_groupsG:
        rhos = map(lambda line: line[0], line_group)
        angles = map(lambda line: line[1], line_group)
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        vertical_linesG.append(line)

    # Get horizontal lines
    horizontal_lines = []
    for line in raw_linesG[0]:
        rho = line[0]
        theta = line[1]
        dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi
        if dist_from_horizontal < self.horizontal_threshold or \
           dist_from_horizontal > math.pi - self.horizontal_threshold:
            horizontal_lines.append((abs(line[0]), line[1]))

    # Group horizontal lines
    horizontal_line_groups = []  # A list of line groups which are each a line list
    print "Horizontal lines: ",
    for line in horizontal_lines:
        group_found = False
        for line_group in horizontal_line_groups:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            horizontal_line_groups.append([line])

    if len(horizontal_line_groups) == 1:
        self.seen_crossbar = True
        rhos = map(lambda line: line[0], horizontal_line_groups[0])
        angles = map(lambda line: line[1], horizontal_line_groups[0])
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        horizontal_lines = [line]
    else:
        self.seen_crossbar = False
        horizontal_lines = []

    self.left_pole = None
    self.right_pole = None

    Rframe = libvision.cv2_to_cv(Rframe)
    Gframe = libvision.cv2_to_cv(self.debug_frame)
    Rbinary = libvision.cv2_to_cv(Rbinary)
    self.debug_frame = libvision.cv2_to_cv(self.debug_frame)
    self.test_frame = libvision.cv2_to_cv(self.test_frame)
    Gbinary = libvision.cv2_to_cv(Gbinary)

    if len(vertical_linesG) == 2:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        self.left_pole = round(min(vertical_linesG[0][0],
                                   vertical_linesG[1][0]), 2) - width / 2
        self.right_pole = round(max(vertical_linesG[0][0],
                                    vertical_linesG[1][0]), 2) - width / 2
        # TODO: If one pole is seen, is it left or right pole?

    # Calculate planar distance r (assuming we are moving perpendicular to
    # the hedge)
    if self.left_pole and self.right_pole:
        theta = abs(self.left_pole - self.right_pole)
        self.r = 3 / math.tan(math.radians(theta / 2))
    else:
        self.r = None

    if self.r and self.seen_crossbar:
        bar_phi = (-1 * horizontal_lines[0][0] + Gframe.height / 2) / \
            (Gframe.height / 2) * 32
        self.crossbar_depth = self.r * math.atan(math.radians(bar_phi))
    else:
        self.crossbar_depth = None

    # Line Finding on red PVC
    # Hough Transform
    line_storage = cv.CreateMemStorage()
    raw_linesR = cv.HoughLines2(Rbinary, line_storage, cv.CV_HOUGH_STANDARD,
                                rho=1,
                                theta=math.pi / 180,
                                threshold=self.hough_thresholdR,
                                param1=0,
                                param2=0
                                )

    # Get vertical lines
    vertical_linesR = []
    for line in raw_linesR:
        if line[1] < self.vertical_thresholdR or \
           line[1] > math.pi - self.vertical_thresholdR:
            vertical_linesR.append((abs(line[0]), line[1]))

    # Group vertical lines
    vertical_line_groupsR = []  # A list of line groups which are each a line list
    for line in vertical_linesR:
        group_found = False
        for line_group in vertical_line_groupsR:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            vertical_line_groupsR.append([line])

    # Average line groups into lines
    vertical_linesR = []
    for line_group in vertical_line_groupsR:
        rhos = map(lambda line: line[0], line_group)
        angles = map(lambda line: line[1], line_group)
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        vertical_linesR.append(line)

    '''
    for red_line in vertical_linesR:
        print "Red Line:", red_line[0], ", ", red_line[1]
    for green_line in vertical_linesG:
        print "Green Line:", green_line[0], ", ", green_line[1]
    '''

    # Remove green lines that duplicate red lines
    for red_line in vertical_linesR:
        for green_line in vertical_linesG[:]:
            if math.fabs(green_line[0] - red_line[0]) < self.GR_Threshold0 and \
               math.fabs(green_line[1] - red_line[1]) < self.GR_Threshold1:
                vertical_linesG.remove(green_line)

    for red_line in vertical_linesR:
        print "New Red Line:", red_line[0], ", ", red_line[1]
    for green_line in vertical_linesG:
        print "New Green Line:", green_line[0], ", ", green_line[1]

    if len(vertical_linesR) == 0:
        print "No Red Found"
        self.left_pole = None
        self.right_pole = None

    if len(vertical_linesR) == 2:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        self.left_pole = round(min(vertical_linesR[0][0],
                                   vertical_linesR[1][0]), 2) - width / 2
        self.right_pole = round(max(vertical_linesR[0][0],
                                    vertical_linesR[1][0]), 2) - width / 2
        # TODO: If one pole is seen, is it left or right pole?

    # Calculate planar distance r (assuming we are moving perpendicular to
    # the hedge)
    if self.left_pole and self.right_pole:
        theta = abs(self.left_pole - self.right_pole)
        self.r = 3 / math.tan(math.radians(theta / 2))
    else:
        self.r = None

    # Normalize angles greater than pi/2 into the other half-circle
    for i in range(len(vertical_linesR)):
        if vertical_linesR[i][1] > math.pi / 2:
            vertical_linesR[i] = (vertical_linesR[i][0],
                                  -(math.pi - vertical_linesR[i][1]))
            print "Line changed to ", vertical_linesR[i]

    for line in vertical_linesR:
        print line
        if line[1] > math.pi / 2:
            line = (line[0], math.pi - line[1])
            print "Line changed to ", line

    libvision.misc.draw_lines(Gframe, vertical_linesG)
    libvision.misc.draw_lines(Gframe, horizontal_lines)
    libvision.misc.draw_lines(Rframe, vertical_linesR)

    # there was a merge error, these 3 lines conflicted b/c your copy out of date
    for line in vertical_linesR:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        x = line[0] * math.cos(line[1])
        y = line[0] * math.sin(line[1])
        cv.Circle(Rframe, (int(x), int(y)), 5, (0, 255, 0), -1, 8, 0)
        if x > width or y > height or x < 0 or y < 0:
            print "Lost point ", x

    svr.debug("Original", self.test_frame)
    svr.debug("Red", Rframe)
    svr.debug("Green", Gframe)
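# A worked sketch of the planar-distance estimate used above: if the two
# poles are separated by theta degrees of bearing and the hedge half-width
# is 3 (units taken from the original constant, presumably feet), then
# r = 3 / tan(theta / 2).  Name and default are illustrative.
def planar_distance(theta_degrees, half_width=3.0):
    return half_width / math.tan(math.radians(theta_degrees / 2.0))

# e.g. a 20-degree separation gives roughly 17 units of standoff distance:
#     planar_distance(20.0)  ->  ~17.0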
def process_frame(self, frame):
    self.debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
    og_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.Copy(frame, self.debug_frame)
    cv.Copy(self.debug_frame, og_frame)

    cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

    # Set binary image to have saturation channel
    hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
    cv.SetImageCOI(hsv, 1)  # 3 before competition, 2 at competition
    cv.Copy(hsv, binary)
    cv.SetImageCOI(hsv, 0)

    cv.AdaptiveThreshold(binary, binary,
                         255,
                         cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         cv.CV_THRESH_BINARY_INV,
                         self.adaptive_thresh_blocksize,
                         self.adaptive_thresh,
                         )

    # Morphology
    kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
    cv.Erode(binary, binary, kernel, 1)
    cv.Dilate(binary, binary, kernel, 1)

    # Get Edges
    #cv.Canny(binary, binary, 30, 40)

    cv.CvtColor(binary, self.debug_frame, cv.CV_GRAY2RGB)

    # Hough Transform
    line_storage = cv.CreateMemStorage()
    raw_lines = cv.HoughLines2(binary, line_storage,
                               cv.CV_HOUGH_PROBABILISTIC,
                               rho=1,
                               theta=math.pi / 180,
                               threshold=self.hough_threshold,
                               param1=self.min_length,
                               param2=self.max_gap)

    lines = []
    for line in raw_lines:
        lines.append(line)

    # Grouping lines depending on endpoint similarities
    for line1 in lines[:]:
        for line2 in lines[:]:
            if line1 in lines and line2 in lines and line1 != line2:
                if math.fabs(line1[0][0] - line2[0][0]) < self.max_corner_range and \
                   math.fabs(line1[0][1] - line2[0][1]) < self.max_corner_range and \
                   math.fabs(line1[1][0] - line2[1][0]) < self.max_corner_range and \
                   math.fabs(line1[1][1] - line2[1][1]) < self.max_corner_range:
                    if line_distance(line1[0], line1[1]) > line_distance(line2[0], line2[1]):
                        lines.remove(line2)
                    else:
                        lines.remove(line1)
                elif math.fabs(line1[0][0] - line2[1][0]) < self.max_corner_range and \
                     math.fabs(line1[0][1] - line2[1][1]) < self.max_corner_range and \
                     math.fabs(line1[1][0] - line2[0][0]) < self.max_corner_range and \
                     math.fabs(line1[1][1] - line2[0][1]) < self.max_corner_range:
                    if line_distance(line1[0], line1[1]) > line_distance(line2[0], line2[1]):
                        lines.remove(line2)
                    else:
                        lines.remove(line1)

    self.hough_corners = []
    for line in lines:
        self.hough_corners.append(line[0])
        self.hough_corners.append(line[1])

    # Merge endpoints that are close together
    for corner1 in self.hough_corners[:]:
        for corner2 in self.hough_corners[:]:
            if corner1 is not corner2 and \
               corner1 in self.hough_corners and corner2 in self.hough_corners:
                if math.fabs(corner1[0] - corner2[0]) < self.max_corner_range4 and \
                   math.fabs(corner1[1] - corner2[1]) < self.max_corner_range4:
                    corner1 = [(corner1[0] + corner2[0]) / 2,
                               (corner1[1] + corner2[1]) / 2]
                    self.hough_corners.remove(corner2)

    for line1 in lines:
        #cv.Line(self.debug_frame, line1[0], line1[1], (0, 0, 255), 10, cv.CV_AA, 0)
        for line2 in lines:
            if line1 is not line2:
                self.find_corners(line1, line2)

    for corner1 in self.corners:
        for corner2 in self.corners:
            if math.fabs(corner1[1][0] - corner2[1][0]) < self.max_corner_range2 and \
               math.fabs(corner1[1][1] - corner2[1][1]) < self.max_corner_range2 and \
               math.fabs(corner1[2][0] - corner2[2][0]) < self.max_corner_range2 and \
               math.fabs(corner1[2][1] - corner2[2][1]) < self.max_corner_range2 and \
               math.fabs(corner1[0][0] - corner2[0][0]) > self.max_corner_range2 and \
               math.fabs(corner1[0][1] - corner2[0][1]) > self.max_corner_range2:
                pt1 = (int(corner1[0][0]), int(corner1[0][1]))
                pt4 = (int(corner2[0][0]), int(corner2[0][1]))
                pt3 = (int(corner1[1][0]), int(corner1[1][1]))
                pt2 = (int(corner1[2][0]), int(corner1[2][1]))
                #line_color = (0, 255, 0)
                #cv.Line(self.debug_frame, pt1, pt2, line_color, 10, cv.CV_AA, 0)
                #cv.Line(self.debug_frame, pt1, pt3, line_color, 10, cv.CV_AA, 0)
                #cv.Line(self.debug_frame, pt4, pt2, line_color, 10, cv.CV_AA, 0)
                #cv.Line(self.debug_frame, pt4, pt3, line_color, 10, cv.CV_AA, 0)
                new_bin = Bin(pt1, pt2, pt3, pt4)
                new_bin.id = self.bin_id
                self.bin_id += 1
                if math.fabs(line_distance(pt1, pt2) - line_distance(pt3, pt4)) < self.parallel_sides_length_thresh and \
                   math.fabs(line_distance(pt1, pt3) - line_distance(pt2, pt4)) < self.parallel_sides_length_thresh:
                    self.Bins.append(new_bin)
                    print "new_bin"
            elif (math.fabs(corner1[1][0] - corner2[2][0]) < self.max_corner_range2 and
                  math.fabs(corner1[1][1] - corner2[2][1]) < self.max_corner_range2 and
                  math.fabs(corner1[2][0] - corner2[1][0]) < self.max_corner_range2 and
                  math.fabs(corner1[2][1] - corner2[1][1]) < self.max_corner_range2 and
                  math.fabs(corner1[0][0] - corner2[0][0]) > self.max_corner_range2 and
                  math.fabs(corner1[0][1] - corner2[0][1]) > self.max_corner_range2):
                continue

    self.corners = []
    # Results are not used.  Experimental: corners which have been seen
    # twice should be only the corners we want, but there were problems.
    self.final_corners = self.sort_corners()
    self.sort_bins()
    self.update_bins()
    self.group_bins()
    self.draw_bins()

    for corner in self.hough_corners:
        line_color = [255, 0, 0]
        cv.Circle(self.debug_frame, corner, 15, (255, 0, 0), 2, 8, 0)

    for line in lines:
        line_color = [255, 0, 0]
        cv.Line(self.debug_frame, line[0], line[1], line_color, 5, cv.CV_AA, 0)
        #cv.Circle(self.debug_frame, line[0], 15, (255, 0, 0), 2, 8, 0)
        #cv.Circle(self.debug_frame, line[1], 15, (255, 0, 0), 2, 8, 0)

    # Output bins
    self.output.bins = self.Bins
    anglesum = 0
    for bins in self.output.bins:
        bins.theta = (bins.center[0] - frame.width / 2) * 37 / (frame.width / 2)
        bins.phi = -1 * (bins.center[1] - frame.height / 2) * 36 / (frame.height / 2)
        anglesum += bins.angle
        # bins.orientation = bins.angle
    if len(self.output.bins) > 0:
        self.output.orientation = anglesum / len(self.output.bins)
    else:
        self.output.orientation = None
    self.return_output()

    svr.debug("Bins", self.debug_frame)
    svr.debug("Original", og_frame)

    # BEGIN SHAPE PROCESSING

    # constants
    img_width = 128
    img_height = 256
    number_x = 23
    number_y = 111
    number_w = 82
    number_h = 90
    bin_thresh_blocksize = 11
    bin_thresh = 1.9
    red_significance_threshold = 0.4

    # load templates - run once, accessible to number processor
    number_templates = [
        (10, cv.LoadImage("number_templates/10.png")),
        (16, cv.LoadImage("number_templates/16.png")),
        (37, cv.LoadImage("number_templates/37.png")),
        (98, cv.LoadImage("number_templates/98.png")),
    ]

    # Begin Bin Contents Processing
    for bin in self.Bins:
        # Take the bin's corners, and get an image containing an
        # img_width x img_height rectangle of it
        transf = cv.CreateMat(3, 3, cv.CV_32FC1)
        cv.GetPerspectiveTransform(
            [bin.corner1, bin.corner2, bin.corner3, bin.corner4],
            [(0, 0), (0, img_height), (img_width, 0), (img_width, img_height)],
            transf
        )
        bin_image = cv.CreateImage([img_width, img_height], 8, 3)
        cv.WarpPerspective(frame, bin_image, transf)

        # AdaptiveThreshold to get a black and white image highlighting the
        # number (still works better than my yellow-vs-red threshold attempt)
        hsv = cv.CreateImage(cv.GetSize(bin_image), 8, 3)
        bin_thresh_image = cv.CreateImage(cv.GetSize(bin_image), 8, 1)
        cv.CvtColor(bin_image, hsv, cv.CV_BGR2HSV)
        cv.SetImageCOI(hsv, 3)
        cv.Copy(hsv, bin_thresh_image)
        cv.SetImageCOI(hsv, 0)
        cv.AdaptiveThreshold(bin_thresh_image, bin_thresh_image,
                             255,
                             cv.CV_ADAPTIVE_THRESH_MEAN_C,
                             cv.CV_THRESH_BINARY_INV,
                             bin_thresh_blocksize,
                             bin_thresh,
                             )
        kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(bin_thresh_image, bin_thresh_image, kernel, 1)
        cv.Dilate(bin_thresh_image, bin_thresh_image, kernel, 1)

        # Here, we loop through all four different templates, and figure out
        # which one we think is most likely.  The comparison function counts
        # corresponding pixels that are non-zero in each image, and then
        # corresponding pixels that are different in each image.  The ratio
        # of diff_count/both_count is our "unconfidence" ratio.  The lower
        # it is, the more confident we are.  There are two nearly identical
        # pieces of code within this loop: one checks the bin right-side-up,
        # and the other one checks it flipped 180.
        last_thought_number = -1
        last_unconfidence_ratio = number_w * number_h + 2
        for i in range(0, len(number_templates)):
            both_count = 0
            diff_count = 0
            this_number_image = number_templates[i][1]
            for y in range(0, number_h):
                for x in range(0, number_w):
                    if (bin_thresh_image[y + number_y, x + number_x] != 0) and \
                       (this_number_image[y, x][0] != 0):
                        both_count += 1
                    elif (bin_thresh_image[y + number_y, x + number_x] != 0) or \
                         (this_number_image[y, x][0] != 0):
                        diff_count += 1
            if both_count == 0:
                unconfidence_ratio = number_w * number_h + 1  # max unconfidence
            else:
                unconfidence_ratio = 1.0 * diff_count / both_count
            if unconfidence_ratio < last_unconfidence_ratio:
                last_thought_number = number_templates[i][0]
                last_unconfidence_ratio = unconfidence_ratio

            # Same comparison, with the bin flipped 180 degrees
            both_count = 0
            diff_count = 0
            for y in range(0, number_h):
                for x in range(0, number_w):
                    if (bin_thresh_image[img_height - number_y - 1 - y,
                                         img_width - number_x - 1 - x] != 0) and \
                       (this_number_image[y, x][0] != 0):
                        both_count += 1
                    elif (bin_thresh_image[img_height - number_y - 1 - y,
                                           img_width - number_x - 1 - x] != 0) or \
                         (this_number_image[y, x][0] != 0):
                        diff_count += 1
            if both_count == 0:
                unconfidence_ratio = number_w * number_h + 1  # max unconfidence
            else:
                unconfidence_ratio = 1.0 * diff_count / both_count
            if unconfidence_ratio < last_unconfidence_ratio:
                last_thought_number = number_templates[i][0]
                last_unconfidence_ratio = unconfidence_ratio

        print str(last_thought_number) + " | " + str(last_unconfidence_ratio)

        try:  # check if it's defined
            bin.number_unconfidence_ratio
        except AttributeError:
            bin.number_unconfidence_ratio = last_unconfidence_ratio
            bin.number = last_thought_number
            print "Set Speed Limit Number"
        else:
            if last_unconfidence_ratio < bin.number_unconfidence_ratio:
                bin.number_unconfidence_ratio = last_unconfidence_ratio
                if bin.number == last_thought_number:
                    print "More Confident on Same Number: Updated"
                else:
                    print "More Confident on Different Number: Updated"
                    bin.icon = last_thought_number
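# A vectorized NumPy sketch of the same "unconfidence" comparison used in
# the template loop above.  It assumes the bin patch and template have
# already been extracted as arrays of identical shape; the function name
# and arguments are illustrative, not from the original code.
import numpy as np

def unconfidence(bin_patch, template):
    bin_on = bin_patch != 0
    tmpl_on = template != 0
    both = np.count_nonzero(bin_on & tmpl_on)  # non-zero in both images
    diff = np.count_nonzero(bin_on ^ tmpl_on)  # non-zero in exactly one
    if both == 0:
        return float('inf')  # maximum unconfidence
    return float(diff) / both  # lower means more confident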
def process_frame(self, frame): self.debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) og_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.Copy(frame, self.debug_frame) cv.Copy(self.debug_frame, og_frame) cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have saturation channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 1) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) cv.AdaptiveThreshold( binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) # Get Edges #cv.Canny(binary, binary, 30, 40) cv.CvtColor(binary, self.debug_frame, cv.CV_GRAY2RGB) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_PROBABILISTIC, rho=1, theta=math.pi / 180, threshold=self.hough_threshold, param1=self.min_length, param2=self.max_gap) lines = [] corners = [] for line in raw_lines: lines.append(line) # Grouping lines depending on endpoint similarities for line1 in lines[:]: for line2 in lines[:]: if line1 in lines and line2 in lines and line1 != line2: if math.fabs(line1[0][0] - line2[0][0]) < self.max_corner_range and \ math.fabs(line1[0][1] - line2[0][1]) < self.max_corner_range and \ math.fabs(line1[1][0] - line2[1][0]) < self.max_corner_range and \ math.fabs(line1[1][1] - line2[1][1]) < self.max_corner_range: if line_distance(line1[0], line1[1]) > line_distance( line2[0], line2[1]): lines.remove(line2) else: lines.remove(line1) elif math.fabs(line1[0][0] - line2[1][0]) < self.max_corner_range and \ math.fabs(line1[0][1] - line2[1][1]) < self.max_corner_range and \ math.fabs(line1[1][0] - line2[0][0]) < self.max_corner_range and \ math.fabs(line1[1][1] - line2[0][1]) < self.max_corner_range: if line_distance(line1[0], line1[1]) > line_distance( line2[0], line2[1]): lines.remove(line2) else: lines.remove(line1) for line in lines: corners.append(line[0]) corners.append(line[1]) for corner1 in corners: for corner2 in corners: for corner3 in corners: for corner4 in corners: # Checks that corners are not the same and are in the proper orientation if corner4[0] != corner3[0] and corner4[0] != corner2[0] and corner4[0] != corner1[0] and \ corner3[0] != corner2[0] and corner3[0] != corner1[0] and corner2[0] != corner1[0] and \ corner4[1] != corner3[1] and corner4[1] != corner2[1] and corner4[1] != corner1[1] and \ corner3[1] != corner2[1] and corner3[1] != corner1[1] and corner2[1] != corner1[1] and \ corner2[0] >= corner3[0] and corner1[1] >= corner4[1] and corner2[0] >= corner1[0]: # Checks that the side ratios are correct if math.fabs(line_distance(corner1, corner3) - line_distance(corner2, corner4)) < self.size_threshold and \ math.fabs(line_distance(corner1, corner2) - line_distance(corner3, corner4)) < self.size_threshold and \ math.fabs(line_distance(corner1, corner3) / line_distance(corner1, corner2)) < self.ratio_threshold and \ math.fabs(line_distance(corner1, corner2) / line_distance(corner1, corner3)) < self.ratio_threshold: #^^^ CHANGED OR TO AND --> DID MUCH BETTER. 
# CONSIDER CHANGING ON BINSCORNER
# Checks that angles are roughly 90 degrees
# (these checks run inside the corner loops above)
angle_cnr_2 = math.fabs(angle_between_lines(line_slope(corner1, corner2),
                                            line_slope(corner2, corner4)))
if self.angle_min < angle_cnr_2 < self.angle_max:
    angle_cnr_3 = math.fabs(angle_between_lines(line_slope(corner1, corner3),
                                                line_slope(corner3, corner4)))
    if self.angle_min2 < angle_cnr_3 < self.angle_max2:
        new_box = Pizza(corner1, corner2, corner3, corner4)
        self.match_Boxes(new_box)

# Box upkeep (back at function level)
for Box in self.Boxes[:]:
    Box.lastseen -= 1
    if Box.lastseen < 0:
        self.Boxes.remove(Box)
self.draw_pizza()

for line in lines:
    cv.Line(self.debug_frame, line[0], line[1], (255, 255, 0), 10, cv.CV_AA, 0)
    cv.Circle(self.debug_frame, line[0], 15, (255, 0, 0), 2, 8, 0)
    cv.Circle(self.debug_frame, line[1], 15, (255, 0, 0), 2, 8, 0)

self.output.pizza = self.Boxes
anglesum = 0
for Box in self.Boxes:
    Box.theta = (Box.center[0] - frame.width / 2) * 37 / (frame.width / 2)
    Box.phi = -1 * (Box.center[1] - frame.height / 2) * 36 / (frame.height / 2)
    anglesum += Box.angle
if len(self.output.pizza) > 0:
    self.output.orientation = anglesum / len(self.output.pizza)
else:
    self.output.orientation = None

self.return_output()
svr.debug("Pizza", self.debug_frame)
svr.debug("Original", og_frame)
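# A hedged side note: the four nested corner loops above enumerate O(n^4)
# ordered corner tuples and re-test the same uniqueness conditions each
# time. A sketch of the same candidate generation with itertools, assuming
# the uniqueness, ratio, and angle tests are factored into a hypothetical
# is_valid_quad(); semantics are close but not byte-identical to the loops:
import itertools

def quad_candidates(corners, is_valid_quad):
    for c1, c2, c3, c4 in itertools.permutations(corners, 4):
        # orientation pre-filter, mirroring the checks above
        if c2[0] >= c3[0] and c1[1] >= c4[1] and c2[0] >= c1[0]:
            if is_valid_quad(c1, c2, c3, c4):
                yield (c1, c2, c3, c4)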
def process_frame(self, frame): self.output.found = False cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Use RGB color finder binary = libvision.cmodules.target_color_rgb.find_target_color_rgb( frame, 250, 125, 0, 1500, 500, .3) color_filtered = cv.CloneImage(binary) blob_map = cv.CloneImage(binary) blobs = libvision.blob.find_blobs(binary, blob_map, min_blob_size=50, max_blobs=10) if not blobs: return binary = cv.CloneImage(blob_map) mapping = [0] * 256 for blob in blobs: mapping[blob.id] = 255 libvision.greymap.greymap(blob_map, binary, mapping) # Get Edges cv.Canny(binary, binary, 30, 40) # Hough Transform line_storage = cv.CreateMemStorage() lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi / 180, threshold=self.hough_threshold, param1=0, param2=0) print "hough transform found", len(lines), " lines" lines = lines[:self.lines_to_consider] # Limit number of lines # if not lines: # return paths = self.path_manager.process(lines, blobs) if paths and not self.path: # If path[1] is clockwise of paths[0] distance = circular_distance(paths[0].angle, paths[1].angle) if distance > 0: self.path = paths[self.which_path] else: self.path = paths[1 - self.which_path] if paths and self.path in paths and self.path.blobs: temp_map = cv.CloneImage(blob_map) mapping = [0] * 256 for blob in self.path.blobs: mapping[blob.id] = 255 libvision.greymap.greymap(blob_map, temp_map, mapping) center = self.find_centroid(temp_map) svr.debug("map", temp_map) self.path.center = (center[0] - (frame.width / 2), -center[1] + (frame.height / 2)) random = 0 if random == 0: # Show color filtered color_filtered_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.CvtColor(color_filtered, color_filtered_rgb, cv.CV_GRAY2RGB) cv.SubS(color_filtered_rgb, (255, 0, 0), color_filtered_rgb) cv.Sub(frame, color_filtered_rgb, frame) # Show edges binary_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.CvtColor(binary, binary_rgb, cv.CV_GRAY2RGB) cv.Add(frame, binary_rgb, frame) # Add white to edge pixels cv.SubS(binary_rgb, (0, 0, 255), binary_rgb) cv.Sub(frame, binary_rgb, frame) # Remove all but Red test_lines = [] new_path = None for line in lines[:]: if self.candidates == []: new_path = Path(line[0], line[1]) new_path.id = self.path_id self.path_id += 1 new_path.last_seen += 1 self.candidates.append(new_path) print "got a candidate" for candidate in self.candidates: if len(self.confirmed) == 0: self.confirmed.append(candidate) for line in lines[:]: for candidate in self.candidates: if math.fabs(line[0] - candidate.loc) < self.distance_threshold and \ math.fabs(line[1] - candidate.angle) < self.angle_threshold: candidate.loc = (candidate.loc + line[0]) / 2 candidate.angle = (candidate.angle + line[1]) / 2 if candidate.last_seen < self.max_lastseen: candidate.last_seen += 1 # print line1 if line in lines: lines.remove(line) else: new_path = Path(line[0], line[1]) new_path.id = self.path_id self.path_id += 1 new_path.last_seen += 1 new_path.seencount += 5 self.candidates.append(new_path) for candidate in self.candidates[:]: candidate.last_seen -= 1 if candidate.seencount > self.min_seencount: self.confirmed.append(candidate) self.candidates.remove(candidate) if candidate.last_seen == -1: self.candidates.remove(candidate) for confirmed in self.confirmed: for line in lines[:]: if math.fabs(line[0] - confirmed.loc) < self.distance_trans and \ math.fabs(line[1] - confirmed.angle) < self.angle_trans: confirmed.loc = line[0] confirmed.angle = line[1] if confirmed.last_seen < self.max_lastseen: 
confirmed.last_seen += 2
# (still inside the confirmed/line matching loop above)
if line in lines:
    lines.remove(line)  # was self.lines.remove(line), which named the wrong list
    print "line removed"

for confirmed in self.confirmed:
    for candidate in self.candidates[:]:
        if math.fabs(candidate.loc - confirmed.loc) < self.distance_trans and \
           math.fabs(candidate.angle - confirmed.angle) < self.angle_trans:
            confirmed.loc = candidate.loc
            confirmed.angle = candidate.angle
            if confirmed.last_seen < self.max_lastseen:
                confirmed.last_seen += 2
            print "lines"
            if candidate in self.candidates:
                self.candidates.remove(candidate)
                print "line removed"

# Merge confirmed paths that are near-duplicates, keeping the older (lower)
# id. The old code used == here (a no-op comparison) instead of assignment,
# and updated the removed path rather than the survivor in the second branch.
for confirmed1 in self.confirmed[:]:
    for confirmed2 in self.confirmed[:]:
        if confirmed1 is not confirmed2 and \
           math.fabs(confirmed1.loc - confirmed2.loc) < self.distance_threshold and \
           math.fabs(confirmed1.angle - confirmed2.angle) < self.angle_threshold:
            if confirmed1.id > confirmed2.id and confirmed1 in self.confirmed:
                confirmed2.loc = (confirmed2.loc + confirmed1.loc) / 2
                confirmed2.angle = (confirmed2.angle + confirmed1.angle) / 2
                self.confirmed.remove(confirmed1)
                if confirmed2.last_seen < self.max_lastseen:
                    confirmed2.last_seen += 2
            if confirmed2.id > confirmed1.id and confirmed2 in self.confirmed:
                confirmed1.loc = (confirmed1.loc + confirmed2.loc) / 2
                confirmed1.angle = (confirmed1.angle + confirmed2.angle) / 2
                self.confirmed.remove(confirmed2)
                if confirmed1.last_seen < self.max_lastseen:
                    confirmed1.last_seen += 2

for confirmed in self.confirmed[:]:
    confirmed.last_seen -= 1
    if confirmed.last_seen < -10:
        self.confirmed.remove(confirmed)

final_lines = []
for confirmed in self.confirmed:
    final_lines.append([confirmed.loc, confirmed.angle])
    print confirmed.id

candidate_ids = []
for candidate in self.candidates:
    candidate_ids.append(candidate.id)
print candidate_ids
print len(self.candidates)

libvision.misc.draw_lines(frame, final_lines)
#libvision.misc.draw_lines2(frame, lines)
print "Number of Paths:", len(self.confirmed)
print "Number of Candidates:", len(self.candidates)

# Type -s after the command to run vision for this to work without errors.
# if len(self.confirmed) > 1:
#     raw_input()

self.output.paths = self.confirmed
center_x = 0
center_y = 0
for path in self.output.paths:
    path.theta = path.angle
    center_x = frame.width / 2
    path.x = center_x
    # y on the Hough line at x = center_x; the +.001 terms guard against
    # division by zero for near-horizontal normals
    center_y = (-math.cos(path.angle) / (math.sin(path.angle) + .001)) * center_x + \
               (path.loc / (math.sin(path.angle) + .001))
    path.y = center_y
    if center_y > frame.height or center_y < 0 or \
       center_y < self.min_center_distance or \
       frame.height - center_y < self.min_center_distance:
        center_y2 = frame.height / 2
        center_x2 = (center_y2 - (path.loc / (math.sin(path.angle) + .0001))) / \
                    (-math.cos(path.angle) / (math.sin(path.angle) + .0001))
        if center_x2 > frame.width or center_x2 < 0:
            path.center = [center_x, center_y]
        else:
            path.center = [center_x2, center_y2]
    else:
        path.center = [center_x, center_y]
    cv.Circle(frame, (int(path.center[0]), int(path.center[1])), 15,
              (255, 255, 255), 2, 8, 0)

self.return_output()
svr.debug("Path", frame)
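# A hedged side note on the center math above: a Hough line in (rho, theta)
# form satisfies x*cos(theta) + y*sin(theta) = rho, so solving for y at a
# given x gives y = (rho - x*cos(theta)) / sin(theta), which is exactly the
# expression used above. A minimal helper, for illustration only:
import math

def y_on_hough_line(rho, theta, x, eps=1e-3):
    s = math.sin(theta) + eps  # same zero-guard as the code above
    return (-math.cos(theta) / s) * x + rho / s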
def process_frame(self, frame):
    # This is equivalent to the old routine, but it isn't actually necessary
    #height, width, depth = libvision.cv_to_cv2(frame).shape
    #self.debug_frame = np.zeros((height, width, 3), np.uint8)

    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame2

    # Thresholding
    self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV,
                                             self.adaptive_thresh_blocksize,
                                             self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
    # kernel = np.ones((2,2), np.uint8)
    self.numpy_frame = cv2.erode(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel2)

    self.adaptive_frame = self.numpy_frame.copy()

    # Find contours
    contours, hierarchy = cv2.findContours(self.numpy_frame,
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_buoys = []

    if len(contours) > 0:
        cnt = contours[0]
        cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3)

        for h, cnt in enumerate(contours):
            approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
            center, radius = cv2.minEnclosingCircle(cnt)
            x, y = center

            if len(approx) > 12 and radius > 30:
                new_buoy = Buoy(int(x), int(y), int(radius), "unknown")
                new_buoy.id = self.recent_id
                self.recent_id += 1
                # Append each buoy once (the old code appended it twice)
                self.raw_buoys.append(new_buoy)
                cv2.drawContours(self.numpy_frame, [cnt], 0, (0, 0, 255), -1)

    for buoy1 in self.raw_buoys[:]:
        for buoy2 in self.raw_buoys[:]:
            if buoy1 is buoy2:
                continue
            if buoy1 in self.raw_buoys and buoy2 in self.raw_buoys and \
               math.fabs(buoy1.centerx - buoy2.centerx) > self.mid_sep and \
               math.fabs(buoy1.centery - buoy2.centery) > self.mid_sep:
                if buoy1.radius < buoy2.radius:
                    self.raw_buoys.remove(buoy1)
                elif buoy2.radius < buoy1.radius:
                    self.raw_buoys.remove(buoy2)

    for buoy in self.raw_buoys:
        self.match_buoys(buoy)

    self.sort_buoys()
    self.draw_buoys()
    self.return_output()

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame)

    svr.debug("processed", self.numpy_to_cv)
    svr.debug("adaptive", self.adaptive_to_cv)
    svr.debug("debug", self.debug_to_cv)

    # Convert to output format
    self.output.buoys = []
    if self.raw_buoys is not None and len(self.raw_buoys) > 0:
        for buoy in self.raw_buoys:
            x = buoy.centerx
            y = buoy.centery
            buoy = Container()
            buoy.theta = x
            buoy.phi = y
            buoy.id = 1
            self.output.buoys.append(buoy)

    if self.output.buoys:
        self.return_output()

    return self.output
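# A hedged side note: len(approx) > 12 plus a radius floor is a fairly
# loose roundness test. An alternative sketch (not what this module does)
# compares the contour's area to the area of its minimum enclosing circle;
# min_ratio is a hypothetical tuning parameter:
import math
import cv2

def is_roughly_circular(cnt, min_ratio=0.6):
    area = cv2.contourArea(cnt)
    (x, y), radius = cv2.minEnclosingCircle(cnt)
    circle_area = math.pi * radius * radius
    # a perfect disc scores 1.0; thin or jagged contours score much lower
    return circle_area > 0 and area / circle_area >= min_ratio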
def process_frame(self, frame): # Resize image to 320x240 #copy = cv.CreateImage(cv.GetSize(frame), 8, 3) #cv.Copy(frame, copy) #cv.SetImageROI(frame, (0, 0, 320, 240)) #cv.Resize(copy, frame, cv.CV_INTER_NN) found_hedge = False cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have value channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 1) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) cv.AdaptiveThreshold( binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology ''' kernel = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) ''' if self.debug: color_filtered = cv.CloneImage(binary) # Get Edges #cv.Canny(binary, binary, 30, 40) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi / 180, threshold=self.hough_threshold, param1=0, param2=0) # Get vertical lines vertical_lines = [] for line in raw_lines: if line[1] < self.vertical_threshold or \ line[1] > math.pi - self.vertical_threshold: vertical_lines.append((abs(line[0]), line[1])) # Group vertical lines vertical_line_groups = [ ] # A list of line groups which are each a line list for line in vertical_lines: group_found = False for line_group in vertical_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groups.append([line]) # Average line groups into lines vertical_lines = [] for line_group in vertical_line_groups: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) vertical_lines.append(line) # Get horizontal lines horizontal_lines = [] for line in raw_lines: dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi if dist_from_horizontal < self.horizontal_threshold or \ dist_from_horizontal > math.pi - self.horizontal_threshold: horizontal_lines.append((abs(line[0]), line[1])) # Group horizontal lines horizontal_line_groups = [ ] # A list of line groups which are each a line list for line in horizontal_lines: group_found = False for line_group in horizontal_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: horizontal_line_groups.append([line]) if len(horizontal_line_groups) is 1: self.seen_crossbar = True rhos = map(lambda line: line[0], horizontal_line_groups[0]) angles = map(lambda line: line[1], horizontal_line_groups[0]) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) horizontal_lines = [line] else: self.seen_crossbar = False horizontal_lines = [] self.left_pole = None self.right_pole = None if len(vertical_lines) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] self.left_pole = round( min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2 self.right_pole = round( max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2 # TODO: If one pole is seen, is it left or right pole? 
# Calculate planar distance r (assuming we are moving perpendicular to
# the hedge)
if self.left_pole and self.right_pole:
    theta = abs(self.left_pole - self.right_pole)
    self.r = 3 / math.tan(math.radians(theta / 2))
else:
    self.r = None

if self.r and self.seen_crossbar:
    bar_phi = (-1 * horizontal_lines[0][0] + frame.height / 2) / (frame.height / 2) * 32
    self.crossbar_depth = self.r * math.atan(math.radians(bar_phi))
else:
    self.crossbar_depth = None

if self.debug:
    cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)
    libvision.misc.draw_lines(frame, vertical_lines)
    libvision.misc.draw_lines(frame, horizontal_lines)
    #cv.ShowImage("Hedge", cv.CloneImage(frame))
    svr.debug("Hedge", cv.CloneImage(frame))

# populate self.output with info
self.output.seen_crossbar = self.seen_crossbar
self.output.left_pole = self.left_pole
self.output.right_pole = self.right_pole
self.output.r = self.r
self.output.crossbar_depth = self.crossbar_depth

self.return_output()
print self
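# A hedged reading of the r calculation above: with tan(radians(theta / 2))
# it has the shape of a triangulation formula, r = w / tan(theta / 2),
# where w would be half the real-world pole separation (presumably the
# constant 3) and theta the angular separation of the poles. Note that
# theta here is actually a difference of pixel offsets, so the degree
# conversion is at best a rough calibration. Factored out as a sketch:
from math import tan, radians

def triangulate_distance(angular_sep_deg, half_separation=3.0):
    # hypothetical helper; the 3.0 mirrors the constant used above
    return half_separation / tan(radians(angular_sep_deg / 2.0))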
def process_frame(self, frame): # Creation of frames self.debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.Copy(frame, self.debug_frame) self.debug_numpy_frame = cv_to_cv2(self.debug_frame) # CV2 Transforms self.numpy_frame = cv_to_cv2(self.debug_frame) self.numpy_frame = cv2.medianBlur(self.numpy_frame, 7) self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv.CV_BGR2HSV) [self.frame1, self.frame2, self.frame3] = numpy.dsplit( self.numpy_frame, 3) # Change the frame number to determine what channel to focus on self.numpy_frame = self.frame3 '''Temporarily converts the image to a cv frame to do the adaptive threshold ''' self.temp_cv2frame = cv2_to_cv(self.numpy_frame) cv.AdaptiveThreshold(self.temp_cv2frame, self.temp_cv2frame, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE) cv.Erode(self.temp_cv2frame, self.temp_cv2frame, kernel, 1) cv.Dilate(self.temp_cv2frame, self.temp_cv2frame, kernel, 1) self.adaptive_frame = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.Copy(self.temp_cv2frame, self.adaptive_frame) '''Returns the frame to a cv2 image''' self.numpy_frame = cv_to_cv2(self.temp_cv2frame) '''Begins finding contours''' contours, hierarchy = cv2.findContours( self.numpy_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) self.raw_bins = [] if len(contours) > 1: cnt = contours[0] cv2.drawContours( self.numpy_frame, contours, -1, (255, 255, 255), 3) self.masks = [] pts = [] for h, cnt in enumerate(contours): mask = numpy.zeros(self.numpy_frame.shape, numpy.uint8) cv2.drawContours(mask, [cnt], 0, 255, -1) mean = cv2.mean(cv_to_cv2(self.debug_frame), mask=mask) self.masks.append(mask) hull = cv2.convexHull(cnt) rect = cv2.minAreaRect(cnt) box = cv2.cv.BoxPoints(rect) box = numpy.int0(box) new_bin = Bin(tuple(box[0]), tuple( box[1]), tuple(box[2]), tuple(box[3])) new_bin.id = self.recent_id self.recent_id = self.recent_id + 1 self.raw_bins.append(new_bin) for pt in box: type(tuple(pt)) cv2.circle(self.numpy_frame, tuple( pt), 5, (255, 255, 255), -1, 8, 0) pts.append(pt) '''Removes bins that have centers too close to others (to prevent bins inside bins), and bins that are too small''' for bin1 in self.raw_bins[:]: for bin2 in self.raw_bins[:]: if bin1 in self.raw_bins and bin2 in self.raw_bins and math.fabs(bin1.midx - bin2.midx) < self.mid_sep and math.fabs(bin1.midy - bin2.midy) < self.mid_sep: if bin1.area < bin2.area: self.raw_bins.remove(bin1) elif bin2.area < bin1.area: self.raw_bins.remove(bin2) if bin1 in self.raw_bins and bin2 in self.raw_bins: if bin1.area < self.min_area: self.raw_bins.remove(bin1) if bin2.area < self.min_area and bin2 in self.raw_bins: self.raw_bins.remove(bin2) for bin in self.raw_bins: self.match_bins(bin) self.sort_bins() self.numpy_to_cv = cv2_to_cv(self.numpy_frame) self.debug_final_frame = cv2_to_cv(self.debug_numpy_frame) self.draw_bins() svr.debug("CV", self.debug_final_frame) svr.debug("CV2", self.numpy_to_cv) svr.debug("Adaptive", self.adaptive_frame)
def process_frame(self, frame):
    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame2

    # Thresholding
    self.buoy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                            cv2.ADAPTIVE_THRESH_MEAN_C,
                                            cv2.THRESH_BINARY_INV,
                                            self.adaptive_thresh_blocksize,
                                            self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
    # Morphology on the thresholded frame (the old code eroded/dilated the
    # unthresholded channel, discarding the adaptive threshold result)
    self.buoy_frame = cv2.erode(self.buoy_frame, kernel)
    self.buoy_frame = cv2.dilate(self.buoy_frame, kernel2)

    self.buoy_adaptive = self.buoy_frame.copy()

    # Find contours in the thresholded frame
    contours, hierarchy = cv2.findContours(self.buoy_frame,
                                           cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_led = []
    self.raw_buoys = []

    if len(contours) > 1:
        cnt = contours[0]
        cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3)

        for h, cnt in enumerate(contours):
            approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
            center, radius = cv2.minEnclosingCircle(cnt)
            x, y = center

            if len(approx) > 12 and radius > 30:
                new_buoy = Buoy(int(x), int(y), int(radius), "unknown")
                new_buoy.id = self.recent_id
                self.recent_id += 1
                # Append each buoy once (the old code appended it twice)
                self.raw_buoys.append(new_buoy)
                cv2.drawContours(self.numpy_frame, [cnt], 0, (0, 0, 255), -1)

    for buoy1 in self.raw_buoys[:]:
        for buoy2 in self.raw_buoys[:]:
            if buoy1 is buoy2:
                continue
            if buoy1 in self.raw_buoys and buoy2 in self.raw_buoys and \
               math.fabs(buoy1.centerx - buoy2.centerx) > self.mid_sep and \
               math.fabs(buoy1.centery - buoy2.centery) > self.mid_sep:
                if buoy1.area < buoy2.area:
                    self.raw_buoys.remove(buoy1)
                elif buoy2.area < buoy1.area:
                    self.raw_buoys.remove(buoy2)

    for buoy in self.raw_buoys:
        self.match_buoys(buoy)

    self.sort_buoys()
    self.draw_buoys()
    self.return_output()

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    # the old code read self.adaptive_frame here, which this function never set
    self.adaptive_to_cv = libvision.cv2_to_cv(self.buoy_adaptive)

    svr.debug("processed", self.numpy_to_cv)
    svr.debug("adaptive", self.adaptive_to_cv)
    svr.debug("debug", self.debug_to_cv)
def process_frame(self, frame): # Get Channels hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) grey = libvision.misc.get_channel(hsv, 2) # load a haar classifier #hc = cv.Load("/home/seawolf/software/seawolf5/vision/output.xml") hc = cv.Load( os.path.join(os.path.dirname(os.path.dirname(__file__)), "buoy_cascade_4.xml")) #hc = cv.Load("/home/seawolf/software/seawolf5/vision/buoy_cascade_4.xml") # use classifier to detect buoys minsize = (int(self.minsize), int(self.minsize)) maxsize = (int(self.maxsize), int(self.maxsize)) buoys = cv.HaarDetectObjects(grey, hc, cv.CreateMemStorage(), min_size=minsize) # compute average buoy size and extract to a list avg_w = 0 for (x, y, w, h), n in buoys: # create a buoy class for this new buoys new_buoy = self.new_buoy(x, y, w) # make note of size avg_w = avg_w + w # add this buoy to our list of new buoys self.new.append(new_buoy) # update search size based on sizes found this frame if buoys: avg_w = avg_w / len(buoys) self.minsize = int(avg_w * .7) self.maxsize = int(avg_w * 1.3) # determine color of new buoys (if possible) libvision.buoy_analyzer(frame, self.new) # sort these new buoys into appropriate tier self.sort_buoys() # must be called every frame for upkeep ####### DEBUG ####### if self.debug: # display confirmed buoys for confirmed in self.confirmed: x = confirmed.x y = confirmed.y w = confirmed.width # draw rectangles on frame cv.Rectangle(frame, (x, y), (x + w, y + w), confirmed.debug_color, thickness=6) cv.Rectangle(frame, (x, y), (x + w, y + w), COLORS[confirmed.color], thickness=-1) # show debug frame svr.debug("BuoyTest", frame) ####### END DEBUG ####### self.output.buoys = self.confirmed for buoy in self.output.buoys: buoy.theta = (buoy.x - frame.width / 2) * 37 / (frame.width / 2) buoy.phi = -1 * (buoy.y - frame.height / 2) * 36 / (frame.height / 2) self.return_output()
def process_frame(self, frame): frametest = cv.CreateImage(cv.GetSize(frame), 8, 3) binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.Copy(frame, frametest) cv.SetImageCOI(frametest, 3) cv.Copy(frametest, binarytest) cv.SetImageCOI(frametest, 0) svr.debug("R?",binarytest) # Resize image to 320x240 #copy = cv.CreateImage(cv.GetSize(frame), 8, 3) #cv.Copy(frame, copy) #cv.SetImageROI(frame, (0, 0, 320, 240)) #cv.Resize(copy, frame, cv.CV_INTER_NN) found_gate = False unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.Copy(frame,unchanged_frame) cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have saturation channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 1) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) cv.AdaptiveThreshold(binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) if self.debug: color_filtered = cv.CloneImage(binary) # Get Edges cv.Canny(binary, binary, 30, 40) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi/180, threshold=self.hough_threshold, param1=0, param2=0 ) # Get vertical lines vertical_lines = [] for line in raw_lines: if line[1] < self.vertical_threshold or \ line[1] > math.pi-self.vertical_threshold: #absolute value does better grouping currently vertical_lines.append((abs(line[0]), line[1])) # Group vertical lines vertical_line_groups = [] # A list of line groups which are each a line list for line in vertical_lines: group_found = False for line_group in vertical_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groups.append([line]) # Average line groups into lines vertical_lines = [] for line_group in vertical_line_groups: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos)/len(rhos), circular_average(angles, math.pi)) vertical_lines.append(line) # Get horizontal lines horizontal_lines = [] for line in raw_lines: dist_from_horizontal = (math.pi/2 + line[1]) % math.pi if dist_from_horizontal < self.horizontal_threshold or \ dist_from_horizontal > math.pi-self.horizontal_threshold: horizontal_lines.append( (abs(line[0]), line[1]) ) # Group horizontal lines horizontal_line_groups = [] # A list of line groups which are each a line list for line in horizontal_lines: group_found = False for line_group in horizontal_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: horizontal_line_groups.append([line]) if len(horizontal_line_groups) is 1: self.seen_crossbar = True if self.debug: rhos = map(lambda line: line[0], horizontal_line_groups[0]) angles = map(lambda line: line[1], horizontal_line_groups[0]) line = (sum(rhos)/len(rhos), circular_average(angles, math.pi)) horizontal_lines = [line] else: self.seen_crossbar = False horizontal_lines = [] self.left_pole = None self.right_pole = None print vertical_lines self.returning = 0 self.found = False if len(vertical_lines) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] self.left_pole = 
round(min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
self.right_pole = round(max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
self.returning = (self.left_pole + self.right_pole) / 2
print "Returning ", self.returning

if self.last_seen < 0:
    self.last_center = None
    self.last_seen = 0

if self.last_center is None:
    self.last_center = self.returning
    self.seen_count = 1
elif math.fabs(self.last_center - self.returning) < self.center_trans_thresh:
    self.seen_count += 1
    self.last_seen += 2
else:
    self.last_seen -= 1

if self.seen_count < self.seen_count_thresh:
    self.left_pole = None
    self.right_pole = None
else:
    print "FOUND CENTER AND RETURNED IT"
    self.found = True

# (else branch of the two-vertical-lines check above)
else:
    self.returning = 0
    if self.last_seen < 0:
        self.last_center = None
        self.last_seen = 0
    self.last_seen -= 1
    self.left_pole = None
    self.right_pole = None

#TODO: If one pole is seen, is it left or right pole?

if self.debug:
    cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)
    libvision.misc.draw_lines(frame, vertical_lines)
    libvision.misc.draw_lines(frame, horizontal_lines)
    if self.found:
        cv.Circle(frame, (int(frame.width / 2 + self.returning), int(frame.height / 2)), 15, (0, 255, 0), 2, 8, 0)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3)
        cv.PutText(frame, "Gate Sent to Mission Control", (100, 400), font, (255, 255, 0))
    print frame.width
    #cv.ShowImage("Gate", cv.CloneImage(frame))
    svr.debug("Gate", cv.CloneImage(frame))
    svr.debug("Unchanged", cv.CloneImage(unchanged_frame))

# populate self.output with info
self.output.seen_crossbar = self.seen_crossbar
self.output.left_pole = self.left_pole
self.output.right_pole = self.right_pole

self.return_output()
print self
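# A hedged side note: circular_average() and line_group_accept_test() used
# above are defined elsewhere in this codebase. A plausible sketch of a
# circular mean over angles with period `period` (this is an assumption
# about their behavior, not the actual implementation):
import math

def circular_average_sketch(angles, period=math.pi):
    # map each angle onto the full circle, average as unit vectors,
    # then map the mean angle back into [0, period)
    scale = 2 * math.pi / period
    s = sum(math.sin(a * scale) for a in angles)
    c = sum(math.cos(a * scale) for a in angles)
    return (math.atan2(s, c) / scale) % period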
def process_frame(self, frame):
    ################
    # setup CV     #
    ################
    print "processing frame"
    (w, h) = cv.GetSize(frame)

    # generate hue selection frames
    ones = np.ones((h, w, 1), dtype='uint8')
    a = ones * (180 - self.target_hue)
    b = ones * (180 - self.target_hue + 20)
    a_array = cv.fromarray(a)
    b_array = cv.fromarray(b)

    # create locations for the test frame and binary frame
    frametest = cv.CreateImage(cv.GetSize(frame), 8, 3)
    binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1)

    # use the red channel for the binary frame (just for debugging purposes)
    cv.Copy(frame, frametest)
    cv.SetImageCOI(frametest, 3)
    cv.Copy(frametest, binarytest)
    # reset the COI for the test frame to RGB
    cv.SetImageCOI(frametest, 0)

    # Resize image to 320x240
    #copy = cv.CreateImage(cv.GetSize(frame), 8, 3)
    #cv.Copy(frame, copy)
    #cv.SetImageROI(frame, (0, 0, 320, 240))
    #cv.Resize(copy, frame, cv.CV_INTER_NN)

    found_gate = False

    # create a new frame for comparison purposes
    unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.Copy(frame, unchanged_frame)

    # apply noise filter #1
    cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

    # Set binary image to have saturation channel
    hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
    cv.SetImageCOI(hsv, 1)
    cv.Copy(hsv, binary)

    # spin the color wheel (pseudo-code for later if necessary)
    #   truncate spectrum marked as end
    #   shift all values up based on truncating value (mask out 0 regions)
    #   take truncated bits, and flip them (180->0, 179->1...)
    #   now that truncated bits are flipped, add them back in to final image

    # Reset hsv COI
    cv.SetImageCOI(hsv, 0)

    # correct for wraparound on red spectrum
    cv.InRange(binary, a_array, b_array, binarytest)  # generate mask
    cv.Add(binary, cv.fromarray(ones * 180), binary, mask=binarytest)  # use mask to selectively add values

    # run adaptive threshold for edge detection
    cv.AdaptiveThreshold(binary, binary, 255,
                         cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         cv.CV_THRESH_BINARY_INV,
                         self.adaptive_thresh_blocksize,
                         self.adaptive_thresh,
                         )

    # Morphology
    kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
    cv.Erode(binary, binary, kernel, 1)
    cv.Dilate(binary, binary, kernel, 1)

    if self.debug:
        color_filtered = cv.CloneImage(binary)

    # Get Edges
    cv.Canny(binary, binary, 30, 40)

    # Hough Transform
    line_storage = cv.CreateMemStorage()
    raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD,
                               rho=1,
                               theta=math.pi / 180,
                               threshold=self.hough_threshold,
                               param1=0,
                               param2=0
                               )

    # Get vertical lines
    vertical_lines = []
    i = 0
    for line in raw_lines:
        if line[1] < self.vertical_threshold or \
           line[1] > (math.pi - self.vertical_threshold):
            # absolute value does better grouping currently
            vertical_lines.append((abs(line[0]), line[1]))
        i += 1

    # print message to user for performance purposes
    logging.debug("{} possibilities reduced to {} lines".format(
        i, len(vertical_lines)))

    # Group vertical lines
    vertical_line_groups = []  # A list of line groups which are each a line list
    i = 0
    for line in vertical_lines:
        group_found = False
        for line_group in vertical_line_groups:
            i += 1
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            vertical_line_groups.append([line])

    # quick debugging statement
    logging.debug("{} internal iterations for {} groups".format(
        i, len(vertical_line_groups)))

    # Average line groups into lines
    vertical_lines = []
    for line_group in vertical_line_groups:
        rhos = map(lambda line: line[0], line_group)  # get rho of each line
        angles = map(lambda line: line[1], line_group)
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        vertical_lines.append(line)

    self.left_pole = None
    self.right_pole = None
    self.returning = 0
    self.found = False

    if len(vertical_lines) == 2:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        self.left_pole = round(min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
        self.right_pole = round(max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
        self.returning = (self.left_pole + self.right_pole) / 2
        logging.info("Returning {}".format(self.returning))

        # If this is the first iteration, count this as seeing the gate
        if self.last_seen < 0:
            self.last_center = None
            self.last_seen = 0

        # increment a counter if the result is good
        if self.last_center is None:
            self.last_center = self.returning
            self.seen_count = 1
        elif math.fabs(self.last_center - self.returning) < self.center_trans_thresh:
            self.seen_count += 1
            self.last_seen += 2
        else:
            self.last_seen -= 1

        # if not convinced, forget left/right pole. Else, proclaim success.
        if self.seen_count < self.seen_count_thresh:
            self.left_pole = None
            self.right_pole = None
        else:
            print "FOUND CENTER AND RETURNED IT"
            self.found = True
    else:
        self.returning = 0
        if self.last_seen < 0:
            self.last_center = None
            self.last_seen = 0
        self.last_seen -= 1
        self.left_pole = None
        self.right_pole = None  # was misspelled self.right_POLE, leaving right_pole stale

    # extra debugging stuff
    if self.debug:
        cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)
        libvision.misc.draw_lines(frame, vertical_lines)
        if self.found:
            cv.Circle(frame, (int(frame.width / 2 + self.returning), int(frame.height / 2)), 15, (0, 255, 0), 2, 8, 0)
            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3)
            cv.PutText(frame, "Gate Sent to Mission Control", (100, 400), font, (255, 255, 0))
        #print frame.width
        #cv.ShowImage("Gate", cv.CloneImage(frame))
        svr.debug("Gate", cv.CloneImage(frame))
        svr.debug("Unchanged", cv.CloneImage(unchanged_frame))

    self.return_output()
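# A hedged side note: the last_seen / seen_count bookkeeping above is a
# small debounce filter, in which a detection only counts as "found" after
# it has reappeared near the same center enough times. The same idea in
# isolation (class and names hypothetical, not the module's actual API):
class Debounce(object):
    def __init__(self, dist_thresh, count_thresh):
        self.dist_thresh = dist_thresh
        self.count_thresh = count_thresh
        self.center = None
        self.count = 0

    def update(self, measurement):
        # accept the measurement if it is the first one or near the last
        if self.center is None or abs(measurement - self.center) < self.dist_thresh:
            self.center = measurement
            self.count += 1
        else:
            self.count -= 1
        return self.count >= self.count_thresh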
def process_frame(self, frame): if self.path_manager.start_angle is None: self.path_manager.start_angle = get_yaw() self.output.found = False cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Use RGB color finder binary = libvision.cmodules.target_color_rgb.find_target_color_rgb(frame, 250, 125, 0, 1500, 500, .3) color_filtered = cv.CloneImage(binary) blob_map = cv.CloneImage(binary) blobs = libvision.blob.find_blobs(binary, blob_map, min_blob_size=50, max_blobs=10) if not blobs: return binary = cv.CloneImage(blob_map) mapping = [0] * 256 for blob in blobs: mapping[blob.id] = 255 libvision.greymap.greymap(blob_map, binary, mapping) # Get Edges cv.Canny(binary, binary, 30, 40) # Hough Transform line_storage = cv.CreateMemStorage() lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi/180, threshold=self.hough_threshold, param1=0, param2=0 ) lines = lines[:self.lines_to_consider] # Limit number of lines if not lines: return paths = self.path_manager.process(lines, blobs) if paths and not self.path: # If path[1] is clockwise of paths[0] distance = circular_distance(paths[0].angle, paths[1].angle) print print "Distance: ", distance print paths[0].theta, paths[0].angle print paths[1].theta, paths[1].angle if distance > 0: self.path = paths[self.which_path] else: self.path = paths[1 - self.which_path] print self.path.angle, self.path.theta print if paths and self.path in paths and self.path.blobs: temp_map = cv.CloneImage(blob_map) mapping = [0] * 256 for blob in self.path.blobs: mapping[blob.id] = 255 libvision.greymap.greymap(blob_map, temp_map, mapping) center = self.find_centroid(temp_map) svr.debug("map", temp_map) self.path.center = ( center[0] - (frame.width / 2), -center[1] + (frame.height / 2) ) self.output.found = True self.output.theta = self.path.theta self.output.x = self.path.center[0] / (frame.width / 2) self.output.y = self.path.center[1] / (frame.height / 2) print self.output if self.debug: # Show color filtered color_filtered_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.CvtColor(color_filtered, color_filtered_rgb, cv.CV_GRAY2RGB) cv.SubS(color_filtered_rgb, (255, 0, 0), color_filtered_rgb) cv.Sub(frame, color_filtered_rgb, frame) # Show edges binary_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.CvtColor(binary, binary_rgb, cv.CV_GRAY2RGB) cv.Add(frame, binary_rgb, frame) # Add white to edge pixels cv.SubS(binary_rgb, (0, 0, 255), binary_rgb) cv.Sub(frame, binary_rgb, frame) # Remove all but Red theta = math.radians(circular_distance(self.path_manager.start_angle, get_yaw())) if theta < 0: scale = math.cos(-2 * theta) theta = pi + theta libvision.misc.draw_lines(frame, [((-frame.width/2)*scale, theta)]) else: libvision.misc.draw_lines(frame, [(frame.width/2, theta)]) # Show lines if self.output.found: rounded_center = ( int(round(center[0])), int(round(center[1])), ) cv.Circle(frame, rounded_center, 5, (0, 255, 0)) libvision.misc.draw_lines(frame, [(frame.width/2, self.path.theta)]) else: libvision.misc.draw_lines(frame, lines) svr.debug("Path", frame) self.return_output()
def process_frame(self, frame): # frame types: #self.debug_frame -- Frame containing helpful debug information # Debug numpy in CV2 raw_frame = libvision.cv_to_cv2(frame) self.debug_frame = raw_frame # CV2 blur blur_frame = cv2.medianBlur(self.debug_frame, 5) # collect brightly colored areas frame1 = self.adaptive_threshold(blur_frame, 4, self.adaptive_thresh_blocksize, self.adaptive_thresh) # collect shadowes under colored areas frame2 = self.adaptive_threshold(blur_frame, 1, self.shadow_thresh_blocksize, self.shadow_thresh) # use composite as the adaptive threshold adaptive_frame = cv2.add(frame1, frame2*0) frame = adaptive_frame # morphology sequence = ([-self.erode_factor, self.erode_factor]*1 +[self.bloom_factor, -self.bloom_factor]*1) despeckled_frame = self.morphology(frame, sequence) frame = despeckled_frame self.debug_stream("despeckled", despeckled_frame) # collect edges # ROI_edge detection edge_frame = self.ROI_edge_detection(raw_frame, frame, self.edge_threshold, 0, True) # collect buoy candidates using hough circles self.raw_circles = [] self.raw_buoys = [] self.raw_circles = cv2.HoughCircles( image =edge_frame, method =cv2.cv.CV_HOUGH_GRADIENT, dp =self.inv_res_ratio, minDist =self.center_sep, param1 =self.upper_canny_thresh, param2 =self.acc_thresh, minRadius=self.min_radius, maxRadius=self.max_radius, ) if self.raw_circles is not None: self.raw_circles = np.round(self.raw_circles[:,0]).astype(int) # create a new buoy object for every circle that is detected #print(self.raw_circles) if self.raw_circles is not None: #print self.confirmed for circle in self.raw_circles: (x, y, radius) = circle new_buoy = Buoy(x, y, radius, "unknown", self.next_id) self.next_id += 1 self.raw_buoys.append(new_buoy) self.match_buoys(new_buoy) cv2.circle(self.debug_frame, (x, y), int(radius), (0, 255, 0), 5) # sort buoys among confirmed/canditates self.sort_buoys() # self.debug_frame= cv2.add(<HUD_FRAME>,cv2.cvtColor(<annotated_frame>, cv2.COLOR_GRAY2BGR) ) # perform color detection self.hsv_frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2HSV)[:,:,:] if self.confirmed is not None and len(self.confirmed) > 0: # vvv start color detection for buoy in self.confirmed: self.debug_frame = self.detect_buoy(buoy,self.debug_frame,self.hsv_frame) """ # draw a cirle around the confirmed bouy cv2.circle(self.debug_frame, (int(buoy.centerx), int(buoy.centery)), int(buoy.radius) + 10, (255, 255, 255), 5) # attain hue from a pixel on the buoy color_pick_point = ( int(buoy.centerx), int(buoy.centery - buoy.radius/2) ) _c = color_pick_point # ^^offset a couple pixels upward for some reason (total_height, total_width, _) = self.hsv_frame.shape colorHue = np.mean(self.hsv_frame[in_range(_c[1]-buoy.radius/2,0,total_width) : in_range(_c[1]+buoy.radius/2, 0, total_width), in_range(_c[0]-buoy.radius/2, 0, total_height) : in_range(_c[0]+buoy.radius/2, 0, total_height), 0]) print(_c[0],_c[1], buoy.radius/2) print(buoy.centery-20, buoy.centerx) if BUOY_COLOR_PRINTS: print("buoy%d has a hue of %d" %(buoy.id,int(colorHue))) # note: color wraps around at 180. 
Range is 0->180 if (colorHue >= 0 and colorHue < 45) or colorHue >= 95: # 105->180->45 cv2.putText(self.debug_frame,str(buoy.id)+"RED", (int(buoy.centerx), int(buoy.centery)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) buoy.color = "red" elif (colorHue >= 80 and colorHue < 95): # green is hardest to detect cv2.putText(self.debug_frame,str(buoy.id)+"GRE", (int(buoy.centerx), int(buoy.centery)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) #if buoy.color != "red" and buoy.color != "yellow": #print "switched from ", buoy.color buoy.color = "green" else: #yellow is about 50->80 cv2.putText(self.debug_frame,str(buoy.id)+"YEL", (int(buoy.centerx), int(buoy.centery)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) buoy.color = "yellow" #print(buoy.centerx) cv2.putText(self.debug_frame,"HUE="+str(int(colorHue)), (int(buoy.centerx), int(buoy.centery-20)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) cv2.putText(self.debug_frame,"last_seen="+str(int(buoy.lastseen)), (int(buoy.centerx), int(buoy.centery-40)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) cv2.putText(self.debug_frame,"candidate="+str(int(buoy in self.candidates)), (int(buoy.centerx), int(buoy.centery-60)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) # ^^^ end color detection """ # debug frames self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame) #self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame) self.adaptive_to_cv = libvision.cv2_to_cv(adaptive_frame) #svr.debug("processed", self.numpy_to_cv) svr.debug("adaptive", self.adaptive_to_cv) svr.debug("debug", self.debug_to_cv) # generate vision output FOV_x = 71.0 FOV_y = 40.0 x_resolution = frame.shape[1] y_resolution = frame.shape[0] self.output.buoys = [] if self.confirmed is not None and len(self.confirmed) > 0: for buoy in self.confirmed: buoy.theta = (buoy.centerx - x_resolution/2.0) / (x_resolution/2.0) * (FOV_x/2.0) #<- a rough approximation buoy.phi = -(buoy.centery - y_resolution/2.0) / (y_resolution/2.0) * (FOV_y/2.0) #<- a rough approximation buoy.id = buoy.id self.output.buoys.append(buoy) # publish output #print ("%d buoys currently confirmed." % len(self.confirmed)) if self.output.buoys: self.return_output() return self.output
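# A hedged side note on the theta/phi mapping above: it linearly rescales a
# pixel offset from the image center into an angle, using half the field of
# view per half the resolution. Factored out (same approximation, helper
# name hypothetical):
def pixel_to_angle(pixel, resolution, fov_deg):
    half_res = resolution / 2.0
    return (pixel - half_res) / half_res * (fov_deg / 2.0)

# e.g. buoy.theta above equals pixel_to_angle(buoy.centerx, x_resolution, FOV_x)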
def process_frame(self, frame): self.numpy_frame = libvision.cv_to_cv2(frame) self.debug_frame = self.numpy_frame.copy() self.test_frame = self.numpy_frame.copy() self.numpy_frame = cv2.medianBlur(self.numpy_frame, 7) self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV) (rf1, rf2, rf3) = cv2.split(self.numpy_frame) Rbinary = rf3 Gbinary = rf1 # Adaptive Threshold Rbinary = cv2.adaptiveThreshold(Rbinary, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh) Gbinary = cv2.adaptiveThreshold(Gbinary, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, self.Gadaptive_thresh_blocksize, self.Gadaptive_thresh) # Morphology kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) Rbinary = cv2.erode(Rbinary, kernel) Rbinary = cv2.dilate(Rbinary, kernel) Gbinary = cv2.erode(Gbinary, kernel) Gbinary = cv2.dilate(Gbinary, kernel) Rframe = cv2.cvtColor(Rbinary, cv2.COLOR_GRAY2RGB) Gframe = cv2.cvtColor(Gbinary, cv2.COLOR_GRAY2RGB) # Hough Transform raw_linesG = cv2.HoughLines(Gbinary, rho=1, theta=math.pi / 180, threshold=self.hough_thresholdG) # Get vertical lines vertical_linesG = [] if raw_linesG is None: raw_linesG = [] if len(raw_linesG) > 0: for line in raw_linesG[0]: rho = line[0] theta = line[1] if theta < self.vertical_thresholdG or theta > ( math.pi - self.vertical_thresholdG): vertical_linesG.append((rho, theta)) # Group vertical lines vertical_line_groupsG = [ ] # A list of line groups which are each a line list for line in vertical_linesG: #print "Green Line Grouping Possibility:", line[0], ", ", line[1] group_found = False for line_group in vertical_line_groupsG: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groupsG.append([line]) # Average line groups into lines vertical_linesG = [] for line_group in vertical_line_groupsG: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) vertical_linesG.append(line) # Get horizontal lines horizontal_lines = [] if len(raw_linesG) > 0: for line in raw_linesG[0]: rho = line[0] theta = line[1] dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi if dist_from_horizontal < self.horizontal_threshold or dist_from_horizontal > math.pi - self.horizontal_threshold: horizontal_lines.append((abs(line[0]), line[1])) # Group horizontal lines horizontal_line_groups = [ ] # A list of line groups which are each a line list for line in horizontal_lines: group_found = False for line_group in horizontal_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: horizontal_line_groups.append([line]) if len(horizontal_line_groups) is 1: self.seen_crossbar = True rhos = map(lambda line: line[0], horizontal_line_groups[0]) angles = map(lambda line: line[1], horizontal_line_groups[0]) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) horizontal_lines = [line] else: self.seen_crossbar = False horizontal_lines = [] self.left_pole = None self.right_pole = None Rframe = libvision.cv2_to_cv(Rframe) Gframe = libvision.cv2_to_cv(self.debug_frame) Rbinary = libvision.cv2_to_cv(Rbinary) self.debug_frame = libvision.cv2_to_cv(self.debug_frame) self.test_frame = libvision.cv2_to_cv(self.test_frame) Gbinary = libvision.cv2_to_cv(Gbinary) if len(vertical_linesG) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = 
roi[3] self.left_pole = round( min(vertical_linesG[0][0], vertical_linesG[1][0]), 2) - width / 2 self.right_pole = round( max(vertical_linesG[0][0], vertical_linesG[1][0]), 2) - width / 2 # TODO: If one pole is seen, is it left or right pole? # Calculate planar distance r (assuming we are moving perpendicular to # the hedge) if self.left_pole and self.right_pole: theta = abs(self.left_pole - self.right_pole) self.r = 3 / math.tan(math.radians(theta / 2)) else: self.r = None if self.r and self.seen_crossbar: bar_phi = (-1 * horizontal_lines[0][0] + Gframe.height / 2) / (Gframe.height / 2) * 32 self.crossbar_depth = self.r * math.atan(math.radians(bar_phi)) else: self.crossbar_depth = None # Line Finding on Red pvc # Hough Transform line_storage = cv.CreateMemStorage() raw_linesR = cv.HoughLines2(Rbinary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi / 180, threshold=self.hough_thresholdR, param1=0, param2=0) # Get vertical lines vertical_linesR = [] for line in raw_linesR: if line[1] < self.vertical_thresholdR or \ line[1] > math.pi - self.vertical_thresholdR: vertical_linesR.append((abs(line[0]), line[1])) # Group vertical lines vertical_line_groupsR = [ ] # A list of line groups which are each a line list for line in vertical_linesR: group_found = False for line_group in vertical_line_groupsR: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groupsR.append([line]) # Average line groups into lines vertical_linesR = [] for line_group in vertical_line_groupsR: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) vertical_linesR.append(line) ''' for red_line in vertical_linesR: print "Red Line:", red_line[0],", ",red_line[1] for green_line in vertical_linesG: print "Green Line:", green_line[0],", ",green_line[1] ''' for red_line in vertical_linesR: for green_line in vertical_linesG[:]: if math.fabs(green_line[0] - red_line[0]) < self.GR_Threshold0 and \ math.fabs(green_line[1] - red_line[1]) < self.GR_Threshold1: vertical_linesG.remove(green_line) for red_line in vertical_linesR: print "New Red Line:", red_line[0], ", ", red_line[1] for green_line in vertical_linesG: print "New Green VLine:", green_line[0], ", ", green_line[1] for green_line in horizontal_lines: print "New Green HLine:", green_line[0], ", ", green_line[1] if len(vertical_linesR) is 0: print "No Red Found" self.left_pole = None self.right_pole = None if len(vertical_linesR) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] self.left_pole = round( min(vertical_linesR[0][0], vertical_linesR[1][0]), 2) - width / 2 self.right_pole = round( max(vertical_linesR[0][0], vertical_linesR[1][0]), 2) - width / 2 # TODO: If one pole is seen, is it left or right pole? 
# Calculate planar distance r (assuming we are moving perpendicular to
# the hedge)
if self.left_pole and self.right_pole:
    theta = abs(self.left_pole - self.right_pole)
    self.r = 3 / math.tan(math.radians(theta / 2))
else:
    self.r = None

for i in range(len(vertical_linesR)):
    if vertical_linesR[i][1] > math.pi / 2:
        vertical_linesR[i] = (vertical_linesR[i][0],
                              -(math.pi - vertical_linesR[i][1]))
        print "Line changed to ", vertical_linesR[i]

for line in vertical_linesR:
    print line
    if line[1] > math.pi / 2:
        # note: rebinding the loop variable does not modify the list
        line = (line[0], math.pi - line[1])
        print "Line changed to ", line

libvision.misc.draw_lines(Gframe, vertical_linesG)
libvision.misc.draw_lines(Gframe, horizontal_lines)
libvision.misc.draw_lines(Rframe, vertical_linesR)

# there was a merge error; these 3 lines conflicted because the working copy
# was out of date
for line in vertical_linesR:
    roi = cv.GetImageROI(frame)
    width = roi[2]
    height = roi[3]
    x = line[0] * math.cos(line[1])
    y = line[0] * math.sin(line[1])
    cv.Circle(Rframe, (int(x), int(y)), 5, (0, 255, 0), -1, 8, 0)
    if x > width or y > height or x < 0 or y < 0:  # was y > width
        print "Lost point ", x

svr.debug("Original", self.test_frame)
svr.debug("Red", Rframe)
svr.debug("Green", Gframe)
svr.debug("Green Binary", Gbinary)
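# A hedged side note: x = rho*cos(theta), y = rho*sin(theta) above is the
# foot of the perpendicular from the image origin to the line, which is why
# it can land outside the frame ("Lost point"). To draw the full line, the
# usual trick is to extend from that point along the line's direction
# (sketch only; `length` just needs to exceed the frame diagonal):
import math

def hough_line_endpoints(rho, theta, length=1000):
    x0 = rho * math.cos(theta)
    y0 = rho * math.sin(theta)
    dx, dy = -math.sin(theta), math.cos(theta)  # direction along the line
    p1 = (int(x0 + length * dx), int(y0 + length * dy))
    p2 = (int(x0 - length * dx), int(y0 - length * dy))
    return p1, p2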
def process_frame(self, frame):
    # frame types:
    #   self.debug_frame -- Frame containing helpful debug information

    # Debug numpy in CV2
    raw_frame = libvision.cv_to_cv2(frame)
    self.debug_frame = raw_frame

    # CV2 blur
    blur_frame = cv2.medianBlur(self.debug_frame, 5)
    # hsv copy, used later for color detection (the old code left this
    # assignment dangling, so self.hsv_frame was never set)
    self.hsv_frame = cv2.cvtColor(blur_frame, cv2.COLOR_BGR2HSV)

    # collect brightly colored areas
    frame1 = self.adaptive_threshold(blur_frame, 0,
                                     self.adaptive_thresh_blocksize,
                                     self.adaptive_thresh)

    # collect shadows under colored areas
    frame2 = self.adaptive_threshold(blur_frame, 1,
                                     self.shadow_thresh_blocksize,
                                     self.shadow_thresh)

    # use composite as the adaptive threshold
    adaptive_frame = cv2.add(frame1, frame2 * 0)
    frame = adaptive_frame
    #self.debug_stream("help", <frame>)

    # morphology
    sequence = ([-self.erode_factor, self.erode_factor] * 1
                + [self.bloom_factor, -self.bloom_factor] * 1)
    despeckled_frame = self.morphology(frame, sequence)
    frame = despeckled_frame
    self.debug_stream("despeckled", despeckled_frame)

    # collect edges
    #a = 800
    # TODO: ROI_edge detection
    edge_frame = self.ROI_edge_detection(raw_frame, frame, True)
    #edge_frame = cv2.Canny(frame, 150, 250, apertureSize=3)

    # collect buoy candidates using hough circles
    self.raw_circles = []
    self.raw_buoys = []
    self.raw_circles = cv2.HoughCircles(
        edge_frame,
        cv2.cv.CV_HOUGH_GRADIENT,
        self.inv_res_ratio,
        self.center_sep,
        np.array([]),
        self.upper_canny_thresh,
        self.acc_thresh,
        self.min_radius,
        self.max_radius,
    )

    # create a new buoy object for every circle that is detected
    # (the old code wrote len(self.raw_circles[0] > 0), with the comparison
    # inside the len() call)
    if self.raw_circles is not None and len(self.raw_circles[0]) > 0:
        #print self.confirmed
        for circle in self.raw_circles[0]:
            (x, y, radius) = circle
            new_buoy = Buoy(x, y, radius, "unknown", self.next_id)
            self.next_id += 1
            self.raw_buoys.append(new_buoy)
            self.match_buoys(new_buoy)

    # sort buoys among confirmed/candidates
    self.sort_buoys()
    # self.debug_frame = cv2.add(<HUD_FRAME>, cv2.cvtColor(<annotated_frame>, cv2.COLOR_GRAY2BGR))

    # perform color detection
    if self.confirmed is not None and len(self.confirmed) > 0:
        # vvv start color detection
        for buoy in self.confirmed:
            # draw a circle around the confirmed buoy
            cv2.circle(self.debug_frame,
                       (int(buoy.centerx), int(buoy.centery)),
                       int(buoy.radius) + 10, (255, 255, 255), 5)

            # attain hue from a pixel on the buoy
            color_pick_point = (int(buoy.centerx), int(buoy.centery - buoy.radius / 2))
            _c = color_pick_point
            # ^^ offset a couple pixels upward for some reason
            colorHue = np.mean(self.hsv_frame[_c[1] - buoy.radius / 2: _c[1] + buoy.radius / 2,
                                              _c[0] - buoy.radius / 2: _c[0] + buoy.radius / 2,
                                              0])

            if BUOY_COLOR_PRINTS:
                print("buoy%d has a hue of %d" % (buoy.id, int(colorHue)))

            # note: color wraps around at 180.
Range is 0->180 if (colorHue >= 0 and colorHue < 45) or colorHue >= 95: # 105->180->45 cv2.putText(self.debug_frame,str(buoy.id)+"RED", (int(buoy.centerx), int(buoy.centery)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) buoy.color = "red" elif (colorHue >= 80 and colorHue < 95): # green is hardest to detect cv2.putText(self.debug_frame,str(buoy.id)+"GRE", (int(buoy.centerx), int(buoy.centery)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) #if buoy.color != "red" and buoy.color != "yellow": #print "switched from ", buoy.color buoy.color = "green" else: #yellow is about 50->80 cv2.putText(self.debug_frame,str(buoy.id)+"YEL", (int(buoy.centerx), int(buoy.centery)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) buoy.color = "yellow" cv2.putText(self.debug_frame,"HUE="+str(int(colorHue)), (int(buoy.centerx), int(buoy.centery-20)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255)) # ^^^ end color detection # debug frames self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame) #self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame) self.adaptive_to_cv = libvision.cv2_to_cv(adaptive_frame) #svr.debug("processed", self.numpy_to_cv) svr.debug("adaptive", self.adaptive_to_cv) svr.debug("debug", self.debug_to_cv) # generate vision output self.output.buoys = [] if self.confirmed is not None and len(self.confirmed) > 0: for buoy in self.confirmed: buoy.theta = buoy.centerx #<- a rough approximation buoy.phi = buoy.centery #<- a rough approximation buoy.id = buoy.id self.output.buoys.append(buoy) # publish output #print ("%d buoys currently confirmed." % len(self.confirmed)) if self.output.buoys: self.return_output() return self.output
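# The branch order in the classifier above matters: the red test runs first,
# so hues in [95, 180) land in "red" even though the green test's range would
# otherwise overlap them. A condensed, order-preserving restatement of those
# same thresholds (OpenCV 8-bit hue, range 0-179):
def classify_hue(hue):
    if hue < 45 or hue >= 95:
        return "red"     # red wraps through 180 -> 0
    if hue >= 80:
        return "green"   # effectively [80, 95)
    return "yellow"      # roughly [45, 80)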
def process_frame(self, frame):
    numpy_frame = libvision.cv_to_cv2(frame)
    svr.debug("Original", frame)
    numpy_frame = cv2.medianBlur(numpy_frame, 7)
    debug_frame = numpy_frame.copy()
    numpy_frame = cv2.cvtColor(numpy_frame, cv2.COLOR_BGR2HSV)
    (h, s, v) = cv2.split(numpy_frame)
    binary = h
    binary = cv2.adaptiveThreshold(binary, 255,
                                   cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY_INV,
                                   self.adaptive_thresh_blocksize,
                                   self.adaptive_thresh)

    # Morphology
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    binary = cv2.dilate(binary, kernel)
    binary = cv2.dilate(binary, kernel)

    # Hough Transform
    raw_lines = cv2.HoughLinesP(binary,
                                rho=1,
                                theta=math.pi / 180,
                                threshold=self.hough_threshold,
                                minLineLength=50,
                                maxLineGap=10)
    if raw_lines is None:
        raw_lines = []
    else:
        raw_lines = raw_lines[0]

    def endpoints(line):
        """Return the (left, right) endpoints of a line segment."""
        (x1, y1, x2, y2) = line
        p1 = (x1, y1)
        p2 = (x2, y2)
        if p1[0] == min(x1, x2):
            return p1, p2
        return p2, p1

    def slope(line):
        """Slope (dy/dx) of a line segment, measured left to right. Uses
        float division (integer endpoints would otherwise floor-divide in
        Python 2) and reports vertical segments as infinite."""
        left, right = endpoints(line)
        dx = right[0] - left[0]
        if dx == 0:
            return float('inf')
        return float(right[1] - left[1]) / dx

    def angle(line):
        """Angle of a line segment in degrees."""
        return math.degrees(math.atan2(slope(line), 1))

    def length(line):
        (x1, y1, x2, y2) = line
        return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** .5

    def center(line):
        """Midpoint of a line segment."""
        left, _right = endpoints(line)
        theta = math.atan2(slope(line), 1)
        centerx = int(left[0] + length(line) / 2 * math.cos(theta))
        centery = int(left[1] + length(line) / 2 * math.sin(theta))
        return (centerx, centery)

    def is_vertical(line):
        return 60 <= abs(angle(line)) <= 90

    def is_horizontal(line):
        return 0 <= abs(angle(line)) <= 30

    def get_avg_endpoints(lines):
        lefts = []
        rights = []
        for line in lines:
            left, right = endpoints(line)
            lefts.append(left)
            rights.append(right)
        return (average_pts(lefts), average_pts(rights))

    def get_med_endpoints(lines):
        lefts = []
        rights = []
        for line in lines:
            left, right = endpoints(line)
            lefts.append(left)
            rights.append(right)
        return (bad_median(lefts, .25), bad_median(rights, .75))

    def average_pts(pts):
        num = len(pts)
        if num == 0:
            return None
        avg_x = sum(x for (x, y) in pts) / num
        avg_y = sum(y for (x, y) in pts) / num
        return (int(avg_x), int(avg_y))

    def median_pts(pts):
        num = len(pts)
        if num == 0:
            return None
        pts = sorted(pts, key=lambda x: x[0])
        return pts[num // 2]

    def bad_median(pts, val=.5):
        """Pick the point at the given fractional position after sorting by
        x: an approximate percentile, hence the name."""
        num = len(pts)
        if num == 0:
            return None
        pts = sorted(pts, key=lambda x: x[0])
        return pts[int(num * val)]

    def get_normal_vec(line):
        # stub: never implemented; returns its input unchanged
        return line

    h_lines = []
    v_lines = []
    for line in raw_lines:
        if is_horizontal(line):
            h_lines.append(line)
        elif is_vertical(line):
            v_lines.append(line)
        else:
            (x1, y1, x2, y2) = line
            cv2.line(debug_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

    if h_lines:
        self.seen_crossbar = False
        self.crossbar_depth = None

    for line in h_lines:
        (x1, y1, x2, y2) = line
        cv2.line(debug_frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
    for line in v_lines:
        (x1, y1, x2, y2) = line
        cv2.line(debug_frame, (x1, y1), (x2, y2), (255, 0, 0), 2)

    h_centers = [center(line) for line in h_lines]
    v_centers = sorted([center(line) for line in v_lines],
                       key=lambda x: x[0])
    h_avg_center = median_pts(h_centers)
    v_avg_center = average_pts(v_centers)

    if h_avg_center:
        cv2.circle(debug_frame, h_avg_center, 5, (0, 0, 0), -1)
    if v_avg_center:
        cv2.circle(debug_frame, v_avg_center, 5, (0, 0, 0), -1)

    # look for a horizontal gap splitting the vertical lines into two poles
    split_pt = None
    for i in range(len(v_centers)):
        if i < len(v_centers) - 1 and \
           v_centers[i + 1][0] - v_centers[i][0] > 40:
            split_pt = i + 1
            break

    left_pole_center = None
    right_pole_center = None
    if split_pt:
        left_centers = v_centers[:split_pt]
        right_centers = v_centers[split_pt:]
        left_pole_center = average_pts(left_centers)
        right_pole_center = average_pts(right_centers)
    elif v_avg_center and h_avg_center and \
            h_avg_center[0] - v_avg_center[0] > 60:
        left_pole_center = v_avg_center
        cv2.circle(debug_frame, v_avg_center, 5, (0, 0, 0), -1)
    elif v_avg_center and h_avg_center and \
            h_avg_center[0] - v_avg_center[0] < -60:
        right_pole_center = v_avg_center
        cv2.circle(debug_frame, v_avg_center, 5, (0, 0, 0), -1)
    else:
        # fall back on the outer endpoints of the horizontal lines
        lefts, rights = get_med_endpoints(h_lines)
        if lefts:
            cv2.circle(debug_frame, lefts, 5, (0, 0, 0), -1)
            left_pole_center = (lefts[0], lefts[1] - 80)
        if rights:
            cv2.circle(debug_frame, rights, 5, (0, 0, 0), -1)
            right_pole_center = (rights[0], rights[1] - 80)

    if left_pole_center:
        self.left_pole = left_pole_center[0]
        cv2.circle(debug_frame, left_pole_center, 5, (0, 0, 0), -1)
    if right_pole_center:
        self.right_pole = right_pole_center[0]
        cv2.circle(debug_frame, right_pole_center, 5, (0, 0, 0), -1)

    # median_slope_h = np.median(list(slope(line) for line in h_lines))
    # points = []
    # for x1, y1, x2, y2 in raw_lines:
    #     points.append((x1, y1))
    #     points.append((x2, y2))
    # if points:
    #     rect = cv2.minAreaRect(np.array(points))
    #     box = cv2.cv.BoxPoints(rect)
    #     box = np.int0(box)
    #     # test aspect ratio & area, create bin if matches
    #     (x, y), (w, h), theta = rect
    #     cv2.drawContours(debug_frame, [box], 0, (0, 0, 255), 2)

    binary = libvision.cv2_to_cv(binary)
    svr.debug("Binary", binary)
    debug_frame = libvision.cv2_to_cv(debug_frame)
    svr.debug("Debug", debug_frame)
def process_frame(self, frame):
    # Get Channels
    hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
    grey = libvision.misc.get_channel(hsv, 2)

    # load a haar classifier
    hc = cv.Load(os.path.join(os.path.dirname(os.path.dirname(__file__)),
                              "buoy_cascade_4.xml"))

    # use classifier to detect buoys
    minsize = (int(self.minsize), int(self.minsize))
    # maxsize is tracked below but never passed in: the old
    # cv.HaarDetectObjects call only accepts a minimum size
    maxsize = (int(self.maxsize), int(self.maxsize))
    buoys = cv.HaarDetectObjects(grey, hc, cv.CreateMemStorage(),
                                 min_size=minsize)

    # compute average buoy size and extract detections to a list
    avg_w = 0
    for (x, y, w, h), n in buoys:
        # create a buoy object for each new detection
        new_buoy = self.new_buoy(x, y, w)
        # make note of size
        avg_w = avg_w + w
        # add this buoy to our list of new buoys
        self.new.append(new_buoy)

    # update search size based on sizes found this frame
    if buoys:
        avg_w = avg_w / len(buoys)
        self.minsize = int(avg_w * .7)
        self.maxsize = int(avg_w * 1.3)

    # determine color of new buoys (if possible)
    libvision.buoy_analyzer(frame, self.new)

    # sort these new buoys into the appropriate tier; must be called every
    # frame for upkeep
    self.sort_buoys()

    ####### DEBUG #######
    if self.debug:
        # display confirmed buoys
        for confirmed in self.confirmed:
            x = confirmed.x
            y = confirmed.y
            w = confirmed.width
            # draw rectangles on frame
            cv.Rectangle(frame, (x, y), (x + w, y + w),
                         confirmed.debug_color, thickness=6)
            cv.Rectangle(frame, (x, y), (x + w, y + w),
                         COLORS[confirmed.color], thickness=-1)
        # show debug frame
        svr.debug("BuoyTest", frame)
    ####### END DEBUG #######

    self.output.buoys = self.confirmed
    for buoy in self.output.buoys:
        buoy.theta = (buoy.x - frame.width / 2) * 37 / (frame.width / 2)
        buoy.phi = -1 * (buoy.y - frame.height / 2) * 36 / (frame.height / 2)
    self.return_output()
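# The detector above adapts its Haar search window to 0.7x-1.3x of the mean
# width it just saw. A hedged sketch of that update with clamping added, so a
# single bad frame cannot collapse or explode the window; the floor/ceil
# values are illustrative assumptions, not taken from the original code.
def update_search_window(avg_width, lo=0.7, hi=1.3, floor=10, ceil=300):
    """Derive the next frame's (minsize, maxsize) from this frame's
    average detection width."""
    minsize = max(int(avg_width * lo), floor)
    maxsize = min(int(avg_width * hi), ceil)
    return minsize, maxsize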
def process_frame(self, frame): # This is equivalent to the old routine, but it isn't actually necessary #height, width, depth = libvision.cv_to_cv2(frame).shape #self.debug_frame = np.zeros((height, width, 3), np.uint8) # Debug numpy is CV2 self.debug_frame = libvision.cv_to_cv2(frame) # CV2 Transforms self.numpy_frame = self.debug_frame.copy() self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5) self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV) (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame) # Change the frame number to determine what channel to focus on self.numpy_frame = self.frame2 # Thresholding self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh ) kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4)) # #kernel = np.ones((2,2), np.uint8) self.numpy_frame = cv2.erode(self.numpy_frame, kernel) self.numpy_frame = cv2.dilate(self.numpy_frame, kernel2) self.adaptive_frame = self.numpy_frame.copy() # # Find contours contours, hierarchy = cv2.findContours(self.numpy_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) self.raw_led = [] self.raw_buoys = [] if len(contours) > 1: cnt = contours[0] cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3) for h, cnt in enumerate(contours): approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True) center, radius = cv2.minEnclosingCircle(cnt) x, y = center if len(approx) > 10: if (radius > 17): new_buoy = Buoy(int(x), int(y), int(radius)) new_buoy.id = self.recent_id self.recent_id += 1 self.raw_buoys.append(new_buoy) cv2.drawContours(self.numpy_frame, [cnt], 0, (0, 0, 255), -1) for buoy in self.raw_buoys: self.match_buoys(buoy) self.sort_buoys() self.draw_buoys() self.return_output() self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame) self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame) self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame) svr.debug("processed", self.numpy_to_cv) svr.debug("adaptive", self.adaptive_to_cv) svr.debug("debug", self.debug_to_cv)
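# The contour filter above accepts a blob as a buoy when its polygon
# approximation has more than 10 vertices and its min enclosing circle is
# reasonably large. A hedged sketch of an equivalent, more explicit
# circularity test: near-circular blobs also fill most of their enclosing
# circle. fill_ratio is an assumed cutoff, not a value from this codebase.
import math
import cv2

def is_circular(contour, min_radius=17, fill_ratio=0.6):
    (x, y), radius = cv2.minEnclosingCircle(contour)
    if radius <= min_radius:
        return False
    circle_area = math.pi * radius * radius
    return cv2.contourArea(contour) / circle_area >= fill_ratio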
def process_frame(self, frame):
    inv_res_ratio = 2
    center_sep = 100
    upper_canny_thresh = 40  # 40
    acc_thresh = 10  # 20, 50 with green settings
    min_radius = 3
    max_radius = 50

    # Debug numpy is CV2
    debug_frame = libvision.cv_to_cv2(frame)
    svr.debug("original", frame)

    # CV2 Transforms
    numpy_frame = debug_frame.copy()
    numpy_frame = cv2.medianBlur(numpy_frame, 5)
    numpy_frame = cv2.cvtColor(numpy_frame, cv2.COLOR_BGR2HSV)

    # Kernel for erosion/dilation
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

    # Split into HSV frames
    (h_frame, s_frame, v_frame) = cv2.split(numpy_frame)

    # Run inverse adaptive thresh on saturation channel
    s_adapt_thresh = cv2.adaptiveThreshold(s_frame, 255,
                                           cv2.ADAPTIVE_THRESH_MEAN_C,
                                           cv2.THRESH_BINARY_INV, 47, 10)

    # Erode and dilate the saturation frame
    s_eroded = cv2.erode(s_adapt_thresh, kernel)
    s_dilated = cv2.dilate(s_eroded, kernel)

    # Threshold the value frame
    _, v_thresh = cv2.threshold(v_frame, 250, 255, cv2.THRESH_BINARY)

    # Erode and dilate the value frame
    v_eroded = cv2.erode(v_thresh, kernel)
    v_dilated = cv2.dilate(v_eroded, kernel)

    s_contours = s_dilated.copy()
    # Find contours on the dilated saturation channel
    s_cnt, hy = cv2.findContours(s_dilated, cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)

    v_contours = v_dilated.copy()
    # Find contours on the dilated value channel
    v_cnt, hy = cv2.findContours(v_dilated, cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)

    # check the contour lists, not the image copies
    if len(s_cnt) > 0:
        cv2.drawContours(s_contours, s_cnt, -1, (255, 255, 255), 3)
    if len(v_cnt) > 0:
        cv2.drawContours(v_contours, v_cnt, -1, (255, 255, 255), 3)

    s_circles = cv2.HoughCircles(s_contours, cv2.cv.CV_HOUGH_GRADIENT,
                                 inv_res_ratio, center_sep, np.array([]),
                                 upper_canny_thresh, acc_thresh,
                                 min_radius, max_radius)
    v_circles = cv2.HoughCircles(v_contours, cv2.cv.CV_HOUGH_GRADIENT,
                                 inv_res_ratio, center_sep, np.array([]),
                                 upper_canny_thresh, acc_thresh,
                                 min_radius, max_radius)

    # HoughCircles returns None when nothing is found
    if s_circles is not None:
        for circle in s_circles[0]:
            (x, y, radius) = circle
            cv2.circle(debug_frame, (int(x), int(y)), int(radius) + 10,
                       (0, 255, 0), 5)

    # if v_circles is not None:
    #     for circle in v_circles[0]:
    #         (x, y, radius) = circle
    #         cv2.circle(debug_frame, (int(x), int(y)), int(radius) + 10,
    #                    (0, 0, 255), 5)

    debug_to_cv = libvision.cv2_to_cv(debug_frame)
    svr.debug("debug_frame", debug_to_cv)
def process_frame(self, frame):
    if self.path_manager.start_angle is None:
        self.path_manager.start_angle = get_yaw()

    self.output.found = False

    cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

    # Use RGB color finder
    binary = libvision.cmodules.target_color_rgb.find_target_color_rgb(
        frame, 250, 125, 0, 1500, 500, .3)

    color_filtered = cv.CloneImage(binary)
    blob_map = cv.CloneImage(binary)
    blobs = libvision.blob.find_blobs(binary, blob_map,
                                      min_blob_size=50, max_blobs=10)
    if not blobs:
        return

    binary = cv.CloneImage(blob_map)
    mapping = [0] * 256
    for blob in blobs:
        mapping[blob.id] = 255
    libvision.greymap.greymap(blob_map, binary, mapping)

    # Get Edges
    cv.Canny(binary, binary, 30, 40)

    # Hough Transform
    line_storage = cv.CreateMemStorage()
    lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD,
                           rho=1, theta=math.pi / 180,
                           threshold=self.hough_threshold,
                           param1=0, param2=0)
    lines = lines[:self.lines_to_consider]  # Limit number of lines
    if not lines:
        return

    paths = self.path_manager.process(lines, blobs)

    if paths and not self.path:
        # Check whether paths[1] is clockwise of paths[0]
        distance = circular_distance(paths[0].angle, paths[1].angle)
        print
        print "Distance: ", distance
        print paths[0].theta, paths[0].angle
        print paths[1].theta, paths[1].angle
        if distance > 0:
            self.path = paths[self.which_path]
        else:
            self.path = paths[1 - self.which_path]
        print self.path.angle, self.path.theta
        print

    if paths and self.path in paths and self.path.blobs:
        temp_map = cv.CloneImage(blob_map)
        mapping = [0] * 256
        for blob in self.path.blobs:
            mapping[blob.id] = 255
        libvision.greymap.greymap(blob_map, temp_map, mapping)
        center = self.find_centroid(temp_map)
        svr.debug("map", temp_map)
        self.path.center = (center[0] - (frame.width / 2),
                            -center[1] + (frame.height / 2))

        self.output.found = True
        self.output.theta = self.path.theta
        self.output.x = self.path.center[0] / (frame.width / 2)
        self.output.y = self.path.center[1] / (frame.height / 2)
        print self.output

    if self.debug:
        # Show color filtered
        color_filtered_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(color_filtered, color_filtered_rgb, cv.CV_GRAY2RGB)
        cv.SubS(color_filtered_rgb, (255, 0, 0), color_filtered_rgb)
        cv.Sub(frame, color_filtered_rgb, frame)

        # Show edges
        binary_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(binary, binary_rgb, cv.CV_GRAY2RGB)
        cv.Add(frame, binary_rgb, frame)  # Add white to edge pixels
        cv.SubS(binary_rgb, (0, 0, 255), binary_rgb)
        cv.Sub(frame, binary_rgb, frame)  # Remove all but red

        theta = math.radians(
            circular_distance(self.path_manager.start_angle, get_yaw()))
        if theta < 0:
            scale = math.cos(-2 * theta)
            theta = math.pi + theta
            libvision.misc.draw_lines(
                frame, [((-frame.width / 2) * scale, theta)])
        else:
            libvision.misc.draw_lines(frame, [(frame.width / 2, theta)])

        # Show lines
        if self.output.found:
            rounded_center = (
                int(round(center[0])),
                int(round(center[1])),
            )
            cv.Circle(frame, rounded_center, 5, (0, 255, 0))
            libvision.misc.draw_lines(frame,
                                      [(frame.width / 2, self.path.theta)])
        else:
            libvision.misc.draw_lines(frame, lines)

        svr.debug("Path", frame)

    self.return_output()
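# circular_distance() above lives elsewhere in this codebase. A plausible
# implementation, consistent with how the sign is tested above (positive when
# b sits clockwise of a) and with the degree headings get_yaw() appears to
# return; treat the exact semantics as an assumption.
def circular_distance(a, b):
    """Signed smallest rotation from heading a to heading b, in degrees,
    in the interval (-180, 180]."""
    d = (b - a) % 360.0
    return d - 360.0 if d > 180.0 else d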
def process_frame(self, frame): self.debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) self.test_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.Copy(frame, self.debug_frame) cv.Copy(frame, self.test_frame) cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have saturation channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 1) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) # Adaptive Threshold cv.AdaptiveThreshold( binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) cv.CvtColor(binary, self.debug_frame, cv.CV_GRAY2RGB) # Find Corners temp1 = cv.CreateImage(cv.GetSize(frame), 8, 1) temp2 = cv.CreateImage(cv.GetSize(frame), 8, 1) self.corners = cv.GoodFeaturesToTrack( binary, temp1, temp2, self.max_corners, self.quality_level, self.min_distance, None, self.good_features_blocksize, 0, 0.4, ) # Display Corners for corner in self.corners: corner_color = (0, 0, 255) text_color = (0, 255, 0) font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.6, 0.6, 0, 1, 1) cv.Circle(self.debug_frame, (int(corner[0]), int(corner[1])), 15, corner_color, 2, 8, 0) # Find Candidates for confirmed in self.confirmed: confirmed.corner1_repl_check = 0 confirmed.corner2_repl_check = 0 confirmed.corner3_repl_check = 0 confirmed.corner4_repl_check = 0 for corner in self.corners: if ( math.fabs(confirmed.corner1[0] - corner[0]) < self.MaxCornerTrans and math.fabs(confirmed.corner1[1] - corner[1]) < self.MaxCornerTrans ): confirmed.corner1_repl_check = 1 confirmed.corner1_repl = corner elif ( math.fabs(confirmed.corner2[0] - corner[0]) < self.MaxCornerTrans and math.fabs(confirmed.corner2[1] - corner[1]) < self.MaxCornerTrans ): confirmed.corner2_repl_check = 1 confirmed.corner2_repl = corner elif ( math.fabs(confirmed.corner3[0] - corner[0]) < self.MaxCornerTrans and math.fabs(confirmed.corner3[1] - corner[1]) < self.MaxCornerTrans ): confirmed.corner3_repl_check = 1 confirmed.corner3_repl = corner elif ( math.fabs(confirmed.corner4[0] - corner[0]) < self.MaxCornerTrans and math.fabs(confirmed.corner4[1] - corner[1]) < self.MaxCornerTrans ): confirmed.corner4_repl_check = 1 confirmed.corner4_repl = corner if ( confirmed.corner4_repl_check == 1 and confirmed.corner3_repl_check == 1 and confirmed.corner2_repl_check == 1 and confirmed.corner1_repl_check == 1 ): confirmed.corner1 = confirmed.corner1_repl confirmed.corner2 = confirmed.corner2_repl confirmed.corner3 = confirmed.corner3_repl confirmed.corner4 = confirmed.corner4_repl confirmed.midx = rect_midpointx( confirmed.corner1, confirmed.corner2, confirmed.corner3, confirmed.corner4 ) confirmed.midy = rect_midpointy( confirmed.corner1, confirmed.corner2, confirmed.corner3, confirmed.corner4 ) if confirmed.last_seen < self.last_seen_max: confirmed.last_seen += 5 for corner1 in self.corners: for corner2 in self.corners: for corner3 in self.corners: for corner4 in self.corners: # Checks that corners are not the same and are in the proper orientation if ( corner4[0] != corner3[0] and corner4[0] != corner2[0] and corner4[0] != corner1[0] and corner3[0] != corner2[0] and corner3[0] != corner1[0] and corner2[0] != corner1[0] and corner4[1] != corner3[1] and corner4[1] != corner2[1] and corner4[1] != corner1[1] and 
corner3[1] != corner2[1] and corner3[1] != corner1[1] and corner2[1] != corner1[1] and corner2[0] >= corner3[0] and corner1[1] >= corner4[1] and corner2[0] >= corner1[0] ): # Checks that the side ratios are correct if ( math.fabs(line_distance(corner1, corner3) - line_distance(corner2, corner4)) < self.size_threshold and math.fabs(line_distance(corner1, corner2) - line_distance(corner3, corner4)) < self.size_threshold and math.fabs(line_distance(corner1, corner3) / line_distance(corner1, corner2)) < self.ratio_threshold or math.fabs(line_distance(corner1, corner2) / line_distance(corner1, corner3)) < self.ratio_threshold ): # Checks that angles are roughly 90 degrees angle_cnr_2 = math.fabs( angle_between_lines(line_slope(corner1, corner2), line_slope(corner2, corner4)) ) if self.angle_min < angle_cnr_2 < self.angle_max: angle_cnr_3 = math.fabs( angle_between_lines(line_slope(corner1, corner3), line_slope(corner3, corner4)) ) if self.angle_min2 < angle_cnr_3 < self.angle_max2: new_bin = Bin(corner1, corner2, corner3, corner4) self.match_bins(new_bin) self.sort_bins() """ #START SHAPE PROCESSING #TODO load these ONCE somewhere samples = np.loadtxt('generalsamples.data',np.float32) responses = np.loadtxt('generalresponses.data',np.float32) responses = responses.reshape((responses.size,1)) model = cv2.KNearest() model.train(samples,responses) for bin in self.confirmed: try: bin.speedlimit except: continue transf = cv.CreateMat(3, 3, cv.CV_32FC1) corner_orders = [ [bin.corner1, bin.corner2, bin.corner3, bin.corner4], #0 degrees [bin.corner4, bin.corner3, bin.corner2, bin.corner1], #180 degrees [bin.corner2, bin.corner4, bin.corner1, bin.corner3], #90 degrees [bin.corner3, bin.corner1, bin.corner4, bin.corner2], #270 degrees [bin.corner3, bin.corner4, bin.corner1, bin.corner2], #0 degrees and flipped X [bin.corner2, bin.corner1, bin.corner4, bin.corner3], #180 degrees and flipped X [bin.corner1, bin.corner3, bin.corner2, bin.corner4], #90 degrees and flipped X [bin.corner4, bin.corner2, bin.corner3, bin.corner1]] #270 degrees andf flipped X for i in range(0, 8): cv.GetPerspectiveTransform( corner_orders[i], [(0, 0), (0, 256), (128, 0), (128, 256)], transf ) shape = cv.CreateImage([128, 256], 8, 3) cv.WarpPerspective(frame, shape, transf) shape_thresh = np.zeros((256-104,128,1), np.uint8) j = 104 while j<256: i = 0 while i<128: pixel = cv.Get2D(shape, j, i) if int(pixel[2]) > (int(pixel[1]) + int(pixel[0])) * 0.7: shape_thresh[j-104,i] = 255 else: shape_thresh[j-104,i] = 0 i = i+1 j = j+1 cv2.imshow("Bin " + str(i), shape_thresh) contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE) for cnt in contours: if cv2.contourArea(cnt)>50: [x,y,w,h] = cv2.boundingRect(cnt) if h>54 and w>36: roi = thresh[y:y+h,x:x+w] roismall = cv2.resize(roi,(10,10)) roismall = roismall.reshape((1,100)) roismall = np.float32(roismall) retval, results, neigh_resp, dists = model.find_nearest(roismall, k = 1) digit_tuples.append( (x, int((results[0][0]))) ) if len(digit_tuples) == 2: digit_tuples_sorted = sorted(digit_tuples, key=lambda digit_tuple: digit_tuple[0]) speedlimit = 0 for i in range(0, len(digit_tuples_sorted)): speedlimit = speedlimit * 10 + digit_tuples_sorted[i][1] bin.speedlimit = speedlimit print "Found speed limit: " + str(speedlimit) break else: print "Unable to determine speed limit" #... 
TODO more #END SHAPE PROCESSING """ svr.debug("Bins", self.debug_frame) svr.debug("Bins2", self.test_frame) # Output bins self.output.bins = self.confirmed anglesum = 0 for bins in self.output.bins: bins.theta = (bins.midx - frame.width / 2) * 37 / (frame.width / 2) bins.phi = -1 * (bins.midy - frame.height / 2) * 36 / (frame.height / 2) bins.shape = bins.object anglesum += bins.angle # bins.orientation = bins.angle if len(self.output.bins) > 0: self.output.orientation = anglesum / len(self.output.bins) else: self.output.orientation = None self.return_output()
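# Several detectors in this file convert pixel coordinates to bearings with
# the same linear map used in the output loop above: image center -> (0, 0),
# frame edges -> +/-37 degrees horizontally and +/-36 degrees vertically
# (evidently the camera's half fields of view). A self-contained restatement;
# note a linear model like this ignores lens distortion.
def pixel_to_angles(x, y, width, height, half_fov_x=37.0, half_fov_y=36.0):
    theta = (x - width / 2.0) * half_fov_x / (width / 2.0)
    phi = -(y - height / 2.0) * half_fov_y / (height / 2.0)
    return theta, phi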
def process_frame(self, frame): self.debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) og_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.Copy(frame, self.debug_frame) cv.Copy(self.debug_frame, og_frame) cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have saturation channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 3) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) cv.AdaptiveThreshold(binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) # Get Edges #cv.Canny(binary, binary, 30, 40) cv.CvtColor(binary, self.debug_frame, cv.CV_GRAY2RGB) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_PROBABILISTIC, rho=1, theta=math.pi / 180, threshold=self.hough_threshold, param1=self.min_length, param2=self.max_gap ) lines = [] corners = [] for line in raw_lines: lines.append(line) #Grouping lines depending on endpoint simularities for line1 in lines[:]: for line2 in lines[:]: if line1 in lines and line2 in lines and line1 != line2: if (math.fabs(line1[0][0] - line2[0][0]) < self.max_corner_range and math.fabs(line1[0][1] - line2[0][1]) < self.max_corner_range and math.fabs(line1[1][0] - line2[1][0]) < self.max_corner_range and math.fabs(line1[1][1] - line2[1][1]) < self.max_corner_range): if line_distance(line1[0], line1[1]) > line_distance(line2[0], line2[1]): lines.remove(line2) else: lines.remove(line1) elif (math.fabs(line1[0][0] - line2[1][0]) < self.max_corner_range and math.fabs(line1[0][1] - line2[1][1]) < self.max_corner_range and math.fabs(line1[1][0] - line2[0][0]) < self.max_corner_range and math.fabs(line1[1][1] - line2[0][1]) < self.max_corner_range): if line_distance(line1[0], line1[1]) > line_distance(line2[0], line2[1]): lines.remove(line2) else: lines.remove(line1) for line in lines: corners.append(line[0]) corners.append(line[1]) for corner1 in corners: for corner2 in corners: for corner3 in corners: for corner4 in corners: #Checks that corners are not the same and are in the proper orientation if corner4[0] != corner3[0] and corner4[0] != corner2[0] and corner4[0] != corner1[0] and \ corner3[0] != corner2[0] and corner3[0] != corner1[0] and corner2[0] != corner1[0] and \ corner4[1] != corner3[1] and corner4[1] != corner2[1] and corner4[1] != corner1[1] and \ corner3[1] != corner2[1] and corner3[1] != corner1[1] and corner2[1] != corner1[1] and \ corner2[0] >= corner3[0] and corner1[1] >= corner4[1] and corner2[0] >= corner1[0] and \ math.fabs(corner1[0] - corner4[0]) > self.min_corner_distance and \ math.fabs(corner1[1] - corner4[1]) > self.min_corner_distance and \ math.fabs(corner2[0] - corner3[0]) > self.min_corner_distance and \ math.fabs(corner2[1] - corner3[1]) > self.min_corner_distance: #Checks that the side ratios are correct if math.fabs(line_distance(corner1, corner3) - line_distance(corner2, corner4)) < self.size_threshold and \ math.fabs(line_distance(corner1, corner2) - line_distance(corner3, corner4)) < self.size_threshold and \ math.fabs(line_distance(corner1, corner3) / line_distance(corner1, corner2)) < .5 * self.ratio_threshold: #^^^ CHANGED OR TO AND --> DID MUCH BETTER. 
CONSIDER CHANGING ON BINSCORNER
                        # Checks that angles are roughly 90 degrees
                        angle_cnr_2 = math.fabs(angle_between_lines(
                            line_slope(corner1, corner2),
                            line_slope(corner2, corner4)))
                        if self.angle_min < angle_cnr_2 < self.angle_max:
                            angle_cnr_3 = math.fabs(angle_between_lines(
                                line_slope(corner1, corner3),
                                line_slope(corner3, corner4)))
                            if self.angle_min2 < angle_cnr_3 < self.angle_max2:
                                new_box = Pizza(corner1, corner2,
                                                corner3, corner4)
                                self.match_Boxes(new_box)

    self.min_perimeter = 500000
    for Box in self.Boxes[:]:
        Box.lastseen -= 2
        if Box.lastseen < 0:
            self.Boxes.remove(Box)
            continue
        perimeter = (line_distance(Box.corner1, Box.corner3) * 2
                     + line_distance(Box.corner1, Box.corner2) * 2)
        if math.fabs(perimeter - self.min_perimeter) > \
                self.min_perimeter * self.perimeter_threshold and \
                perimeter > self.min_perimeter:
            print "perimeter error (this is a good thing)"
            print math.fabs(perimeter - self.min_perimeter), \
                "is greater than", self.min_perimeter * self.perimeter_threshold

    self.draw_pizza()

    for line in lines:
        cv.Circle(self.debug_frame, line[0], 15, (255, 0, 0), 2, 8, 0)
        cv.Circle(self.debug_frame, line[1], 15, (255, 0, 0), 2, 8, 0)

    self.output.pizza = self.Boxes
    anglesum = 0
    for Box in self.Boxes:
        Box.theta = (Box.center[0] - frame.width / 2) * 37 / (frame.width / 2)
        Box.phi = -1 * (Box.center[1] - frame.height / 2) * 36 / (frame.height / 2)
        anglesum += Box.angle
    if len(self.output.pizza) > 0:
        self.output.orientation = anglesum / len(self.output.pizza)
    else:
        self.output.orientation = None
    self.return_output()

    svr.debug("Pizza", self.debug_frame)
    svr.debug("Original", og_frame)
def process_frame(self, frame): (w,h) = cv.GetSize(frame) #generate hue selection frames #create locations for the a pair of test frames frametest = cv.CreateImage(cv.GetSize(frame), 8, 3) binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1) #use the red channel for the binary frame (just for debugging purposes) cv.Copy(frame, frametest) cv.SetImageCOI(frametest, 3) cv.Copy(frametest, binarytest) cv.SetImageCOI(frametest, 0) #reset COI #svr.debug("R?",binarytest) # Resize image to 320x240 #copy = cv.CreateImage(cv.GetSize(frame), 8, 3) #cv.Copy(frame, copy) #cv.SetImageROI(frame, (0, 0, 320, 240)) #cv.Resize(copy, frame, cv.CV_INTER_NN) found_gate = False #create a new frame just for comparison purposes unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.Copy(frame,unchanged_frame) #apply a course noise filter cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have saturation channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 1) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) #reset COI #shift hue of image such that orange->red are at top of spectrum binary = libvision.misc.cv_to_cv2(binary) binary = libvision.misc.shift_hueCV2(binary, self.target_shift) binary = libvision.misc.cv2_to_cv(binary) #correct for wraparound on red spectrum #cv.InRange(binary,a_array,b_array,binarytest) #generate mask #cv.Add(binary,cv.fromarray(ones*180),binary,mask=binarytest) #use mask to selectively add values #svr.debug("R2?",binary) svr.debug("R2?",binary) #run adaptive threshold for edge detection and more noise filtering cv.AdaptiveThreshold(binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) if self.debug: color_filtered = cv.CloneImage(binary) # Get Edges cv.Canny(binary, binary, 30, 40) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi/180, threshold=self.hough_threshold, param1=0, param2=0 ) # Get vertical lines vertical_lines = [] for line in raw_lines: if line[1] < self.vertical_threshold or \ line[1] > math.pi-self.vertical_threshold: #absolute value does better grouping currently vertical_lines.append((abs(line[0]), line[1])) #print message to user for performance purposes logging.debug("{} possibilities reduced to {} lines".format( len(raw_lines), len(vertical_lines) )) # Group vertical lines vertical_line_groups = [] # A list of line groups which are each a line list i = 0 for line in vertical_lines: group_found = False for line_group in vertical_line_groups: i += 1 if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groups.append([line]) #quick debugging statement logging.debug("{} internal iterations for {} groups".format(i, len(vertical_line_groups))) # Average line groups into lines vertical_lines = [] for line_group in vertical_line_groups: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos)/len(rhos), circular_average(angles, math.pi)) vertical_lines.append(line) #################################################### #vvvv Horizontal line code isn't used for anything # Get horizontal lines 
    horizontal_lines = []
    for line in raw_lines:
        dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi
        if dist_from_horizontal < self.horizontal_threshold or \
           dist_from_horizontal > math.pi - self.horizontal_threshold:
            horizontal_lines.append((abs(line[0]), line[1]))

    # Group horizontal lines
    horizontal_line_groups = []  # A list of line groups, each a line list
    for line in horizontal_lines:
        group_found = False
        for line_group in horizontal_line_groups:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            horizontal_line_groups.append([line])

    if len(horizontal_line_groups) == 1:
        self.seen_crossbar = True
        if self.debug:
            rhos = map(lambda line: line[0], horizontal_line_groups[0])
            angles = map(lambda line: line[1], horizontal_line_groups[0])
            line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
            horizontal_lines = [line]
    else:
        self.seen_crossbar = False
        horizontal_lines = []
    # ^^^ Horizontal line code isn't used for anything
    ###################################################

    self.left_pole = None
    self.right_pole = None
    self.returning = 0
    self.found = False

    if len(vertical_lines) == 2:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        self.left_pole = round(min(vertical_lines[0][0],
                                   vertical_lines[1][0]), 2) - width / 2
        self.right_pole = round(max(vertical_lines[0][0],
                                    vertical_lines[1][0]), 2) - width / 2
        self.returning = (self.left_pole + self.right_pole) / 2
        logging.info("Returning {} as gate center delta.".format(self.returning))

        # initialize first iteration with 2 known poles
        if self.last_seen < 0:
            self.last_center = None
            self.last_seen = 0

        # increment a counter if the result is good
        if self.last_center is None:
            self.last_center = self.returning
            self.seen_count = 1
        elif math.fabs(self.last_center - self.returning) < self.center_trans_thresh:
            self.seen_count += 1
            self.last_seen += 2
        else:
            self.last_seen -= 1

        # if not convinced, forget left/right pole; else proclaim success
        if self.seen_count < self.seen_count_thresh:
            self.left_pole = None
            self.right_pole = None
        else:
            print "FOUND CENTER AND RETURNED IT"
            self.found = True
    else:
        self.returning = 0
        if self.last_seen < 0:
            self.last_center = None
            self.last_seen = 0
        self.last_seen -= 1
        self.left_pole = None
        self.right_pole = None

    # TODO: If one pole is seen, is it left or right pole?

    if self.debug:
        cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)
        libvision.misc.draw_lines(frame, vertical_lines)
        libvision.misc.draw_lines(frame, horizontal_lines)
        if self.found:
            cv.Circle(frame, (int(frame.width / 2 + self.returning),
                              int(frame.height / 2)),
                      15, (0, 255, 0), 2, 8, 0)
            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3)
            cv.PutText(frame, "Gate Sent to Mission Control", (100, 400),
                       font, (255, 255, 0))
        svr.debug("Gate", cv.CloneImage(frame))
        svr.debug("Unchanged", cv.CloneImage(unchanged_frame))

    # populate self.output with info
    self.output.seen_crossbar = self.seen_crossbar
    self.output.left_pole = self.left_pole
    self.output.right_pole = self.right_pole
    self.return_output()
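# A condensed restatement (not the original class) of the seen-count
# bookkeeping the gate detector above uses: a center estimate is only
# trusted once it has stayed near the first sighting for several frames.
# The thresholds are illustrative stand-ins for the tuned
# self.center_trans_thresh / self.seen_count_thresh values, and the
# original's extra last_seen decay counter is omitted for brevity.
class CenterDebouncer(object):
    def __init__(self, trans_thresh=25.0, count_thresh=3):
        self.trans_thresh = trans_thresh
        self.count_thresh = count_thresh
        self.last_center = None
        self.seen_count = 0

    def update(self, center):
        """Feed one frame's center estimate; returns True once confirmed."""
        if self.last_center is None or \
           abs(center - self.last_center) >= self.trans_thresh:
            # new or jumped estimate: start counting again
            self.last_center = center
            self.seen_count = 1
        else:
            self.seen_count += 1
        return self.seen_count >= self.count_thresh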
def process_frame(self, frame):
    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    self.hsv_frame = self.numpy_frame
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)
    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame2

    # Thresholding
    self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV,
                                             self.adaptive_thresh_blocksize,
                                             self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
    self.numpy_frame = cv2.erode(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel2)

    self.adaptive_frame = self.numpy_frame.copy()
    self.numpy_frame = cv2.Canny(self.adaptive_frame, 100, 250,
                                 apertureSize=3)

    self.raw_circles = []
    self.raw_buoys = []
    self.raw_circles = cv2.HoughCircles(self.numpy_frame,
                                        cv2.cv.CV_HOUGH_GRADIENT,
                                        self.inv_res_ratio,
                                        self.center_sep,
                                        np.array([]),
                                        self.upper_canny_thresh,
                                        self.acc_thresh,
                                        self.min_radius,
                                        self.max_radius)

    if self.raw_circles is not None and len(self.raw_circles[0]) > 0:
        for circle in self.raw_circles[0]:
            (x, y, radius) = circle
            new_buoy = Buoy(x, y, radius, "unknown", self.next_id)
            self.next_id += 1
            self.raw_buoys.append(new_buoy)
            self.match_buoys(new_buoy)

    self.sort_buoys()

    if self.confirmed is not None and len(self.confirmed) > 0:
        for buoy in self.confirmed:
            cv2.circle(self.debug_frame,
                       (int(buoy.centerx), int(buoy.centery)),
                       int(buoy.radius) + 10, (255, 255, 255), 5)

            # sample hue just below the buoy center; OpenCV 8-bit hue runs
            # 0-179, so red wraps near 180 (the original upper bound of 300
            # was unreachable; 150 here is an assumed replacement)
            colorHue = self.hsv_frame[int(buoy.centery + buoy.radius / 2),
                                      int(buoy.centerx)][0]
            if (colorHue >= 0 and colorHue < 45) or colorHue >= 150:
                cv2.putText(self.debug_frame, str(buoy.id) + "RED",
                            (int(buoy.centerx), int(buoy.centery)),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
                buoy.color = "red"
            elif (colorHue >= 70 and colorHue < 150):
                cv2.putText(self.debug_frame, str(buoy.id) + "GRE",
                            (int(buoy.centerx), int(buoy.centery)),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
                if buoy.color != "red" and buoy.color != "yellow":
                    print "switched from ", buoy.color
                buoy.color = "green"
            else:
                cv2.putText(self.debug_frame, str(buoy.id) + "YEL",
                            (int(buoy.centerx), int(buoy.centery)),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
                buoy.color = "yellow"

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame)

    svr.debug("processed", self.numpy_to_cv)
    svr.debug("adaptive", self.adaptive_to_cv)
    svr.debug("debug", self.debug_to_cv)

    # Convert to output format
    self.output.buoys = []
    if self.confirmed is not None and len(self.confirmed) > 0:
        for buoy in self.confirmed:
            buoy.theta = buoy.centerx
            buoy.phi = buoy.centery
            self.output.buoys.append(buoy)

    if self.output.buoys:
        self.return_output()
    return self.output
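# The detector above reads the hue of a single pixel, which is noisy; the
# other buoy detector in this file averages hue over a patch inside the buoy
# instead. A hedged sketch of that patch-mean sampling with bounds clamping
# added so the window cannot run off the image; not code from the original.
import numpy as np

def sample_hue(hsv, cx, cy, radius):
    """Mean hue of a (radius x radius) patch centered at (cx, cy)."""
    r = max(int(radius // 2), 1)
    y0, y1 = max(cy - r, 0), min(cy + r, hsv.shape[0])
    x0, x1 = max(cx - r, 0), min(cx + r, hsv.shape[1])
    return float(np.mean(hsv[y0:y1, x0:x1, 0]))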
def process_frame(self, frame):
    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)
    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame2

    # Thresholding
    self.buoy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                            cv2.ADAPTIVE_THRESH_MEAN_C,
                                            cv2.THRESH_BINARY_INV,
                                            self.adaptive_thresh_blocksize,
                                            self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
    # despeckle the thresholded frame, not the raw channel
    self.buoy_frame = cv2.erode(self.buoy_frame, kernel)
    self.buoy_frame = cv2.dilate(self.buoy_frame, kernel2)

    self.buoy_adaptive = self.buoy_frame.copy()

    # Find contours on the thresholded frame
    contours, hierarchy = cv2.findContours(self.buoy_frame,
                                           cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_led = []
    self.raw_buoys = []

    if len(contours) > 1:
        cv2.drawContours(self.numpy_frame, contours, -1,
                         (255, 255, 255), 3)
        for h, cnt in enumerate(contours):
            approx = cv2.approxPolyDP(cnt,
                                      0.01 * cv2.arcLength(cnt, True), True)
            center, radius = cv2.minEnclosingCircle(cnt)
            x, y = center
            if len(approx) > 12 and radius > 30:
                new_buoy = Buoy(int(x), int(y), int(radius), "unknown")
                new_buoy.id = self.recent_id
                self.recent_id += 1
                self.raw_buoys.append(new_buoy)
                cv2.drawContours(self.numpy_frame, [cnt], 0,
                                 (0, 0, 255), -1)

    # of any pair of well-separated candidates, keep only the larger
    for buoy1 in self.raw_buoys[:]:
        for buoy2 in self.raw_buoys[:]:
            if buoy1 is buoy2:
                continue
            if buoy1 in self.raw_buoys and buoy2 in self.raw_buoys and \
               math.fabs(buoy1.centerx - buoy2.centerx) > self.mid_sep and \
               math.fabs(buoy1.centery - buoy2.centery) > self.mid_sep:
                if buoy1.area < buoy2.area:
                    self.raw_buoys.remove(buoy1)
                elif buoy2.area < buoy1.area:
                    self.raw_buoys.remove(buoy2)

    for buoy in self.raw_buoys:
        self.match_buoys(buoy)

    self.sort_buoys()
    self.draw_buoys()
    self.return_output()

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.buoy_adaptive)

    svr.debug("processed", self.numpy_to_cv)
    svr.debug("adaptive", self.adaptive_to_cv)
    svr.debug("debug", self.debug_to_cv)
def process_frame(self, frame): # Resize image to 320x240 #copy = cv.CreateImage(cv.GetSize(frame), 8, 3) #cv.Copy(frame, copy) #cv.SetImageROI(frame, (0, 0, 320, 240)) #cv.Resize(copy, frame, cv.CV_INTER_NN) found_hedge = False test_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.Copy(frame, test_frame) cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have value channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 2) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) cv.AdaptiveThreshold(binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology kernel = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_ELLIPSE) #cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 4) if self.debug: color_filtered = cv.CloneImage(binary) # Get Edges #cv.Canny(binary, binary, 30, 40) # Hough Transform ''' line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi/180, threshold=self.hough_threshold, param1=0, param2=0 ) ''' # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_PROBABILISTIC, rho=1, theta=math.pi / 180, threshold=self.hough_threshold, param1=self.min_length, param2=self.max_gap ) self.hor_lines = [] for line in raw_lines: slope = line_slope(line[0], line[1]) if slope is None: continue if math.fabs(line_slope(line[0], line[1])) < self.hor_threshold: self.hor_lines.append(line) max_length = 0 for line in self.hor_lines: print line if math.fabs(line_distance(line[0], line[1])) > max_length: max_length = math.fabs(line_distance(line[0], line[1])) crossbar_seg = line ''' # Get vertical lines vertical_lines = [] for line in raw_lines: if line[1] < self.vertical_threshold or \ line[1] > math.pi-self.vertical_threshold: vertical_lines.append( (abs(line[0]), line[1]) ) # Group vertical lines vertical_line_groups = [] # A list of line groups which are each a line list for line in vertical_lines: group_found = False for line_group in vertical_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groups.append([line]) # Average line groups into lines vertical_lines = [] for line_group in vertical_line_groups: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos)/len(rhos), circular_average(angles, math.pi)) vertical_lines.append(line) # Get horizontal lines horizontal_lines = [] for line in raw_lines: dist_from_horizontal = (math.pi/2 + line[1]) % math.pi if dist_from_horizontal < self.horizontal_threshold or \ dist_from_horizontal > math.pi-self.horizontal_threshold: horizontal_lines.append( (abs(line[0]), line[1]) ) # Group horizontal lines horizontal_line_groups = [] # A list of line groups which are each a line list for line in horizontal_lines: group_found = False for line_group in horizontal_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: horizontal_line_groups.append([line]) if len(horizontal_line_groups) is 1: self.seen_crossbar = True rhos = map(lambda line: line[0], horizontal_line_groups[0]) angles = map(lambda line: line[1], horizontal_line_groups[0]) line = (sum(rhos)/len(rhos), 
circular_average(angles, math.pi)) horizontal_lines = [line] else: self.seen_crossbar = False horizontal_lines = [] self.left_pole = None self.right_pole = None if len(vertical_lines) == 2: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] self.left_pole = round(min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width/2 self.right_pole = round(max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width/2 #TODO: If one pole is seen, is it left or right pole? # Calculate planar distance r (assuming we are moving perpendicular to # the hedge) if self.left_pole and self.right_pole: theta = abs(self.left_pole - self.right_pole) self.r = 3 / tan(radians(theta/2)) else: self.r = None if self.r and self.seen_crossbar: bar_phi = (-1*horizontal_lines[0][0] + frame.height/2) / (frame.height/2) * 32 self.crossbar_depth = self.r * atan(radians(bar_phi)) else: self.crossbar_depth = None '''

        self.left_pole = None
        self.right_pole = None
        self.seen_crossbar = False
        self.crossbar_depth = None

        if self.debug and max_length != 0:
            cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)

            if (crossbar_seg[0][0] - frame.width / 2) * 37 / (frame.width / 2) < \
               (crossbar_seg[1][0] - frame.width / 2) * 37 / (frame.width / 2):
                self.left_pole = round((crossbar_seg[0][0] - frame.width / 2) * 37 / (frame.width / 2))
                self.right_pole = round((crossbar_seg[1][0] - frame.width / 2) * 37 / (frame.width / 2))
            else:
                self.left_pole = round((crossbar_seg[1][0] - frame.width / 2) * 37 / (frame.width / 2))
                self.right_pole = round((crossbar_seg[0][0] - frame.width / 2) * 37 / (frame.width / 2))
            self.crossbar_depth = round(-1 * (crossbar_seg[1][1] - frame.height / 2) * 36 / (frame.height / 2))

            if math.fabs(self.left_pole) <= 37 and \
               math.fabs(self.left_pole) >= self.frame_boundary_thresh:
                self.left_pole = None
            if math.fabs(self.right_pole) <= 37 and \
               math.fabs(self.right_pole) >= self.frame_boundary_thresh:
                self.right_pole = None
            self.seen_crossbar = True

            if self.left_pole and self.right_pole:
                self.returning = (self.left_pole + self.right_pole) / 2
                print "Returning ", self.returning

                if self.last_seen < 0:
                    self.last_center = None
                    self.last_seen = 0

                if self.last_center is None:
                    self.last_center = self.returning
                    self.seen_count = 1
                elif math.fabs(self.last_center - self.returning) < self.center_trans_thresh:
                    self.seen_count += 1
                    self.last_seen += 2
                else:
                    self.last_seen -= 1

                if self.seen_count < self.seen_count_thresh:
                    self.left_pole = None
                    self.right_pole = None
                else:
                    print "FOUND CENTER AND RETURNED IT"
                    self.found = True
            else:
                self.returning = 0
                if self.last_seen < 0:
                    self.last_center = None
                    self.last_seen = 0
                self.last_seen -= 1
                self.left_pole = None
                self.right_pole = None

            cv.Line(frame, crossbar_seg[0], crossbar_seg[1],
                    (255, 255, 0), 10, cv.CV_AA, 0)
            # draw each pole straight up from the crossbar endpoint
            # (the y offset uses the endpoint's y coordinate, not its x)
            if self.left_pole and crossbar_seg[0][0] < crossbar_seg[1][0]:
                cv.Line(frame, crossbar_seg[0],
                        (crossbar_seg[0][0], crossbar_seg[0][1] - 500),
                        (255, 0, 0), 10, cv.CV_AA, 0)
            elif self.left_pole:
                cv.Line(frame, crossbar_seg[1],
                        (crossbar_seg[1][0], crossbar_seg[1][1] - 500),
                        (255, 0, 0), 10, cv.CV_AA, 0)
            if self.right_pole and crossbar_seg[0][0] > crossbar_seg[1][0]:
                cv.Line(frame, crossbar_seg[0],
                        (crossbar_seg[0][0], crossbar_seg[0][1] - 500),
                        (255, 0, 0), 10, cv.CV_AA, 0)
            elif
self.right_pole: cv.Line(frame, crossbar_seg[1], (crossbar_seg[1][0], crossbar_seg[1][1] - 500), (255, 0, 0), 10, cv.CV_AA, 0) # populate self.output with infos self.output.seen_crossbar = self.seen_crossbar self.output.left_pole = self.left_pole self.output.right_pole = self.right_pole #self.output.r = self.r self.output.crossbar_depth = self.crossbar_depth self.return_output() print self else: cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB) svr.debug("Hedge", cv.CloneImage(frame)) svr.debug("Hedge2", test_frame)
def process_frame(self, frame): debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.Copy(frame, debug_frame) cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have saturation channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 2) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) cv.AdaptiveThreshold( binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) # Get Edges #cv.Canny(binary, binary, 30, 40) cv.CvtColor(binary, debug_frame, cv.CV_GRAY2RGB) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi / 180, threshold=self.hough_threshold, param1=0, param2=0) line_groups = [] # A list of line groups which are each a line list for line in raw_lines: group_found = False for line_group in line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: line_groups.append([line]) # Average line groups into lines lines = [] for line_group in line_groups: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) lines.append(line) libvision.misc.draw_lines(debug_frame, raw_lines) # cv.CvtColor(color_filtered,debug_frame, cv.CV_GRAY2RGB) svr.debug("Bins", debug_frame)
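# line_group_accept_test() and circular_average() are used throughout these
# detectors but defined elsewhere in the codebase. Plausible sketches of
# both, consistent with how they are called above; treat the exact behavior
# (grouping by rho proximity, vector-mean of wrapped angles) as assumptions.
import math

def line_group_accept_test(line_group, line, max_range):
    """Accept a (rho, theta) line into a group when its rho lies within
    max_range of the group's first member."""
    return abs(line_group[0][0] - line[0]) < max_range

def circular_average(angles, period):
    """Mean of angles that wrap with the given period (math.pi for
    undirected lines), computed via the unit-vector averaging trick so
    values near the wrap point average correctly."""
    scale = 2 * math.pi / period
    s = sum(math.sin(a * scale) for a in angles)
    c = sum(math.cos(a * scale) for a in angles)
    return (math.atan2(s, c) / scale) % period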
def display(self): #Draws objects on frames svr.debug("Unchanged",self.default_frame)
def process_frame(self, frame):
    self.numpy_frame = libvision.cv_to_cv2(frame)
    self.debug_frame = self.numpy_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 7)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)

    (rf1, rf2, rf3) = cv2.split(self.numpy_frame)
    # rf2 (saturation), inverted, for red; rf1 (hue) for green
    rBinary = rf2
    # rBinary = cv2.bitwise_not(rBinary)
    gBinary = rf1

    # Adaptive Threshold
    rBinary = cv2.adaptiveThreshold(rBinary, 255,
                                    cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY_INV,
                                    self.adaptive_thresh_blocksize,
                                    self.adaptive_thresh)
    gBinary = cv2.adaptiveThreshold(gBinary, 255,
                                    cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY_INV,
                                    self.Gadaptive_thresh_blocksize,
                                    self.Gadaptive_thresh)

    rFrame = rBinary.copy()
    gFrame = gBinary.copy()

    # Morphology
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    rBinary = cv2.erode(rBinary, kernel)
    rBinary = cv2.dilate(rBinary, kernel)
    gBinary = cv2.erode(gBinary, kernel)
    gBinary = cv2.dilate(gBinary, kernel)

    # Edge detection on the original BGR frame (self.numpy_frame is HSV at
    # this point, so it is not a valid input for BGR2GRAY)
    gray = cv2.cvtColor(self.debug_frame, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 150, 200, apertureSize=3)

    # HoughLines returns None when no line clears the threshold
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 275)
    if lines is not None:
        for rho, theta in lines[0]:
            # convert (rho, theta) to a long drawable segment; int()
            # truncates toward zero (wrap with np.around to round instead)
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            x1 = int(x0 + 1000 * (-b))
            y1 = int(y0 + 1000 * (a))
            x2 = int(x0 - 1000 * (-b))
            y2 = int(y0 - 1000 * (a))
            cv2.line(self.debug_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

    rFrame = libvision.cv2_to_cv(rFrame)
    gFrame = libvision.cv2_to_cv(gFrame)
    self.debug_frame = libvision.cv2_to_cv(self.debug_frame)

    # svr.debug("Rframe", rFrame)
    # svr.debug("Gframe", gFrame)
    svr.debug("debug", self.debug_frame)
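# The rho/theta-to-segment arithmetic inlined in the loop above comes up in
# every standard-Hough consumer in this file; a self-contained helper doing
# the same thing: walk half_len pixels each way along the line's direction
# from its closest point to the origin.
import numpy as np

def polar_to_segment(rho, theta, half_len=1000):
    """Convert a (rho, theta) Hough line into two drawable endpoints."""
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho
    return ((int(x0 - half_len * b), int(y0 + half_len * a)),
            (int(x0 + half_len * b), int(y0 - half_len * a)))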