def process_frame(self, frame): ################ #setup CV ###### ################ print "processing frame" (w, h) = cv.GetSize(frame) #generate hue selection frames ones = np.ones((h, w, 1), dtype='uint8') a = ones * (180 - self.target_hue) b = ones * (180 - self.target_hue + 20) a_array = cv.fromarray(a) b_array = cv.fromarray(b) #create locations for the test frame and binary frame frametest = cv.CreateImage(cv.GetSize(frame), 8, 3) binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1) #use the red channel for the binary frame (just for debugging purposes) cv.Copy(frame, frametest) cv.SetImageCOI(frametest, 3) cv.Copy(frametest, binarytest) #reset the COI for test frame to RGB. cv.SetImageCOI(frametest, 0) # Resize image to 320x240 #copy = cv.CreateImage(cv.GetSize(frame), 8, 3) #cv.Copy(frame, copy) #cv.SetImageROI(frame, (0, 0, 320, 240)) #cv.Resize(copy, frame, cv.CV_INTER_NN) found_gate = False #create a new frame for comparison purposes unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.Copy(frame, unchanged_frame) #apply noise filter #1 cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have saturation channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 1) cv.Copy(hsv, binary) #spin the color wheel (psuedo-code for later if necessary) # truncate spectrum marked as end # shift all values up based on truncating value (mask out 0 regions) # take truncated bits, and flip them (180->0, 179->1...) # dnow that truncated bits are flipped, add them back in to final image #Reset hsv COI cv.SetImageCOI(hsv, 0) #correct for wraparound on red spectrum cv.InRange(binary, a_array, b_array, binarytest) #generate mask cv.Add(binary, cv.fromarray(ones * 180), binary, mask=binarytest) #use mask to selectively add values #run adaptive threshold for edge detection cv.AdaptiveThreshold( binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) if self.debug: color_filtered = cv.CloneImage(binary) # Get Edges cv.Canny(binary, binary, 30, 40) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi / 180, threshold=self.hough_threshold, param1=0, param2=0) # Get vertical lines vertical_lines = [] i = 0 for line in raw_lines: if line[1] < self.vertical_threshold or \ line[1] > (math.pi-self.vertical_threshold): #absolute value does better grouping currently vertical_lines.append((abs(line[0]), line[1])) i += 1 # print message to user for performance purposes logging.debug("{} possibilities reduced to {} lines".format( i, len(vertical_lines))) # Group vertical lines vertical_line_groups = [ ] #A list of line groups which are each a line list i = 0 for line in vertical_lines: group_found = False for line_group in vertical_line_groups: i += 1 if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groups.append([line]) #quick debugging statement logging.debug("{} internal iterations for {} groups".format( i, len(vertical_line_groups))) # Average line groups into lines vertical_lines = [] for line_group in vertical_line_groups: rhos = map(lambda line: line[0], line_group) #get rho 
of each line angles = map(lambda line: line[1], line_group) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) vertical_lines.append(line) self.left_pole = None self.right_pole = None self.returning = 0 self.found = False if len(vertical_lines) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] self.left_pole = round( min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2 self.right_pole = round( max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2 self.returning = (self.left_pole + self.right_pole) / 2 logging.info("Returning {}".format(self.returning)) #If this is first iteration, count this as seeing the gate if self.last_seen < 0: self.last_center = None self.last_seen = 0 #increment a counter if result is good. if self.last_center is None: self.last_center = self.returning self.seen_count = 1 elif math.fabs(self.last_center - self.returning) < self.center_trans_thresh: self.seen_count += 1 self.last_seen += 2 else: self.last_seen -= 1 #if not convinced, forget left/right pole. Else, proclaim success. if self.seen_count < self.seen_count_thresh: self.left_pole = None self.right_pole = None else: print "FOUND CENTER AND RETURNED IT" self.found = True else: self.returning = 0 if self.last_seen < 0: self.last_center = None self.last_seen = 0 self.last_seen -= 1 self.left_pole = None self.right_POLE = None #extra debugging stuff if self.debug: cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB) libvision.misc.draw_lines(frame, vertical_lines) if self.found: cv.Circle(frame, (int(frame.width / 2 + self.returning), int(frame.height / 2)), 15, (0, 255, 0), 2, 8, 0) font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3) cv.PutText(frame, "Gate Sent to Mission Control", (100, 400), font, (255, 255, 0)) #print frame.width #cv.ShowImage("Gate", cv.CloneImage(frame)) svr.debug("Gate", cv.CloneImage(frame)) svr.debug("Unchanged", cv.CloneImage(unchanged_frame)) self.return_output()
def process_frame(self, frame): found_path = False cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # use RGB color finder binary = libvision.cmodules.target_color_rgb.find_target_color_rgb( frame, self.R, self.G, self.B, self.min_blob_size, self.dev_thresh, self.precision / 1000.0) if self.debug: color_filtered = cv.CloneImage(binary) svr.debug("color_filtered", color_filtered) # Get Edges cv.Canny(binary, binary, 30, 40) # Hough Transform line_storage = cv.CreateMemStorage() lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi / 180, threshold=self.hough_threshold, param1=0, param2=0) lines = lines[:self.lines_to_consider] # Limit number of lines # If there are at least 2 lines and they are close to parallel... # There's a path! #print lines if len(lines) >= 2: # Find: min, max, average theta_max = lines[0][1] theta_min = lines[0][1] total_theta = 0 for rho, theta in lines: total_theta += theta if theta_max < theta: theta_max = theta if theta_min > theta: theta_min = theta theta_range = theta_max - theta_min # Near vertical angles will wrap around from pi to 0. If the range # crosses this vertical line, the range will be way too large. To # correct for this, we always take the smallest angle between the # min and max. if theta_range > math.pi / 2: theta_range = math.pi - theta_range if theta_range < self.theta_threshold: found_path = True angles = map(lambda line: line[1], lines) self.theta = circular_average(angles, math.pi) print found_path if found_path: self.seen_in_a_row += 1 else: self.seen_in_a_row = 0 # stores whether or not we are confident about the path's presence object_present = False print self.seen_in_a_row ### if self.seen_in_a_row >= self.seen_in_a_row_threshold: object_present = True self.image_coordinate_center = self.find_centroid(binary) # Move the origin to the center of the image self.center = (self.image_coordinate_center[0] - frame.width / 2, self.image_coordinate_center[1] * -1 + frame.height / 2) if self.debug: # Show color filtered color_filtered_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.CvtColor(color_filtered, color_filtered_rgb, cv.CV_GRAY2RGB) cv.SubS(color_filtered_rgb, (255, 0, 0), color_filtered_rgb) cv.Sub(frame, color_filtered_rgb, frame) # Show edges binary_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.CvtColor(binary, binary_rgb, cv.CV_GRAY2RGB) cv.Add(frame, binary_rgb, frame) # Add white to edge pixels cv.SubS(binary_rgb, (0, 0, 255), binary_rgb) cv.Sub(frame, binary_rgb, frame) # Remove all but Red # Show lines if self.seen_in_a_row >= self.seen_in_a_row_threshold: rounded_center = ( int(round(self.image_coordinate_center[0])), int(round(self.image_coordinate_center[1])), ) cv.Circle(frame, rounded_center, 5, (0, 255, 0)) libvision.misc.draw_lines(frame, [(frame.width / 2, self.theta)]) else: libvision.misc.draw_lines(frame, lines) #cv.ShowImage("Path", frame) svr.debug("Path", frame) # populate self.output with infos self.output.found = object_present self.output.theta = self.theta if self.center: # scale center coordinates of path based on frame size self.output.x = self.center[0] / (frame.width / 2) self.output.y = self.center[1] / (frame.height / 2) libvision.misc.draw_linesC(frame, [(frame.width / 2, self.output.theta)], [255, 0, 255]) print "Output Returned!!! ", self.output.theta else: self.output.x = None self.output.y = None print "No output..." if self.output.found and self.center: print self.output self.return_output()
def process_frame(self, frame): (w, h) = cv.GetSize(frame) #generate hue selection frames #create locations for the a pair of test frames frametest = cv.CreateImage(cv.GetSize(frame), 8, 3) binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1) #use the red channel for the binary frame (just for debugging purposes) cv.Copy(frame, frametest) cv.SetImageCOI(frametest, 3) cv.Copy(frametest, binarytest) cv.SetImageCOI(frametest, 0) #reset COI #svr.debug("R?",binarytest) # Resize image to 320x240 #copy = cv.CreateImage(cv.GetSize(frame), 8, 3) #cv.Copy(frame, copy) #cv.SetImageROI(frame, (0, 0, 320, 240)) #cv.Resize(copy, frame, cv.CV_INTER_NN) found_gate = False #create a new frame just for comparison purposes unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.Copy(frame, unchanged_frame) #apply a course noise filter cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have saturation channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 1) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) #reset COI #shift hue of image such that orange->red are at top of spectrum ''' binary = libvision.misc.cv_to_cv2(binary) binary = libvision.misc.shift_hueCV2(binary, self.target_shift) binary = libvision.misc.cv2_to_cv(binary) ''' #correct for wraparound on red spectrum #cv.InRange(binary,a_array,b_array,binarytest) #generate mask #cv.Add(binary,cv.fromarray(ones*180),binary,mask=binarytest) #use mask to selectively add values svr.debug("R2?", binary) svr.debug("R2?", binary) #run adaptive threshold for edge detection and more noise filtering cv.AdaptiveThreshold( binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) if self.debug: color_filtered = cv.CloneImage(binary) # Get Edges cv.Canny(binary, binary, 30, 40) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi / 180, threshold=self.hough_threshold, param1=0, param2=0) # Get vertical lines vertical_lines = [] for line in raw_lines: if line[1] < self.vertical_threshold or \ line[1] > math.pi-self.vertical_threshold: #absolute value does better grouping currently vertical_lines.append((abs(line[0]), line[1])) #print message to user for performance purposes logging.debug("{} possibilities reduced to {} lines".format( len(raw_lines), len(vertical_lines))) # Group vertical lines vertical_line_groups = [ ] # A list of line groups which are each a line list i = 0 for line in vertical_lines: group_found = False for line_group in vertical_line_groups: i += 1 if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groups.append([line]) #quick debugging statement logging.debug("{} internal iterations for {} groups".format( i, len(vertical_line_groups))) # Average line groups into lines vertical_lines = [] for line_group in vertical_line_groups: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) vertical_lines.append(line) #################################################### #vvvv Horizontal line code isn't used for anything # Get 
horizontal lines horizontal_lines = [] for line in raw_lines: dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi if dist_from_horizontal < self.horizontal_threshold or \ dist_from_horizontal > math.pi-self.horizontal_threshold: horizontal_lines.append((abs(line[0]), line[1])) # Group horizontal lines horizontal_line_groups = [ ] # A list of line groups which are each a line list for line in horizontal_lines: group_found = False for line_group in horizontal_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: horizontal_line_groups.append([line]) if len(horizontal_line_groups) is 1: self.seen_crossbar = True if self.debug: rhos = map(lambda line: line[0], horizontal_line_groups[0]) angles = map(lambda line: line[1], horizontal_line_groups[0]) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) horizontal_lines = [line] else: self.seen_crossbar = False horizontal_lines = [] #^^^ Horizontal line code isn't used for anything ################################################### self.left_pole = None self.right_pole = None #print vertical_lines self.returning = 0 self.found = False if len(vertical_lines) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] self.left_pole = round( min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2 self.right_pole = round( max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2 self.returning = (self.left_pole + self.right_pole) / 2 logging.info("Returning {} as gate center delta.".format( self.returning)) #initialize first iteration with 2 known poles if self.last_seen < 0: self.last_center = None self.last_seen = 0 #increment a counter if result is good. if self.last_center is None: self.last_center = self.returning self.seen_count = 1 elif math.fabs(self.last_center - self.returning) < self.center_trans_thresh: self.seen_count += 1 self.last_seen += 2 else: self.last_seen -= 1 #if not conviced, forget left/right pole. Else proclaim success. if self.seen_count < self.seen_count_thresh: self.left_pole = None self.right_pole = None else: print "FOUND CENTER AND RETURNED IT" self.found = True else: self.returning = 0 if self.last_seen < 0: self.last_center = None self.last_seen = 0 self.last_seen -= 1 self.left_pole = None self.right_pole = None #TODO: If one pole is seen, is it left or right pole? if self.debug: cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB) libvision.misc.draw_lines(frame, vertical_lines) libvision.misc.draw_lines(frame, horizontal_lines) if self.found: cv.Circle(frame, (int(frame.width / 2 + self.returning), int(frame.height / 2)), 15, (0, 255, 0), 2, 8, 0) font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3) cv.PutText(frame, "Gate Sent to Mission Control", (100, 400), font, (255, 255, 0)) #print frame.width #cv.ShowImage("Gate", cv.CloneImage(frame)) svr.debug("Gate", cv.CloneImage(frame)) svr.debug("Unchanged", cv.CloneImage(unchanged_frame)) #populate self.output with infos self.output.seen_crossbar = self.seen_crossbar self.output.left_pole = self.left_pole self.output.right_pole = self.right_pole self.return_output()
def process_frame(self, frame): # Resize image to 320x240 #copy = cv.CreateImage(cv.GetSize(frame), 8, 3) #cv.Copy(frame, copy) #cv.SetImageROI(frame, (0, 0, 320, 240)) #cv.Resize(copy, frame, cv.CV_INTER_NN) found_hedge = False cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have value channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 1) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) cv.AdaptiveThreshold(binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology ''' kernel = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) ''' if self.debug: color_filtered = cv.CloneImage(binary) # Get Edges #cv.Canny(binary, binary, 30, 40) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi/180, threshold=self.hough_threshold, param1=0, param2=0 ) # Get vertical lines vertical_lines = [] for line in raw_lines: if line[1] < self.vertical_threshold or \ line[1] > math.pi-self.vertical_threshold: vertical_lines.append( (abs(line[0]), line[1]) ) # Group vertical lines vertical_line_groups = [] # A list of line groups which are each a line list for line in vertical_lines: group_found = False for line_group in vertical_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groups.append([line]) # Average line groups into lines vertical_lines = [] for line_group in vertical_line_groups: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos)/len(rhos), circular_average(angles, math.pi)) vertical_lines.append(line) # Get horizontal lines horizontal_lines = [] for line in raw_lines: dist_from_horizontal = (math.pi/2 + line[1]) % math.pi if dist_from_horizontal < self.horizontal_threshold or \ dist_from_horizontal > math.pi-self.horizontal_threshold: horizontal_lines.append( (abs(line[0]), line[1]) ) # Group horizontal lines horizontal_line_groups = [] # A list of line groups which are each a line list for line in horizontal_lines: group_found = False for line_group in horizontal_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: horizontal_line_groups.append([line]) if len(horizontal_line_groups) is 1: self.seen_crossbar = True rhos = map(lambda line: line[0], horizontal_line_groups[0]) angles = map(lambda line: line[1], horizontal_line_groups[0]) line = (sum(rhos)/len(rhos), circular_average(angles, math.pi)) horizontal_lines = [line] else: self.seen_crossbar = False horizontal_lines = [] self.left_pole = None self.right_pole = None if len(vertical_lines) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] self.left_pole = round(min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width/2 self.right_pole = round(max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width/2 #TODO: If one pole is seen, is it left or right pole? 
# Calculate planar distance r (assuming we are moving perpendicular to # the hedge) if self.left_pole and self.right_pole: theta = abs(self.left_pole - self.right_pole) self.r = 3 / tan(radians(theta/2)) else: self.r = None if self.r and self.seen_crossbar: bar_phi = (-1*horizontal_lines[0][0] + frame.height/2) / (frame.height/2) * 32 self.crossbar_depth = self.r * atan(radians(bar_phi)) else: self.crossbar_depth = None if self.debug: cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB) libvision.misc.draw_lines(frame, vertical_lines) libvision.misc.draw_lines(frame, horizontal_lines) #cv.ShowImage("Hedge", cv.CloneImage(frame)) svr.debug("Hedge", cv.CloneImage(frame)) #populate self.output with infos self.output.seen_crossbar = self.seen_crossbar self.output.left_pole = self.left_pole self.output.right_pole = self.right_pole self.output.r = self.r self.output.crossbar_depth = self.crossbar_depth self.return_output() print self
def process_frame(self, frame): self.numpy_frame = libvision.cv_to_cv2(frame) self.debug_frame = self.numpy_frame.copy() self.test_frame = self.numpy_frame.copy() self.numpy_frame = cv2.medianBlur(self.numpy_frame, 7) self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV) (rf1, rf2, rf3) = cv2.split(self.numpy_frame) Rbinary = rf3 Gbinary = rf1 # Adaptive Threshold Rbinary = cv2.adaptiveThreshold(Rbinary, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh) Gbinary = cv2.adaptiveThreshold(Gbinary, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, self.Gadaptive_thresh_blocksize, self.Gadaptive_thresh) # Morphology kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) Rbinary = cv2.erode(Rbinary, kernel) Rbinary = cv2.dilate(Rbinary, kernel) Gbinary = cv2.erode(Gbinary, kernel) Gbinary = cv2.dilate(Gbinary, kernel) Rframe = cv2.cvtColor(Rbinary, cv2.COLOR_GRAY2RGB) Gframe = cv2.cvtColor(Gbinary, cv2.COLOR_GRAY2RGB) # Hough Transform raw_linesG = cv2.HoughLines(Gbinary, rho=1, theta=math.pi / 180, threshold=self.hough_thresholdG) # Get vertical lines vertical_linesG = [] for line in raw_linesG[0]: rho = line[0] theta = line[1] if theta < self.vertical_thresholdG or \ theta > math.pi - self.vertical_thresholdG: vertical_linesG.append((abs(rho), theta)) # Group vertical lines vertical_line_groupsG = [] # A list of line groups which are each a line list for line in vertical_linesG: group_found = False for line_group in vertical_line_groupsG: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groupsG.append([line]) # Average line groups into lines vertical_linesG = [] for line_group in vertical_line_groupsG: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) vertical_linesG.append(line) # Get horizontal lines horizontal_lines = [] for line in raw_linesG[0]: rho = line[0] theta = line[1] dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi if dist_from_horizontal < self.horizontal_threshold or \ dist_from_horizontal > math.pi - self.horizontal_threshold: horizontal_lines.append((abs(line[0]), line[1])) # Group horizontal lines horizontal_line_groups = [] # A list of line groups which are each a line list print "Horizontal lines: ", for line in horizontal_lines: group_found = False for line_group in horizontal_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: horizontal_line_groups.append([line]) if len(horizontal_line_groups) is 1: self.seen_crossbar = True rhos = map(lambda line: line[0], horizontal_line_groups[0]) angles = map(lambda line: line[1], horizontal_line_groups[0]) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) horizontal_lines = [line] else: self.seen_crossbar = False horizontal_lines = [] self.left_pole = None self.right_pole = None Rframe = libvision.cv2_to_cv(Rframe) Gframe = libvision.cv2_to_cv(self.debug_frame) Rbinary = libvision.cv2_to_cv(Rbinary) self.debug_frame = libvision.cv2_to_cv(self.debug_frame) self.test_frame = libvision.cv2_to_cv(self.test_frame) Gbinary = libvision.cv2_to_cv(Gbinary) if len(vertical_linesG) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] self.left_pole = round(min(vertical_linesG[0][0], vertical_linesG[1][0]), 2) - width / 2 self.right_pole = 
round(max(vertical_linesG[0][0], vertical_linesG[1][0]), 2) - width / 2 # TODO: If one pole is seen, is it left or right pole? # Calculate planar distance r (assuming we are moving perpendicular to # the hedge) if self.left_pole and self.right_pole: theta = abs(self.left_pole - self.right_pole) self.r = 3 / math.tan(math.radians(theta / 2)) else: self.r = None if self.r and self.seen_crossbar: bar_phi = (-1 * horizontal_lines[0][0] + Gframe.height / 2) / (Gframe.height / 2) * 32 self.crossbar_depth = self.r * math.atan(math.radians(bar_phi)) else: self.crossbar_depth = None # Line Finding on Red pvc # Hough Transform line_storage = cv.CreateMemStorage() raw_linesR = cv.HoughLines2(Rbinary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi / 180, threshold=self.hough_thresholdR, param1=0, param2=0 ) # Get vertical lines vertical_linesR = [] for line in raw_linesR: if line[1] < self.vertical_thresholdR or \ line[1] > math.pi - self.vertical_thresholdR: vertical_linesR.append((abs(line[0]), line[1])) # Group vertical lines vertical_line_groupsR = [] # A list of line groups which are each a line list for line in vertical_linesR: group_found = False for line_group in vertical_line_groupsR: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groupsR.append([line]) # Average line groups into lines vertical_linesR = [] for line_group in vertical_line_groupsR: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) vertical_linesR.append(line) ''' for red_line in vertical_linesR: print "Red Line:", red_line[0],", ",red_line[1] for green_line in vertical_linesG: print "Green Line:", green_line[0],", ",green_line[1] ''' for red_line in vertical_linesR: for green_line in vertical_linesG[:]: if math.fabs(green_line[0] - red_line[0]) < self.GR_Threshold0 and \ math.fabs(green_line[1] - red_line[1]) < self.GR_Threshold1: vertical_linesG.remove(green_line) for red_line in vertical_linesR: print "New Red Line:", red_line[0], ", ", red_line[1] for green_line in vertical_linesG: print "New Green Line:", green_line[0], ", ", green_line[1] if len(vertical_linesR) is 0: print "No Red Found" self.left_pole = None self.right_pole = None if len(vertical_linesR) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] self.left_pole = round(min(vertical_linesR[0][0], vertical_linesR[1][0]), 2) - width / 2 self.right_pole = round(max(vertical_linesR[0][0], vertical_linesR[1][0]), 2) - width / 2 # TODO: If one pole is seen, is it left or right pole? 
# Calculate planar distance r (assuming we are moving perpendicular to # the hedge) if self.left_pole and self.right_pole: theta = abs(self.left_pole - self.right_pole) self.r = 3 / math.tan(math.radians(theta / 2)) else: self.r = None for i in range(len(vertical_linesR[:])): if vertical_linesR[i][1] > math.pi / 2: vertical_linesR[i] = (vertical_linesR[i][0], -(math.pi - vertical_linesR[i][1])) print "Line changed to ", vertical_linesR[i] for line in vertical_linesR: print line if line[1] > math.pi / 2: line = (line[0], math.pi - line[1]) print "Line changed to ", line libvision.misc.draw_lines(Gframe, vertical_linesG) libvision.misc.draw_lines(Gframe, horizontal_lines) libvision.misc.draw_lines(Rframe, vertical_linesR) # there was a merge error, these 3 lines conflicted b/c your copy out of date for line in vertical_linesR: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] x = line[0] * math.cos(line[1]) y = line[0] * math.sin(line[1]) cv.Circle(Rframe, (int(x), int(y)), 5, (0, 255, 0), -1, 8, 0) if x > width or y > width or x < 0 or y < 0: print "Lost point ", x svr.debug("Original", self.test_frame) svr.debug("Red", Rframe) svr.debug("Green", Gframe)
def process_frame(self, frame): self.numpy_frame = libvision.cv_to_cv2(frame) self.debug_frame = self.numpy_frame.copy() self.test_frame = self.numpy_frame.copy() self.numpy_frame = cv2.medianBlur(self.numpy_frame, 7) self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV) (rf1, rf2, rf3) = cv2.split(self.numpy_frame) Rbinary = rf3 Gbinary = rf1 # Adaptive Threshold Rbinary = cv2.adaptiveThreshold(Rbinary, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh) Gbinary = cv2.adaptiveThreshold(Gbinary, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, self.Gadaptive_thresh_blocksize, self.Gadaptive_thresh) # Morphology kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) Rbinary = cv2.erode(Rbinary, kernel) Rbinary = cv2.dilate(Rbinary, kernel) Gbinary = cv2.erode(Gbinary, kernel) Gbinary = cv2.dilate(Gbinary, kernel) Rframe = cv2.cvtColor(Rbinary, cv2.COLOR_GRAY2RGB) Gframe = cv2.cvtColor(Gbinary, cv2.COLOR_GRAY2RGB) # Hough Transform raw_linesG = cv2.HoughLines(Gbinary, rho=1, theta=math.pi / 180, threshold=self.hough_thresholdG) # Get vertical lines vertical_linesG = [] if raw_linesG is None: raw_linesG = [] if len(raw_linesG) > 0: for line in raw_linesG[0]: rho = line[0] theta = line[1] if theta < self.vertical_thresholdG or theta > ( math.pi - self.vertical_thresholdG): vertical_linesG.append((rho, theta)) # Group vertical lines vertical_line_groupsG = [ ] # A list of line groups which are each a line list for line in vertical_linesG: #print "Green Line Grouping Possibility:", line[0], ", ", line[1] group_found = False for line_group in vertical_line_groupsG: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groupsG.append([line]) # Average line groups into lines vertical_linesG = [] for line_group in vertical_line_groupsG: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) vertical_linesG.append(line) # Get horizontal lines horizontal_lines = [] if len(raw_linesG) > 0: for line in raw_linesG[0]: rho = line[0] theta = line[1] dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi if dist_from_horizontal < self.horizontal_threshold or dist_from_horizontal > math.pi - self.horizontal_threshold: horizontal_lines.append((abs(line[0]), line[1])) # Group horizontal lines horizontal_line_groups = [ ] # A list of line groups which are each a line list for line in horizontal_lines: group_found = False for line_group in horizontal_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: horizontal_line_groups.append([line]) if len(horizontal_line_groups) is 1: self.seen_crossbar = True rhos = map(lambda line: line[0], horizontal_line_groups[0]) angles = map(lambda line: line[1], horizontal_line_groups[0]) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) horizontal_lines = [line] else: self.seen_crossbar = False horizontal_lines = [] self.left_pole = None self.right_pole = None Rframe = libvision.cv2_to_cv(Rframe) Gframe = libvision.cv2_to_cv(self.debug_frame) Rbinary = libvision.cv2_to_cv(Rbinary) self.debug_frame = libvision.cv2_to_cv(self.debug_frame) self.test_frame = libvision.cv2_to_cv(self.test_frame) Gbinary = libvision.cv2_to_cv(Gbinary) if len(vertical_linesG) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = 
roi[3] self.left_pole = round( min(vertical_linesG[0][0], vertical_linesG[1][0]), 2) - width / 2 self.right_pole = round( max(vertical_linesG[0][0], vertical_linesG[1][0]), 2) - width / 2 # TODO: If one pole is seen, is it left or right pole? # Calculate planar distance r (assuming we are moving perpendicular to # the hedge) if self.left_pole and self.right_pole: theta = abs(self.left_pole - self.right_pole) self.r = 3 / math.tan(math.radians(theta / 2)) else: self.r = None if self.r and self.seen_crossbar: bar_phi = (-1 * horizontal_lines[0][0] + Gframe.height / 2) / (Gframe.height / 2) * 32 self.crossbar_depth = self.r * math.atan(math.radians(bar_phi)) else: self.crossbar_depth = None # Line Finding on Red pvc # Hough Transform line_storage = cv.CreateMemStorage() raw_linesR = cv.HoughLines2(Rbinary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi / 180, threshold=self.hough_thresholdR, param1=0, param2=0) # Get vertical lines vertical_linesR = [] for line in raw_linesR: if line[1] < self.vertical_thresholdR or \ line[1] > math.pi - self.vertical_thresholdR: vertical_linesR.append((abs(line[0]), line[1])) # Group vertical lines vertical_line_groupsR = [ ] # A list of line groups which are each a line list for line in vertical_linesR: group_found = False for line_group in vertical_line_groupsR: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groupsR.append([line]) # Average line groups into lines vertical_linesR = [] for line_group in vertical_line_groupsR: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) vertical_linesR.append(line) ''' for red_line in vertical_linesR: print "Red Line:", red_line[0],", ",red_line[1] for green_line in vertical_linesG: print "Green Line:", green_line[0],", ",green_line[1] ''' for red_line in vertical_linesR: for green_line in vertical_linesG[:]: if math.fabs(green_line[0] - red_line[0]) < self.GR_Threshold0 and \ math.fabs(green_line[1] - red_line[1]) < self.GR_Threshold1: vertical_linesG.remove(green_line) for red_line in vertical_linesR: print "New Red Line:", red_line[0], ", ", red_line[1] for green_line in vertical_linesG: print "New Green VLine:", green_line[0], ", ", green_line[1] for green_line in horizontal_lines: print "New Green HLine:", green_line[0], ", ", green_line[1] if len(vertical_linesR) is 0: print "No Red Found" self.left_pole = None self.right_pole = None if len(vertical_linesR) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] self.left_pole = round( min(vertical_linesR[0][0], vertical_linesR[1][0]), 2) - width / 2 self.right_pole = round( max(vertical_linesR[0][0], vertical_linesR[1][0]), 2) - width / 2 # TODO: If one pole is seen, is it left or right pole? 
# Calculate planar distance r (assuming we are moving perpendicular to # the hedge) if self.left_pole and self.right_pole: theta = abs(self.left_pole - self.right_pole) self.r = 3 / math.tan(math.radians(theta / 2)) else: self.r = None for i in range(len(vertical_linesR[:])): if vertical_linesR[i][1] > math.pi / 2: vertical_linesR[i] = (vertical_linesR[i][0], -(math.pi - vertical_linesR[i][1])) print "Line changed to ", vertical_linesR[i] for line in vertical_linesR: print line if line[1] > math.pi / 2: line = (line[0], math.pi - line[1]) print "Line changed to ", line libvision.misc.draw_lines(Gframe, vertical_linesG) libvision.misc.draw_lines(Gframe, horizontal_lines) libvision.misc.draw_lines(Rframe, vertical_linesR) # there was a merge error, these 3 lines conflicted b/c your copy out of date for line in vertical_linesR: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] x = line[0] * math.cos(line[1]) y = line[0] * math.sin(line[1]) cv.Circle(Rframe, (int(x), int(y)), 5, (0, 255, 0), -1, 8, 0) if x > width or y > width or x < 0 or y < 0: print "Lost point ", x svr.debug("Original", self.test_frame) svr.debug("Red", Rframe) svr.debug("Green", Gframe) svr.debug("Green Binary", Gbinary)
def process_frame(self, frame): debug_frame = cv.CreateImage(cv.GetSize(frame),8,3) cv.Copy(frame, debug_frame) cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have saturation channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 2) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) cv.AdaptiveThreshold(binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) # Get Edges #cv.Canny(binary, binary, 30, 40) cv.CvtColor(binary, debug_frame, cv.CV_GRAY2RGB) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi/180, threshold=self.hough_threshold, param1=0, param2=0 ) line_groups = [] # A list of line groups which are each a line list for line in raw_lines: group_found = False for line_group in line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: line_groups.append([line]) # Average line groups into lines lines = [] for line_group in line_groups: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos)/len(rhos), circular_average(angles, math.pi)) lines.append(line) libvision.misc.draw_lines(debug_frame, raw_lines) # cv.CvtColor(color_filtered,debug_frame, cv.CV_GRAY2RGB) svr.debug("Bins", debug_frame)
def process_frame(self, frame): ################ #setup CV ###### ################ print "processing frame" (w,h) = cv.GetSize(frame) #generate hue selection frames ones = np.ones((h,w,1), dtype='uint8') a = ones*(180 - self.target_hue) b = ones*(180 - self.target_hue + 20) a_array = cv.fromarray(a) b_array = cv.fromarray(b) #create locations for the test frame and binary frame frametest = cv.CreateImage(cv.GetSize(frame), 8, 3) binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1) #use the red channel for the binary frame (just for debugging purposes) cv.Copy(frame, frametest) cv.SetImageCOI(frametest, 3) cv.Copy(frametest, binarytest) #reset the COI for test frame to RGB. cv.SetImageCOI(frametest, 0) # Resize image to 320x240 #copy = cv.CreateImage(cv.GetSize(frame), 8, 3) #cv.Copy(frame, copy) #cv.SetImageROI(frame, (0, 0, 320, 240)) #cv.Resize(copy, frame, cv.CV_INTER_NN) found_gate = False #create a new frame for comparison purposes unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.Copy(frame,unchanged_frame) #apply noise filter #1 cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have saturation channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 1) cv.Copy(hsv, binary) #spin the color wheel (psuedo-code for later if necessary) # truncate spectrum marked as end # shift all values up based on truncating value (mask out 0 regions) # take truncated bits, and flip them (180->0, 179->1...) # dnow that truncated bits are flipped, add them back in to final image #Reset hsv COI cv.SetImageCOI(hsv, 0) #correct for wraparound on red spectrum cv.InRange(binary,a_array,b_array,binarytest) #generate mask cv.Add(binary,cv.fromarray(ones*180),binary,mask=binarytest) #use mask to selectively add values #run adaptive threshold for edge detection cv.AdaptiveThreshold(binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) if self.debug: color_filtered = cv.CloneImage(binary) # Get Edges cv.Canny(binary, binary, 30, 40) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi/180, threshold=self.hough_threshold, param1=0, param2=0 ) # Get vertical lines vertical_lines = [] i = 0 for line in raw_lines: if line[1] < self.vertical_threshold or \ line[1] > (math.pi-self.vertical_threshold): #absolute value does better grouping currently vertical_lines.append( (abs(line[0]),line[1]) ) i += 1 # print message to user for performance purposes logging.debug("{} possibilities reduced to {} lines".format( i, len(vertical_lines) ) ) # Group vertical lines vertical_line_groups = [] #A list of line groups which are each a line list i = 0 for line in vertical_lines: group_found = False for line_group in vertical_line_groups: i+=1 if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groups.append([line]) #quick debugging statement logging.debug("{} internal iterations for {} groups".format(i, len(vertical_line_groups))) # Average line groups into lines vertical_lines = [] for line_group in vertical_line_groups: rhos = map(lambda line: line[0], line_group) #get rho of each line angles 
= map(lambda line: line[1], line_group) line = (sum(rhos)/len(rhos), circular_average(angles, math.pi)) vertical_lines.append(line) self.left_pole = None self.right_pole = None self.returning = 0 self.found = False if len(vertical_lines) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] self.left_pole = round(min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width/2 self.right_pole = round(max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width/2 self.returning = (self.left_pole + self.right_pole)/2 logging.info("Returning {}".format(self.returning)) #If this is first iteration, count this as seeing the gate if self.last_seen < 0: self.last_center = None self.last_seen = 0 #increment a counter if result is good. if self.last_center is None: self.last_center = self.returning self.seen_count = 1 elif math.fabs(self.last_center - self.returning) < self.center_trans_thresh: self.seen_count += 1 self.last_seen += 2 else: self.last_seen -= 1 #if not convinced, forget left/right pole. Else, proclaim success. if self.seen_count < self.seen_count_thresh: self.left_pole = None self.right_pole = None else: print "FOUND CENTER AND RETURNED IT" self.found = True else: self.returning = 0 if self.last_seen < 0: self.last_center = None self.last_seen = 0 self.last_seen -= 1 self.left_pole = None self.right_POLE = None #extra debugging stuff if self.debug: cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB) libvision.misc.draw_lines(frame, vertical_lines) if self.found: cv.Circle(frame, (int(frame.width/2 + self.returning), int(frame.height/2)), 15, (0, 255,0), 2, 8, 0) font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3) cv.PutText(frame, "Gate Sent to Mission Control", (100, 400) , font, (255, 255, 0)) #print frame.width #cv.ShowImage("Gate", cv.CloneImage(frame)) svr.debug("Gate", cv.CloneImage(frame)) svr.debug("Unchanged", cv.CloneImage(unchanged_frame)) self.return_output()
def process_frame(self, frame): debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.Copy(frame, debug_frame) cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have saturation channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 2) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) cv.AdaptiveThreshold( binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) # Get Edges #cv.Canny(binary, binary, 30, 40) cv.CvtColor(binary, debug_frame, cv.CV_GRAY2RGB) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi / 180, threshold=self.hough_threshold, param1=0, param2=0) line_groups = [] # A list of line groups which are each a line list for line in raw_lines: group_found = False for line_group in line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: line_groups.append([line]) # Average line groups into lines lines = [] for line_group in line_groups: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) lines.append(line) libvision.misc.draw_lines(debug_frame, raw_lines) # cv.CvtColor(color_filtered,debug_frame, cv.CV_GRAY2RGB) svr.debug("Bins", debug_frame)
def process_frame(self, frame): (w,h) = cv.GetSize(frame) #generate hue selection frames #create locations for the a pair of test frames frametest = cv.CreateImage(cv.GetSize(frame), 8, 3) binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1) #use the red channel for the binary frame (just for debugging purposes) cv.Copy(frame, frametest) cv.SetImageCOI(frametest, 3) cv.Copy(frametest, binarytest) cv.SetImageCOI(frametest, 0) #reset COI #svr.debug("R?",binarytest) # Resize image to 320x240 #copy = cv.CreateImage(cv.GetSize(frame), 8, 3) #cv.Copy(frame, copy) #cv.SetImageROI(frame, (0, 0, 320, 240)) #cv.Resize(copy, frame, cv.CV_INTER_NN) found_gate = False #create a new frame just for comparison purposes unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.Copy(frame,unchanged_frame) #apply a course noise filter cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have saturation channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 1) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) #reset COI #shift hue of image such that orange->red are at top of spectrum binary = libvision.misc.cv_to_cv2(binary) binary = libvision.misc.shift_hueCV2(binary, self.target_shift) binary = libvision.misc.cv2_to_cv(binary) #correct for wraparound on red spectrum #cv.InRange(binary,a_array,b_array,binarytest) #generate mask #cv.Add(binary,cv.fromarray(ones*180),binary,mask=binarytest) #use mask to selectively add values #svr.debug("R2?",binary) svr.debug("R2?",binary) #run adaptive threshold for edge detection and more noise filtering cv.AdaptiveThreshold(binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) if self.debug: color_filtered = cv.CloneImage(binary) # Get Edges cv.Canny(binary, binary, 30, 40) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi/180, threshold=self.hough_threshold, param1=0, param2=0 ) # Get vertical lines vertical_lines = [] for line in raw_lines: if line[1] < self.vertical_threshold or \ line[1] > math.pi-self.vertical_threshold: #absolute value does better grouping currently vertical_lines.append((abs(line[0]), line[1])) #print message to user for performance purposes logging.debug("{} possibilities reduced to {} lines".format( len(raw_lines), len(vertical_lines) )) # Group vertical lines vertical_line_groups = [] # A list of line groups which are each a line list i = 0 for line in vertical_lines: group_found = False for line_group in vertical_line_groups: i += 1 if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groups.append([line]) #quick debugging statement logging.debug("{} internal iterations for {} groups".format(i, len(vertical_line_groups))) # Average line groups into lines vertical_lines = [] for line_group in vertical_line_groups: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos)/len(rhos), circular_average(angles, math.pi)) vertical_lines.append(line) #################################################### #vvvv Horizontal line code isn't used for anything # Get horizontal lines 
horizontal_lines = [] for line in raw_lines: dist_from_horizontal = (math.pi/2 + line[1]) % math.pi if dist_from_horizontal < self.horizontal_threshold or \ dist_from_horizontal > math.pi-self.horizontal_threshold: horizontal_lines.append( (abs(line[0]), line[1]) ) # Group horizontal lines horizontal_line_groups = [] # A list of line groups which are each a line list for line in horizontal_lines: group_found = False for line_group in horizontal_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: horizontal_line_groups.append([line]) if len(horizontal_line_groups) is 1: self.seen_crossbar = True if self.debug: rhos = map(lambda line: line[0], horizontal_line_groups[0]) angles = map(lambda line: line[1], horizontal_line_groups[0]) line = (sum(rhos)/len(rhos), circular_average(angles, math.pi)) horizontal_lines = [line] else: self.seen_crossbar = False horizontal_lines = [] #^^^ Horizontal line code isn't used for anything ################################################### self.left_pole = None self.right_pole = None #print vertical_lines self.returning = 0 self.found = False if len(vertical_lines) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] self.left_pole = round(min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width/2 self.right_pole = round(max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width/2 self.returning = (self.left_pole + self.right_pole)/2 logging.info("Returning {} as gate center delta.".format(self.returning)) #initialize first iteration with 2 known poles if self.last_seen < 0: self.last_center = None self.last_seen = 0 #increment a counter if result is good. if self.last_center is None: self.last_center = self.returning self.seen_count = 1 elif math.fabs(self.last_center - self.returning) < self.center_trans_thresh: self.seen_count += 1 self.last_seen += 2 else: self.last_seen -= 1 #if not conviced, forget left/right pole. Else proclaim success. if self.seen_count < self.seen_count_thresh: self.left_pole = None self.right_pole = None else: print "FOUND CENTER AND RETURNED IT" self.found = True else: self.returning = 0 if self.last_seen < 0: self.last_center = None self.last_seen = 0 self.last_seen -= 1 self.left_pole = None self.right_pole = None #TODO: If one pole is seen, is it left or right pole? if self.debug: cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB) libvision.misc.draw_lines(frame, vertical_lines) libvision.misc.draw_lines(frame, horizontal_lines) if self.found: cv.Circle(frame, (int(frame.width/2 + self.returning), int(frame.height/2)), 15, (0, 255,0), 2, 8, 0) font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3) cv.PutText(frame, "Gate Sent to Mission Control", (100, 400) , font, (255, 255, 0)) #print frame.width #cv.ShowImage("Gate", cv.CloneImage(frame)) svr.debug("Gate", cv.CloneImage(frame)) svr.debug("Unchanged",cv.CloneImage(unchanged_frame)) #populate self.output with infos self.output.seen_crossbar = self.seen_crossbar self.output.left_pole = self.left_pole self.output.right_pole = self.right_pole self.return_output()
def process_frame(self, frame): frametest = cv.CreateImage(cv.GetSize(frame), 8, 3) binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.Copy(frame, frametest) cv.SetImageCOI(frametest, 3) cv.Copy(frametest, binarytest) cv.SetImageCOI(frametest, 0) svr.debug("R?",binarytest) # Resize image to 320x240 #copy = cv.CreateImage(cv.GetSize(frame), 8, 3) #cv.Copy(frame, copy) #cv.SetImageROI(frame, (0, 0, 320, 240)) #cv.Resize(copy, frame, cv.CV_INTER_NN) found_gate = False unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) cv.Copy(frame,unchanged_frame) cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have saturation channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 1) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) cv.AdaptiveThreshold(binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) if self.debug: color_filtered = cv.CloneImage(binary) # Get Edges cv.Canny(binary, binary, 30, 40) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi/180, threshold=self.hough_threshold, param1=0, param2=0 ) # Get vertical lines vertical_lines = [] for line in raw_lines: if line[1] < self.vertical_threshold or \ line[1] > math.pi-self.vertical_threshold: #absolute value does better grouping currently vertical_lines.append((abs(line[0]), line[1])) # Group vertical lines vertical_line_groups = [] # A list of line groups which are each a line list for line in vertical_lines: group_found = False for line_group in vertical_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groups.append([line]) # Average line groups into lines vertical_lines = [] for line_group in vertical_line_groups: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos)/len(rhos), circular_average(angles, math.pi)) vertical_lines.append(line) # Get horizontal lines horizontal_lines = [] for line in raw_lines: dist_from_horizontal = (math.pi/2 + line[1]) % math.pi if dist_from_horizontal < self.horizontal_threshold or \ dist_from_horizontal > math.pi-self.horizontal_threshold: horizontal_lines.append( (abs(line[0]), line[1]) ) # Group horizontal lines horizontal_line_groups = [] # A list of line groups which are each a line list for line in horizontal_lines: group_found = False for line_group in horizontal_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: horizontal_line_groups.append([line]) if len(horizontal_line_groups) is 1: self.seen_crossbar = True if self.debug: rhos = map(lambda line: line[0], horizontal_line_groups[0]) angles = map(lambda line: line[1], horizontal_line_groups[0]) line = (sum(rhos)/len(rhos), circular_average(angles, math.pi)) horizontal_lines = [line] else: self.seen_crossbar = False horizontal_lines = [] self.left_pole = None self.right_pole = None print vertical_lines self.returning = 0 self.found = False if len(vertical_lines) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] self.left_pole = 
round(min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width/2 self.right_pole = round(max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width/2 self.returning = (self.left_pole + self.right_pole)/2 print "Returning ", self.returning if self.last_seen < 0: self.last_center = None self.last_seen = 0 if self.last_center is None: self.last_center = self.returning self.seen_count = 1 elif math.fabs(self.last_center - self.returning) < self.center_trans_thresh: self.seen_count += 1 self.last_seen += 2 else: self.last_seen -= 1 if self.seen_count < self.seen_count_thresh: self.left_pole = None self.right_pole = None else: print "FOUND CENTER AND RETURNED IT" self.found = True else: self.returning = 0 if self.last_seen < 0: self.last_center = None self.last_seen = 0 self.last_seen -= 1 self.left_pole = None self.right_pole = None #TODO: If one pole is seen, is it left or right pole? if self.debug: cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB) libvision.misc.draw_lines(frame, vertical_lines) libvision.misc.draw_lines(frame, horizontal_lines) if self.found: cv.Circle(frame, (int(frame.width/2 + self.returning), int(frame.height/2)), 15, (0, 255,0), 2, 8, 0) font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3) cv.PutText(frame, "Gate Sent to Mission Control", (100, 400) , font, (255, 255, 0)) print frame.width #cv.ShowImage("Gate", cv.CloneImage(frame)) svr.debug("Gate", cv.CloneImage(frame)) svr.debug("Unchanged",cv.CloneImage(unchanged_frame)) #populate self.output with infos self.output.seen_crossbar = self.seen_crossbar self.output.left_pole = self.left_pole self.output.right_pole = self.right_pole self.return_output() print self
def process_frame(self, frame): # Resize image to 320x240 #copy = cv.CreateImage(cv.GetSize(frame), 8, 3) #cv.Copy(frame, copy) #cv.SetImageROI(frame, (0, 0, 320, 240)) #cv.Resize(copy, frame, cv.CV_INTER_NN) found_hedge = False cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7) # Set binary image to have value channel hsv = cv.CreateImage(cv.GetSize(frame), 8, 3) binary = cv.CreateImage(cv.GetSize(frame), 8, 1) cv.CvtColor(frame, hsv, cv.CV_BGR2HSV) cv.SetImageCOI(hsv, 1) cv.Copy(hsv, binary) cv.SetImageCOI(hsv, 0) cv.AdaptiveThreshold( binary, binary, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, self.adaptive_thresh_blocksize, self.adaptive_thresh, ) # Morphology ''' kernel = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_ELLIPSE) cv.Erode(binary, binary, kernel, 1) cv.Dilate(binary, binary, kernel, 1) ''' if self.debug: color_filtered = cv.CloneImage(binary) # Get Edges #cv.Canny(binary, binary, 30, 40) # Hough Transform line_storage = cv.CreateMemStorage() raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD, rho=1, theta=math.pi / 180, threshold=self.hough_threshold, param1=0, param2=0) # Get vertical lines vertical_lines = [] for line in raw_lines: if line[1] < self.vertical_threshold or \ line[1] > math.pi - self.vertical_threshold: vertical_lines.append((abs(line[0]), line[1])) # Group vertical lines vertical_line_groups = [ ] # A list of line groups which are each a line list for line in vertical_lines: group_found = False for line_group in vertical_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: vertical_line_groups.append([line]) # Average line groups into lines vertical_lines = [] for line_group in vertical_line_groups: rhos = map(lambda line: line[0], line_group) angles = map(lambda line: line[1], line_group) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) vertical_lines.append(line) # Get horizontal lines horizontal_lines = [] for line in raw_lines: dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi if dist_from_horizontal < self.horizontal_threshold or \ dist_from_horizontal > math.pi - self.horizontal_threshold: horizontal_lines.append((abs(line[0]), line[1])) # Group horizontal lines horizontal_line_groups = [ ] # A list of line groups which are each a line list for line in horizontal_lines: group_found = False for line_group in horizontal_line_groups: if line_group_accept_test(line_group, line, self.max_range): line_group.append(line) group_found = True if not group_found: horizontal_line_groups.append([line]) if len(horizontal_line_groups) is 1: self.seen_crossbar = True rhos = map(lambda line: line[0], horizontal_line_groups[0]) angles = map(lambda line: line[1], horizontal_line_groups[0]) line = (sum(rhos) / len(rhos), circular_average(angles, math.pi)) horizontal_lines = [line] else: self.seen_crossbar = False horizontal_lines = [] self.left_pole = None self.right_pole = None if len(vertical_lines) is 2: roi = cv.GetImageROI(frame) width = roi[2] height = roi[3] self.left_pole = round( min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2 self.right_pole = round( max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2 # TODO: If one pole is seen, is it left or right pole? 
    # Calculate planar distance r (assuming we are moving perpendicular to
    # the hedge)
    if self.left_pole and self.right_pole:
        theta = abs(self.left_pole - self.right_pole)
        self.r = 3 / tan(radians(theta / 2))
    else:
        self.r = None

    if self.r and self.seen_crossbar:
        bar_phi = (-1 * horizontal_lines[0][0] + frame.height / 2) / (frame.height / 2) * 32
        self.crossbar_depth = self.r * atan(radians(bar_phi))
    else:
        self.crossbar_depth = None

    if self.debug:
        cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)
        libvision.misc.draw_lines(frame, vertical_lines)
        libvision.misc.draw_lines(frame, horizontal_lines)
        #cv.ShowImage("Hedge", cv.CloneImage(frame))
        svr.debug("Hedge", cv.CloneImage(frame))

    # Populate self.output with the hedge information
    self.output.seen_crossbar = self.seen_crossbar
    self.output.left_pole = self.left_pole
    self.output.right_pole = self.right_pole
    self.output.r = self.r
    self.output.crossbar_depth = self.crossbar_depth

    self.return_output()
    print self
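# A worked instance of the range estimate above, under the same assumptions the code
# appears to make (pole spacing of 3 units, pole offsets treated as degrees of bearing);
# hypothetical helper name, illustrative only:
def hedge_range_sketch(left_pole, right_pole, spacing=3.0):
    """Planar range to the hedge when viewing it head-on."""
    from math import tan, radians
    theta = abs(left_pole - right_pole)           # apparent angular width, in degrees
    return spacing / tan(radians(theta / 2.0))    # e.g. 20 degrees apart -> ~17.0 units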
def process_frame(self, frame):
    self.debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
    self.test_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
    Rframe = cv.CreateImage(cv.GetSize(frame), 8, 3)
    Gframe = cv.CreateImage(cv.GetSize(frame), 8, 3)

    cv.Copy(frame, self.test_frame)
    cv.Copy(frame, Gframe)
    cv.Copy(frame, Rframe)

    # self.debug_frame = libvision.cv_to_cv2(frame)
    # self.test_frame = self.debug_frame.copy()
    # Rframe = self.debug_frame.copy()
    # Gframe = self.debug_frame.copy()

    # Rframe = cv2.boxFilter(Rframe, (7, 7)) -- disabled: cv2.boxFilter needs a numpy
    # image and a ddepth argument, and the rest of this function works on IplImages.

    # Red frame
    cv.Smooth(Rframe, Rframe, cv.CV_MEDIAN, 7, 7)

    # Set binary image to have saturation channel
    hsv = cv.CreateImage(cv.GetSize(Rframe), 8, 3)
    Rbinary = cv.CreateImage(cv.GetSize(Rframe), 8, 1)
    cv.CvtColor(Rframe, hsv, cv.CV_BGR2HSV)
    cv.SetImageCOI(hsv, 3)  # 1 for only green pvc, 3 for red pvc
    cv.Copy(hsv, Rbinary)
    cv.SetImageCOI(hsv, 0)

    # Adaptive Threshold
    cv.AdaptiveThreshold(Rbinary, Rbinary,
                         255,
                         cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         cv.CV_THRESH_BINARY_INV,
                         self.adaptive_thresh_blocksize,
                         self.adaptive_thresh,
                         )

    # Morphology
    kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
    cv.Erode(Rbinary, Rbinary, kernel, 1)
    cv.Dilate(Rbinary, Rbinary, kernel, 1)

    cv.CvtColor(Rbinary, Rframe, cv.CV_GRAY2RGB)

    # Green frame
    cv.Smooth(Gframe, Gframe, cv.CV_MEDIAN, 7, 7)

    # Set binary image to have saturation channel
    hsv = cv.CreateImage(cv.GetSize(Gframe), 8, 3)
    Gbinary = cv.CreateImage(cv.GetSize(Gframe), 8, 1)
    cv.CvtColor(Gframe, hsv, cv.CV_BGR2HSV)
    cv.SetImageCOI(hsv, 1)  # 1 for only green pvc, 3 for red pvc
    cv.Copy(hsv, Gbinary)
    cv.SetImageCOI(hsv, 0)

    # Adaptive Threshold
    cv.AdaptiveThreshold(Gbinary, Gbinary,
                         255,
                         cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         cv.CV_THRESH_BINARY_INV,
                         self.Gadaptive_thresh_blocksize,
                         self.Gadaptive_thresh,
                         )

    # Morphology
    kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
    cv.Erode(Gbinary, Gbinary, kernel, 1)
    cv.Dilate(Gbinary, Gbinary, kernel, 1)

    cv.CvtColor(Gbinary, Gframe, cv.CV_GRAY2RGB)

    # Line Finding on Green pvc
    # Hough Transform
    line_storage = cv.CreateMemStorage()
    raw_linesG = cv.HoughLines2(Gbinary, line_storage, cv.CV_HOUGH_STANDARD,
                                rho=1,
                                theta=math.pi / 180,
                                threshold=self.hough_thresholdG,
                                param1=0,
                                param2=0)

    # Get vertical lines
    vertical_linesG = []
    for line in raw_linesG:
        if line[1] < self.vertical_thresholdG or \
           line[1] > math.pi - self.vertical_thresholdG:
            vertical_linesG.append((abs(line[0]), line[1]))

    # Group vertical lines
    vertical_line_groupsG = []  # A list of line groups which are each a line list
    for line in vertical_linesG:
        group_found = False
        for line_group in vertical_line_groupsG:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            vertical_line_groupsG.append([line])

    # Average line groups into lines
    vertical_linesG = []
    for line_group in vertical_line_groupsG:
        rhos = map(lambda line: line[0], line_group)
        angles = map(lambda line: line[1], line_group)
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        vertical_linesG.append(line)

    # Get horizontal lines
    horizontal_lines = []
    for line in raw_linesG:
        dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi
        if dist_from_horizontal < self.horizontal_threshold or \
           dist_from_horizontal > math.pi - self.horizontal_threshold:
            horizontal_lines.append((abs(line[0]), line[1]))

    # Group horizontal lines
    horizontal_line_groups = []  # A list of line groups which are each a line list
    print "Horizontal lines: ",
    for line in horizontal_lines:
        group_found = False
        for line_group in horizontal_line_groups:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            horizontal_line_groups.append([line])

    if len(horizontal_line_groups) == 1:
        self.seen_crossbar = True
        rhos = map(lambda line: line[0], horizontal_line_groups[0])
        angles = map(lambda line: line[1], horizontal_line_groups[0])
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        horizontal_lines = [line]
    else:
        self.seen_crossbar = False
        horizontal_lines = []

    self.left_pole = None
    self.right_pole = None
    if len(vertical_linesG) == 2:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        self.left_pole = round(min(vertical_linesG[0][0], vertical_linesG[1][0]), 2) - width / 2
        self.right_pole = round(max(vertical_linesG[0][0], vertical_linesG[1][0]), 2) - width / 2
    #TODO: If one pole is seen, is it left or right pole?

    # Calculate planar distance r (assuming we are moving perpendicular to
    # the hedge)
    if self.left_pole and self.right_pole:
        theta = abs(self.left_pole - self.right_pole)
        self.r = 3 / math.tan(math.radians(theta / 2))
    else:
        self.r = None

    if self.r and self.seen_crossbar:
        bar_phi = (-1 * horizontal_lines[0][0] + Gframe.height / 2) / (Gframe.height / 2) * 32
        self.crossbar_depth = self.r * math.atan(math.radians(bar_phi))
    else:
        self.crossbar_depth = None

    # Line Finding on Red pvc
    # Hough Transform
    line_storage = cv.CreateMemStorage()
    raw_linesR = cv.HoughLines2(Rbinary, line_storage, cv.CV_HOUGH_STANDARD,
                                rho=1,
                                theta=math.pi / 180,
                                threshold=self.hough_thresholdR,
                                param1=0,
                                param2=0)

    # Get vertical lines
    vertical_linesR = []
    for line in raw_linesR:
        if line[1] < self.vertical_thresholdR or \
           line[1] > math.pi - self.vertical_thresholdR:
            vertical_linesR.append((abs(line[0]), line[1]))

    # Group vertical lines
    vertical_line_groupsR = []  # A list of line groups which are each a line list
    for line in vertical_linesR:
        group_found = False
        for line_group in vertical_line_groupsR:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            vertical_line_groupsR.append([line])

    # Average line groups into lines
    vertical_linesR = []
    for line_group in vertical_line_groupsR:
        rhos = map(lambda line: line[0], line_group)
        angles = map(lambda line: line[1], line_group)
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        vertical_linesR.append(line)

    '''
    for red_line in vertical_linesR:
        print "Red Line:", red_line[0], ", ", red_line[1]
    for green_line in vertical_linesG:
        print "Green Line:", green_line[0], ", ", green_line[1]
    '''

    # Drop green lines that coincide with a red line
    for red_line in vertical_linesR:
        for green_line in vertical_linesG[:]:
            if math.fabs(green_line[0] - red_line[0]) < self.GR_Threshold0 and \
               math.fabs(green_line[1] - red_line[1]) < self.GR_Threshold1:
                vertical_linesG.remove(green_line)

    for red_line in vertical_linesR:
        print "New Red Line:", red_line[0], ", ", red_line[1]
    for green_line in vertical_linesG:
        print "New Green Line:", green_line[0], ", ", green_line[1]

    if len(vertical_linesR) == 0:
        print "No Red Found"

    self.left_pole = None
    self.right_pole = None
    if len(vertical_linesR) == 2:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        self.left_pole = round(min(vertical_linesR[0][0], vertical_linesR[1][0]), 2) - width / 2
        self.right_pole = round(max(vertical_linesR[0][0], vertical_linesR[1][0]), 2) - width / 2
    #TODO: If one pole is seen, is it left or right pole?
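# The duplicate-removal pass above drops any green line whose (rho, theta) falls within
# (GR_Threshold0, GR_Threshold1) of a red line, so a pole seen by both color filters is
# only reported once.  A compact sketch of the same idea (hypothetical helper name):
def remove_duplicate_lines_sketch(red_lines, green_lines, rho_thresh, theta_thresh):
    """Return the green lines that do not coincide with any red line."""
    return [g for g in green_lines
            if not any(abs(g[0] - r[0]) < rho_thresh and abs(g[1] - r[1]) < theta_thresh
                       for r in red_lines)]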
    # Calculate planar distance r (assuming we are moving perpendicular to
    # the hedge)
    if self.left_pole and self.right_pole:
        theta = abs(self.left_pole - self.right_pole)
        self.r = 3 / math.tan(math.radians(theta / 2))
    else:
        self.r = None

    # Normalize red line angles past pi/2 to their negative equivalent
    for i in range(len(vertical_linesR)):
        if vertical_linesR[i][1] > math.pi / 2:
            vertical_linesR[i] = (vertical_linesR[i][0], -(math.pi - vertical_linesR[i][1]))
            print "Line changed to ", vertical_linesR[i]

    for line in vertical_linesR:
        print line
        if line[1] > math.pi / 2:
            line = (line[0], math.pi - line[1])
            print "Line changed to ", line

    libvision.misc.draw_lines(Gframe, vertical_linesG)
    libvision.misc.draw_lines(Gframe, horizontal_lines)
    libvision.misc.draw_lines(Rframe, vertical_linesR)

    libvision.misc.draw_linesC(self.test_frame, vertical_linesG, (0, 100, 0))
    libvision.misc.draw_linesC(self.test_frame, horizontal_lines, (0, 100, 0))
    libvision.misc.draw_lines(self.test_frame, vertical_linesR)

    for line in vertical_linesR:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        x = line[0] * math.cos(line[1])
        y = line[0] * math.sin(line[1])
        cv.Circle(Rframe, (int(x), int(y)), 5, (0, 255, 0), -1, 8, 0)
        if x > width or y > height or x < 0 or y < 0:
            print "Lost point ", x

    svr.debug("Original", self.test_frame)
    svr.debug("Red", Rframe)
    svr.debug("Green", Gframe)
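# The drawing loop above turns each (rho, theta) Hough line into the point where the
# perpendicular from the image origin meets the line: x = rho*cos(theta),
# y = rho*sin(theta).  A small sketch of that conversion plus the bounds check
# (illustrative helper, not part of the module):
def line_foot_point_sketch(rho, theta, width, height):
    """Return the foot-of-perpendicular point for a Hough line, or None if it
    falls outside the image."""
    import math
    x = rho * math.cos(theta)
    y = rho * math.sin(theta)
    if 0 <= x <= width and 0 <= y <= height:
        return (int(x), int(y))
    return None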
def process_frame(self, frame):
    found_path = False
    cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

    # Use the RGB color finder
    binary = libvision.cmodules.target_color_rgb.find_target_color_rgb(frame, 250, 125, 0, 1500, 500, .3)

    if self.debug:
        color_filtered = cv.CloneImage(binary)

    # Get Edges
    cv.Canny(binary, binary, 30, 40)

    # Hough Transform
    line_storage = cv.CreateMemStorage()
    lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD,
                           rho=1,
                           theta=math.pi / 180,
                           threshold=self.hough_threshold,
                           param1=0,
                           param2=0)
    lines = lines[:self.lines_to_consider]  # Limit number of lines

    # If there are at least 2 lines and they are close to parallel...
    # There's a path!
    if len(lines) >= 2:
        # Find: min, max, average
        theta_max = lines[0][1]
        theta_min = lines[0][1]
        total_theta = 0
        for rho, theta in lines:
            total_theta += theta
            if theta_max < theta:
                theta_max = theta
            if theta_min > theta:
                theta_min = theta
        theta_range = theta_max - theta_min

        # Near vertical angles will wrap around from pi to 0.  If the range
        # crosses this vertical line, the range will be way too large.  To
        # correct for this, we always take the smallest angle between the
        # min and max.
        if theta_range > math.pi / 2:
            theta_range = math.pi - theta_range

        if theta_range < self.theta_threshold:
            found_path = True
            angles = map(lambda line: line[1], lines)
            self.theta = circular_average(angles, math.pi)

    if found_path:
        self.seen_in_a_row += 1
    else:
        self.seen_in_a_row = 0

    # Stores whether or not we are confident about the path's presence
    object_present = False

    if self.seen_in_a_row >= self.seen_in_a_row_threshold:
        object_present = True
        self.image_coordinate_center = self.find_centroid(binary)
        # Move the origin to the center of the image
        self.center = (
            self.image_coordinate_center[0] - frame.width / 2,
            self.image_coordinate_center[1] * -1 + frame.height / 2
        )

    if self.debug:
        # Show color filtered
        color_filtered_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(color_filtered, color_filtered_rgb, cv.CV_GRAY2RGB)
        cv.SubS(color_filtered_rgb, (255, 0, 0), color_filtered_rgb)
        cv.Sub(frame, color_filtered_rgb, frame)

        # Show edges
        binary_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(binary, binary_rgb, cv.CV_GRAY2RGB)
        cv.Add(frame, binary_rgb, frame)  # Add white to edge pixels
        cv.SubS(binary_rgb, (0, 0, 255), binary_rgb)
        cv.Sub(frame, binary_rgb, frame)  # Remove all but Red

        # Show lines
        if self.seen_in_a_row >= self.seen_in_a_row_threshold:
            rounded_center = (
                int(round(self.image_coordinate_center[0])),
                int(round(self.image_coordinate_center[1])),
            )
            cv.Circle(frame, rounded_center, 5, (0, 255, 0))
            libvision.misc.draw_lines(frame, [(frame.width / 2, self.theta)])
        else:
            libvision.misc.draw_lines(frame, lines)

        #cv.ShowImage("Path", frame)
        svr.debug("Path", frame)

    # Populate self.output with the path information
    self.output.found = object_present
    self.output.theta = self.theta

    if self.center:
        # Scale center coordinates of path based on frame size
        self.output.x = self.center[0] / (frame.width / 2)
        self.output.y = self.center[1] / (frame.height / 2)
        libvision.misc.draw_linesC(frame, [(frame.width / 2, self.output.theta)], [255, 0, 255])
        print "Output Returned!!! ", self.output.theta
    else:
        self.output.x = None
        self.output.y = None
        print "No output..."

    if self.output.found and self.center:
        print self.output
        self.return_output()
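# The path output above rescales the centroid so that x and y land in roughly [-1, 1]
# with the origin at the image center and positive y pointing up.  A minimal sketch of
# that coordinate change (hypothetical helper, not part of the module):
def normalize_image_point_sketch(px, py, width, height):
    """Map pixel coordinates to center-origin coordinates scaled by the half-size."""
    x = (px - width / 2.0) / (width / 2.0)
    y = (height / 2.0 - py) / (height / 2.0)
    return x, y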
def process_frame(self, frame):
    frametest = cv.CreateImage(cv.GetSize(frame), 8, 3)
    binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.Copy(frame, frametest)
    cv.SetImageCOI(frametest, 3)
    cv.Copy(frametest, binarytest)
    cv.SetImageCOI(frametest, 0)
    svr.debug("R?", binarytest)

    # Resize image to 320x240
    #copy = cv.CreateImage(cv.GetSize(frame), 8, 3)
    #cv.Copy(frame, copy)
    #cv.SetImageROI(frame, (0, 0, 320, 240))
    #cv.Resize(copy, frame, cv.CV_INTER_NN)

    found_gate = False

    unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.Copy(frame, unchanged_frame)

    cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

    # Set binary image to have saturation channel
    hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
    cv.SetImageCOI(hsv, 1)
    cv.Copy(hsv, binary)
    cv.SetImageCOI(hsv, 0)

    cv.AdaptiveThreshold(binary, binary,
                         255,
                         cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         cv.CV_THRESH_BINARY_INV,
                         self.adaptive_thresh_blocksize,
                         self.adaptive_thresh,
                         )

    # Morphology
    kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
    cv.Erode(binary, binary, kernel, 1)
    cv.Dilate(binary, binary, kernel, 1)

    if self.debug:
        color_filtered = cv.CloneImage(binary)

    # Get Edges
    cv.Canny(binary, binary, 30, 40)

    # Hough Transform
    line_storage = cv.CreateMemStorage()
    raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD,
                               rho=1,
                               theta=math.pi / 180,
                               threshold=self.hough_threshold,
                               param1=0,
                               param2=0)

    # Get vertical lines
    vertical_lines = []
    for line in raw_lines:
        if line[1] < self.vertical_threshold or \
           line[1] > math.pi - self.vertical_threshold:
            # absolute value does better grouping currently
            vertical_lines.append((abs(line[0]), line[1]))

    # Group vertical lines
    vertical_line_groups = []  # A list of line groups which are each a line list
    for line in vertical_lines:
        group_found = False
        for line_group in vertical_line_groups:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            vertical_line_groups.append([line])

    # Average line groups into lines
    vertical_lines = []
    for line_group in vertical_line_groups:
        rhos = map(lambda line: line[0], line_group)
        angles = map(lambda line: line[1], line_group)
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        vertical_lines.append(line)

    # Get horizontal lines
    horizontal_lines = []
    for line in raw_lines:
        dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi
        if dist_from_horizontal < self.horizontal_threshold or \
           dist_from_horizontal > math.pi - self.horizontal_threshold:
            horizontal_lines.append((abs(line[0]), line[1]))

    # Group horizontal lines
    horizontal_line_groups = []  # A list of line groups which are each a line list
    for line in horizontal_lines:
        group_found = False
        for line_group in horizontal_line_groups:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            horizontal_line_groups.append([line])

    if len(horizontal_line_groups) == 1:
        self.seen_crossbar = True
        if self.debug:
            rhos = map(lambda line: line[0], horizontal_line_groups[0])
            angles = map(lambda line: line[1], horizontal_line_groups[0])
            line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
            horizontal_lines = [line]
    else:
        self.seen_crossbar = False
        horizontal_lines = []

    self.left_pole = None
    self.right_pole = None
    print vertical_lines
    self.returning = 0
    self.found = False

    if len(vertical_lines) == 2:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        self.left_pole = round(min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
        self.right_pole = round(max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
        self.returning = (self.left_pole + self.right_pole) / 2
        print "Returning ", self.returning

        if self.last_seen < 0:
            self.last_center = None
            self.last_seen = 0

        if self.last_center is None:
            self.last_center = self.returning
            self.seen_count = 1
        elif math.fabs(self.last_center - self.returning) < self.center_trans_thresh:
            self.seen_count += 1
            self.last_seen += 2
        else:
            self.last_seen -= 1

        if self.seen_count < self.seen_count_thresh:
            self.left_pole = None
            self.right_pole = None
        else:
            print "FOUND CENTER AND RETURNED IT"
            self.found = True
    else:
        self.returning = 0

        if self.last_seen < 0:
            self.last_center = None
            self.last_seen = 0
        self.last_seen -= 1

        self.left_pole = None
        self.right_pole = None

    #TODO: If one pole is seen, is it left or right pole?

    if self.debug:
        cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)
        libvision.misc.draw_lines(frame, vertical_lines)
        libvision.misc.draw_lines(frame, horizontal_lines)

        if self.found:
            cv.Circle(frame,
                      (int(frame.width / 2 + self.returning), int(frame.height / 2)),
                      15, (0, 255, 0), 2, 8, 0)
            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3)
            cv.PutText(frame, "Gate Sent to Mission Control", (100, 400), font, (255, 255, 0))
            print frame.width

        #cv.ShowImage("Gate", cv.CloneImage(frame))
        svr.debug("Gate", cv.CloneImage(frame))
        svr.debug("Unchanged", cv.CloneImage(unchanged_frame))

    # Populate self.output with the gate information
    self.output.seen_crossbar = self.seen_crossbar
    self.output.left_pole = self.left_pole
    self.output.right_pole = self.right_pole

    self.return_output()
    print self