def subimage(self, image, center, theta, width, height):
    print "theta is:", theta
    image = libvision.cv2_to_cv(image)
    output_image = cv.CreateImage((int(width), int(height)),
                                  image.depth, image.nChannels)
    mapping = np.array([[np.cos(theta), -np.sin(theta), center[0]],
                        [np.sin(theta), np.cos(theta), center[1]]])
    map_matrix_cv = cv.fromarray(mapping)
    print mapping
    cv.GetQuadrangleSubPix(image, output_image, map_matrix_cv)
    return output_image
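# A cv2-only version of subimage is sketched below to avoid the cv round-trip;
# it is an assumption-level rewrite, not code from this repo.
# cv.GetQuadrangleSubPix samples the source at M * (x', y', 1) with (x', y')
# measured from the patch center, so the translation column must be shifted
# by half the patch size before handing the matrix to cv2.warpAffine with
# WARP_INVERSE_MAP.
def subimage_cv2(image, center, theta, width, height):
    w, h = int(width), int(height)
    mapping = np.array([[np.cos(theta), -np.sin(theta), center[0]],
                        [np.sin(theta), np.cos(theta), center[1]]])
    # shift so that dst pixel (0, 0) samples the patch's top-left corner
    mapping[:, 2] -= mapping[:, :2].dot(np.array([(w - 1) * 0.5, (h - 1) * 0.5]))
    return cv2.warpAffine(image, mapping, (w, h),
                          flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)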
def process_frame(self, frame):
    # This is equivalent to the old routine, but it isn't actually necessary
    #height, width, depth = libvision.cv_to_cv2(frame).shape
    #self.debug_frame = np.zeros((height, width, 3), np.uint8)

    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame2

    # Thresholding
    self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV,
                                             self.adaptive_thresh_blocksize,
                                             self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel = np.ones((2,2), np.uint8)
    self.numpy_frame = cv2.erode(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel)

    self.adaptive_frame = self.numpy_frame.copy()

    # Find contours
    contours, hierarchy = cv2.findContours(self.numpy_frame,
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_bins = []
    self.failed_bins = []

    if len(contours) > 1:
        cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3)

        for h, cnt in enumerate(contours):
            #hull = cv2.convexHull(cnt)
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)

            # test aspect ratio & area, create bin if matches
            (x, y), (w, h), theta = rect
            if w > 0 and h > 0:
                area = h * w
                if self.min_area < area < self.max_area:
                    approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
                    aspect_ratio = float(h) / w
                    # Depending on the orientation of the bin, "width" may be
                    # flipped with height, thus needs 2 conditions for each case
                    if ((1 / self.ratio_range[1]) < aspect_ratio < (1 / self.ratio_range[0]) or
                            self.ratio_range[0] < aspect_ratio < self.ratio_range[1]):
                        new_bin = Bin(tuple(box[0]), tuple(box[1]),
                                      tuple(box[2]), tuple(box[3]))
                        new_bin.id = self.recent_id
                        new_bin.area = area
                        self.recent_id += 1
                        self.raw_bins.append(new_bin)

    for bin in self.raw_bins:
        self.match_bins(bin)

    self.sort_bins()
    self.draw_bins()

    # populate self.output with info
    self.output.found = False
    if len(self.confirmed) > 0:
        self.output.found = True

    print self
    self.return_output()

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame)

    # svr.debug("processed", self.numpy_to_cv)
    # svr.debug("adaptive", self.adaptive_to_cv)
    # svr.debug("debug", self.debug_to_cv)
    self.debug_stream("debug", self.debug_frame)
    self.debug_stream("processed", self.numpy_frame)
    self.debug_stream("adaptive", self.adaptive_frame)
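# The blur -> HSV split -> adaptive threshold -> morphology preamble above
# recurs in nearly every process_frame in this section. A sketch of it
# factored into a helper (name and defaults are hypothetical; relies on the
# module's cv2 import):
def preprocess_channel(bgr_frame, channel, blocksize, offset, blur=5):
    blurred = cv2.medianBlur(bgr_frame, blur)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    chan = cv2.split(hsv)[channel]
    binary = cv2.adaptiveThreshold(chan, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY_INV, blocksize, offset)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    return cv2.dilate(cv2.erode(binary, kernel), kernel)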
def process_frame(self, frame):
    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame2

    # Thresholding
    self.buoy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                            cv2.ADAPTIVE_THRESH_MEAN_C,
                                            cv2.THRESH_BINARY_INV,
                                            self.adaptive_thresh_blocksize,
                                            self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
    # chain the morphology and contour search off the thresholded frame
    # (the original fed the raw channel back in, discarding the threshold)
    self.buoy_frame = cv2.erode(self.buoy_frame, kernel)
    self.buoy_frame = cv2.dilate(self.buoy_frame, kernel2)

    self.buoy_adaptive = self.buoy_frame.copy()

    # Find contours
    contours, hierarchy = cv2.findContours(self.buoy_frame,
                                           cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_led = []
    self.raw_buoys = []

    if len(contours) > 1:
        cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3)

        for h, cnt in enumerate(contours):
            approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
            center, radius = cv2.minEnclosingCircle(cnt)
            x, y = center

            if len(approx) > 12:
                if (radius > 30):
                    new_buoy = Buoy(int(x), int(y), int(radius), "unknown")
                    new_buoy.id = self.recent_id
                    self.recent_id += 1
                    self.raw_buoys.append(new_buoy)
                    cv2.drawContours(self.numpy_frame, [cnt], 0, (0, 0, 255), -1)

    for buoy1 in self.raw_buoys[:]:
        for buoy2 in self.raw_buoys[:]:
            if buoy1 is buoy2:
                continue
            if buoy1 in self.raw_buoys and buoy2 in self.raw_buoys and \
               math.fabs(buoy1.centerx - buoy2.centerx) > self.mid_sep and \
               math.fabs(buoy1.centery - buoy2.centery) > self.mid_sep:
                if buoy1.area < buoy2.area:
                    self.raw_buoys.remove(buoy1)
                elif buoy2.area < buoy1.area:
                    self.raw_buoys.remove(buoy2)

    for buoy in self.raw_buoys:
        self.match_buoys(buoy)

    self.sort_buoys()
    self.draw_buoys()
    self.return_output()

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.buoy_adaptive)

    svr.debug("processed", self.numpy_to_cv)
    svr.debug("adaptive", self.adaptive_to_cv)
    svr.debug("debug", self.debug_to_cv)
def process_frame(self, frame):
    # This is equivalent to the old routine, but it isn't actually necessary
    #height, width, depth = libvision.cv_to_cv2(frame).shape
    #self.debug_frame = np.zeros((height, width, 3), np.uint8)

    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms: denoise and convert to HSV
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)

    # Separate the channels for convenience later
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame3

    # Thresholding
    self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV,
                                             self.adaptive_thresh_blocksize,
                                             self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel = np.ones((2,2), np.uint8)
    self.numpy_frame = cv2.erode(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel)

    # capture a frame showing the effect of the adaptive threshold
    self.adaptive_frame = self.numpy_frame.copy()

    # Find contours of every shape present after thresholding
    contours, hierarchy = cv2.findContours(self.numpy_frame,
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_bins = []

    # if there are enough contours for at least one bin
    if len(contours) > 1:
        cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3)

        for h, cnt in enumerate(contours):
            #hull = cv2.convexHull(cnt)
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)

            # test aspect ratio & area, create bin if matches
            (x, y), (w, h), theta = rect
            if w > 0 and h > 0:
                area = h * w
                if self.min_area < area < self.max_area:
                    # approximate raw contour points to a simpler polygon
                    # with fewer points
                    approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
                    aspect_ratio = float(h) / w
                    # Depending on the orientation of the bin, "width" may be
                    # flipped with height, thus needs 2 conditions for each case
                    if 2 <= len(approx) < 12 and \
                       (.4 < aspect_ratio < .6 or 1.8 < aspect_ratio < 2.2):
                        new_bin = Bin(tuple(box[0]), tuple(box[1]),
                                      tuple(box[2]), tuple(box[3]))
                        new_bin.id = self.recent_id
                        new_bin.area = area
                        # print "new bin created with slope: ", new_bin.line_slope
                        #print -theta
                        # if theta != 0:
                        #     new_bin.theta = np.pi*(-theta)/180
                        # else:
                        #     new_bin.theta = 0
                        self.recent_id += 1
                        self.raw_bins.append(new_bin)

    for bin in self.raw_bins:
        self.match_bins(bin)

    self.sort_bins()
    self.draw_bins()
    self.return_output()

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame)

    # svr.debug("processed", self.numpy_to_cv)
    # svr.debug("adaptive", self.adaptive_to_cv)
    # svr.debug("debug", self.debug_to_cv)
    self.debug_stream("debug", self.debug_frame)
    self.debug_stream("processed", self.numpy_frame)
    self.debug_stream("adaptive", self.adaptive_frame)

    for bin in self.confirmed:
        print type(bin.patch)
        if (bin.patch.shape[1] != 0) and (bin.patch.shape[0] != 0):
            self.debug_stream("Patch" + str(bin.id), bin.patch)
            # svr.debug("Patch"+str(bin.id),libvision.cv2_to_cv(bin.patch))
        print bin.id
def process_frame(self, frame):
    self.numpy_frame = libvision.cv_to_cv2(frame)
    self.debug_frame = self.numpy_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 7)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (rf1, rf2, rf3) = cv2.split(self.numpy_frame)

    # RF2 - inverted for red
    # RF1 for green
    rBinary = rf2
    # rBinary = cv2.bitwise_not(rBinary)
    gBinary = rf1

    # Adaptive Threshold
    rBinary = cv2.adaptiveThreshold(rBinary, 255,
                                    cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY_INV,
                                    self.adaptive_thresh_blocksize,
                                    self.adaptive_thresh)
    gBinary = cv2.adaptiveThreshold(gBinary, 255,
                                    cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY_INV,
                                    self.Gadaptive_thresh_blocksize,
                                    self.Gadaptive_thresh)

    rFrame = rBinary.copy()
    gFrame = gBinary.copy()

    # Morphology
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    rBinary = cv2.erode(rBinary, kernel)
    rBinary = cv2.dilate(rBinary, kernel)
    gBinary = cv2.erode(gBinary, kernel)
    gBinary = cv2.dilate(gBinary, kernel)

    gray = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 150, 200, apertureSize=3)

    lines = cv2.HoughLines(edges, 1, np.pi / 180, 275)
    # HoughLines returns None when no line clears the threshold
    if lines is not None:
        for rho, theta in lines[0]:
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            # int() truncates toward zero (3.8 --> 3); to round to nearest
            # instead, use int(np.around(x)) (3.8 --> 4)
            x1 = int(x0 + 1000 * (-b))
            y1 = int(y0 + 1000 * (a))
            x2 = int(x0 - 1000 * (-b))
            y2 = int(y0 - 1000 * (a))
            cv2.line(self.debug_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

    rFrame = libvision.cv2_to_cv(rFrame)
    gFrame = libvision.cv2_to_cv(gFrame)
    self.debug_frame = libvision.cv2_to_cv(self.debug_frame)

    # svr.debug("Rframe", rFrame)
    # svr.debug("Gframe", gFrame)
    svr.debug("debug", self.debug_frame)
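# The rho/theta-to-endpoints conversion above reappears wherever standard
# HoughLines output is drawn; a sketch of it as a helper (hypothetical name,
# relies on the module's np import):
def hough_line_endpoints(rho, theta, reach=1000):
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho
    return ((int(x0 - reach * b), int(y0 + reach * a)),
            (int(x0 + reach * b), int(y0 - reach * a)))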
def process_frame(self, frame):
    # frame types:
    #self.debug_frame -- Frame containing helpful debug information

    # Debug numpy in CV2
    raw_frame = libvision.cv_to_cv2(frame)
    self.debug_frame = raw_frame

    # CV2 blur
    blur_frame = cv2.medianBlur(self.debug_frame, 5)

    # HSV conversion, sampled by the color detection below
    self.hsv_frame = cv2.cvtColor(blur_frame, cv2.COLOR_BGR2HSV)

    # collect brightly colored areas
    frame1 = self.adaptive_threshold(blur_frame, 0,
                                     self.adaptive_thresh_blocksize,
                                     self.adaptive_thresh)
    # collect shadows under colored areas
    frame2 = self.adaptive_threshold(blur_frame, 1,
                                     self.shadow_thresh_blocksize,
                                     self.shadow_thresh)

    # use composite as the adaptive threshold
    adaptive_frame = cv2.add(frame1, frame2*0)
    frame = adaptive_frame
    #self.debug_stream("help", <frame>)

    # morphology
    sequence = ([-self.erode_factor, self.erode_factor]*1
                + [self.bloom_factor, -self.bloom_factor]*1)
    despeckled_frame = self.morphology(frame, sequence)
    frame = despeckled_frame
    self.debug_stream("despeckled", despeckled_frame)

    # collect edges
    #a = 800  # TODO: ROI_edge detection
    edge_frame = self.ROI_edge_detection(raw_frame, frame, True)
    #edge_frame = cv2.Canny(frame, 150, 250, apertureSize=3)

    # collect buoy candidates using hough circles
    self.raw_circles = []
    self.raw_buoys = []
    self.raw_circles = cv2.HoughCircles(
        edge_frame,
        cv2.cv.CV_HOUGH_GRADIENT,
        self.inv_res_ratio,
        self.center_sep,
        np.array([]),
        self.upper_canny_thresh,
        self.acc_thresh,
        self.min_radius,
        self.max_radius,
    )

    # create a new buoy object for every circle that is detected
    if self.raw_circles is not None and len(self.raw_circles[0]) > 0:
        #print self.confirmed
        for circle in self.raw_circles[0]:
            (x, y, radius) = circle
            new_buoy = Buoy(x, y, radius, "unknown", self.next_id)
            self.next_id += 1
            self.raw_buoys.append(new_buoy)
            self.match_buoys(new_buoy)

    # sort buoys among confirmed/candidates
    self.sort_buoys()

    # self.debug_frame = cv2.add(<HUD_FRAME>,
    #                            cv2.cvtColor(<annotated_frame>, cv2.COLOR_GRAY2BGR))

    # perform color detection
    if self.confirmed is not None and len(self.confirmed) > 0:
        # vvv start color detection
        for buoy in self.confirmed:
            # draw a circle around the confirmed buoy
            cv2.circle(self.debug_frame,
                       (int(buoy.centerx), int(buoy.centery)),
                       int(buoy.radius) + 10, (255, 255, 255), 5)

            # sample the hue from a patch on the buoy
            color_pick_point = (int(buoy.centerx), int(buoy.centery - buoy.radius/2))
            _c = color_pick_point
            # ^^ offset a couple pixels upward for some reason
            colorHue = np.mean(self.hsv_frame[_c[1]-buoy.radius/2: _c[1]+buoy.radius/2,
                                              _c[0]-buoy.radius/2: _c[0]+buoy.radius/2,
                                              0])

            if BUOY_COLOR_PRINTS:
                print("buoy%d has a hue of %d" % (buoy.id, int(colorHue)))

            # note: color wraps around at 180. Range is 0->180
            if (colorHue >= 0 and colorHue < 45) or colorHue >= 95:  # 105->180->45
                cv2.putText(self.debug_frame, str(buoy.id)+"RED",
                            (int(buoy.centerx), int(buoy.centery)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
                buoy.color = "red"
            elif (colorHue >= 80 and colorHue < 95):  # green is hardest to detect
                cv2.putText(self.debug_frame, str(buoy.id)+"GRE",
                            (int(buoy.centerx), int(buoy.centery)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
                #if buoy.color != "red" and buoy.color != "yellow":
                #print "switched from ", buoy.color
                buoy.color = "green"
            else:  # yellow is about 50->80
                cv2.putText(self.debug_frame, str(buoy.id)+"YEL",
                            (int(buoy.centerx), int(buoy.centery)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
                buoy.color = "yellow"

            cv2.putText(self.debug_frame, "HUE="+str(int(colorHue)),
                        (int(buoy.centerx), int(buoy.centery-20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
        # ^^^ end color detection

    # debug frames
    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    #self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(adaptive_frame)
    #svr.debug("processed", self.numpy_to_cv)
    svr.debug("adaptive", self.adaptive_to_cv)
    svr.debug("debug", self.debug_to_cv)

    # generate vision output
    self.output.buoys = []
    if self.confirmed is not None and len(self.confirmed) > 0:
        for buoy in self.confirmed:
            buoy.theta = buoy.centerx  # <- a rough approximation
            buoy.phi = buoy.centery    # <- a rough approximation
            buoy.id = buoy.id
            self.output.buoys.append(buoy)

    # publish output
    #print ("%d buoys currently confirmed." % len(self.confirmed))
    if self.output.buoys:
        self.return_output()
    return self.output
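# The hue cutoffs above (45, 80, 95) also appear in the newer buoy variant
# further down; a helper like this sketch could centralize them. The bucket
# boundaries are copied from the branches above, nothing more:
def classify_hue(hue):
    """Map an OpenCV hue (0-180) to a buoy color name."""
    if hue < 45 or hue >= 95:  # red wraps around the top of the range
        return "red"
    elif 80 <= hue < 95:       # green is hardest to detect
        return "green"
    else:                      # yellow is about 50->80
        return "yellow"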
def process_frame(self, frame):
    numpy_frame = libvision.cv_to_cv2(frame)
    svr.debug("Original", frame)
    numpy_frame = cv2.medianBlur(numpy_frame, 7)
    debug_frame = numpy_frame.copy()
    numpy_frame = cv2.cvtColor(numpy_frame, cv2.COLOR_BGR2HSV)
    (h, s, v) = cv2.split(numpy_frame)
    binary = h
    binary = cv2.adaptiveThreshold(binary, 255,
                                   cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY_INV,
                                   self.adaptive_thresh_blocksize,
                                   self.adaptive_thresh)

    # Morphology
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    binary = cv2.dilate(binary, kernel)
    binary = cv2.dilate(binary, kernel)

    # Hough Transform
    raw_lines = cv2.HoughLinesP(binary,
                                rho=1,
                                theta=math.pi / 180,
                                threshold=self.hough_threshold,
                                minLineLength=50,
                                maxLineGap=10)
    if raw_lines is None:
        raw_lines = []
    else:
        raw_lines = raw_lines[0]

    def slope(line):
        """ Determine the slope of a line """
        (x1, y1, x2, y2) = line
        p1 = (x1, y1)
        p2 = (x2, y2)
        leftx = min(x1, x2)
        if p1[0] == leftx:
            left = p1
            right = p2
        else:
            right = p1
            left = p2
        # float() guards against Python 2 integer truncation; a vertical
        # line gets an infinite slope instead of a ZeroDivisionError
        dx = right[0] - left[0]
        if dx == 0:
            return float('inf')
        return float(right[1] - left[1]) / dx

    def angle(line):
        sl = slope(line)
        return math.degrees(math.atan2(sl, 1))

    def length(line):
        (x1, y1, x2, y2) = line
        return ((x2-x1)**2 + (y2-y1)**2) ** .5

    def center(line):
        """ Determine the center of a line """
        (x1, y1, x2, y2) = line
        p1 = (x1, y1)
        p2 = (x2, y2)
        leftx = min(x1, x2)
        if p1[0] == leftx:
            left = p1
            right = p2
        else:
            right = p1
            left = p2
        centerx = int(left[0] + length(line)/2*math.cos(math.atan2(slope(line), 1)))
        centery = int(left[1] + length(line)/2*math.sin(math.atan2(slope(line), 1)))
        return (centerx, centery)

    def is_vertical(line):
        return 60 <= abs(angle(line)) <= 90

    def is_horizontal(line):
        return 0 <= abs(angle(line)) <= 30

    def get_avg_endpoints(lines):
        lefts = []
        rights = []
        for line in lines:
            (x1, y1, x2, y2) = line
            p1 = (x1, y1)
            p2 = (x2, y2)
            leftx = min(x1, x2)
            if p1[0] == leftx:
                left = p1
                right = p2
            else:
                right = p1
                left = p2
            lefts.append(left)
            rights.append(right)
        return (average_pts(lefts), average_pts(rights))

    def get_med_endpoints(lines):
        lefts = []
        rights = []
        for line in lines:
            (x1, y1, x2, y2) = line
            p1 = (x1, y1)
            p2 = (x2, y2)
            leftx = min(x1, x2)
            if p1[0] == leftx:
                left = p1
                right = p2
            else:
                right = p1
                left = p2
            lefts.append(left)
            rights.append(right)
        return (bad_median(lefts, .25), bad_median(rights, .75))

    def average_pts(pts):
        num = len(pts)
        if num == 0:
            return None
        avg_x = sum(x for (x, y) in pts) / num
        avg_y = sum(y for (x, y) in pts) / num
        return (int(avg_x), int(avg_y))

    def median_pts(pts):
        num = len(pts)
        if num == 0:
            return None
        pts = sorted(pts, key=lambda x: x[0])
        return pts[num//2]

    def bad_median(pts, val=.5):
        num = len(pts)
        if num == 0:
            return None
        pts = sorted(pts, key=lambda x: x[0])
        return pts[int(num*val)]

    def get_normal_vec(line):
        sl = slope(line)
        return line

    h_lines = []
    v_lines = []
    for line in raw_lines:
        if is_horizontal(line):
            h_lines.append(line)
        elif is_vertical(line):
            v_lines.append(line)
        else:
            (x1, y1, x2, y2) = line
            cv2.line(debug_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

    if h_lines:
        self.seen_crossbar = False
        self.crossbar_depth = None

    for line in h_lines:
        (x1, y1, x2, y2) = line
        cv2.line(debug_frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
    for line in v_lines:
        (x1, y1, x2, y2) = line
        cv2.line(debug_frame, (x1, y1), (x2, y2), (255, 0, 0), 2)

    h_centers = [center(line) for line in h_lines]
    v_centers = sorted([center(line) for line in v_lines], key=lambda x: x[0])
    h_avg_center = median_pts(h_centers)
    v_avg_center = average_pts(v_centers)
    if h_avg_center:
        cv2.circle(debug_frame, h_avg_center, 5, (0, 0, 0), -1)
    if v_avg_center:
        cv2.circle(debug_frame, v_avg_center, 5, (0, 0, 0), -1)

    split_pt = None
    for i in range(len(v_centers)):
        if i < len(v_centers)-1 and v_centers[i+1][0] - v_centers[i][0] > 40:
            split_pt = i+1
            break

    left_pole_center = None
    right_pole_center = None
    if split_pt:
        left_centers = v_centers[:split_pt]
        right_centers = v_centers[split_pt:]
        avg_left = average_pts(left_centers)
        avg_right = average_pts(right_centers)
        left_pole_center = avg_left
        right_pole_center = avg_right
    elif v_avg_center and h_avg_center and h_avg_center[0] - v_avg_center[0] > 60:
        left_pole_center = v_avg_center
        cv2.circle(debug_frame, v_avg_center, 5, (0, 0, 0), -1)
    elif v_avg_center and h_avg_center and h_avg_center[0] - v_avg_center[0] < -60:
        right_pole_center = v_avg_center
        cv2.circle(debug_frame, v_avg_center, 5, (0, 0, 0), -1)
    else:
        avg_endpoints = get_med_endpoints(h_lines)
        lefts = avg_endpoints[0]
        rights = avg_endpoints[1]
        if lefts:
            cv2.circle(debug_frame, lefts, 5, (0, 0, 0), -1)
            left_pole_center = (lefts[0], lefts[1] - 80)
        if rights:
            cv2.circle(debug_frame, rights, 5, (0, 0, 0), -1)
            right_pole_center = (rights[0], rights[1] - 80)

    if left_pole_center:
        self.left_pole = left_pole_center[0]
        cv2.circle(debug_frame, left_pole_center, 5, (0, 0, 0), -1)
    if right_pole_center:
        self.right_pole = right_pole_center[0]
        cv2.circle(debug_frame, right_pole_center, 5, (0, 0, 0), -1)

    # median_slope_h = np.median(list(slope(line) for line in h_lines))
    # average_slope_v = None if len(v_lines) == 0 else sum(slope(line) for line in v_lines) / len(v_lines)
    # center_horiz =

    # points = []
    # for x1, y1, x2, y2 in raw_lines:
    #     points.append((x1, y1))
    #     points.append((x2, y2))
    # if points:
    #     rect = cv2.minAreaRect(np.array(points))
    #     box = cv2.cv.BoxPoints(rect)
    #     box = np.int0(box)
    #     # test aspect ratio & area, create bin if matches
    #     (x, y), (w, h), theta = rect
    #     cv2.drawContours(debug_frame, [box], 0, (0, 0, 255), 2)

    binary = libvision.cv2_to_cv(binary)
    svr.debug("Binary", binary)
    debug_frame = libvision.cv2_to_cv(debug_frame)
    svr.debug("Debug", debug_frame)
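# slope(), center(), get_avg_endpoints(), and get_med_endpoints() above all
# repeat the same left/right endpoint ordering; a shared helper would remove
# the duplication (sketch, hypothetical name):
def order_endpoints(line):
    """Return ((leftx, lefty), (rightx, righty)) for an (x1, y1, x2, y2) line."""
    (x1, y1, x2, y2) = line
    if x1 <= x2:
        return (x1, y1), (x2, y2)
    return (x2, y2), (x1, y1)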
def process_frame(self, frame):
    # frame types:
    #self.debug_frame -- Frame containing helpful debug information

    # Debug numpy in CV2
    raw_frame = libvision.cv_to_cv2(frame)
    self.debug_frame = raw_frame

    # CV2 blur
    blur_frame = cv2.medianBlur(self.debug_frame, 5)

    # collect brightly colored areas
    frame1 = self.adaptive_threshold(blur_frame, 4,
                                     self.adaptive_thresh_blocksize,
                                     self.adaptive_thresh)
    # collect shadows under colored areas
    frame2 = self.adaptive_threshold(blur_frame, 1,
                                     self.shadow_thresh_blocksize,
                                     self.shadow_thresh)

    # use composite as the adaptive threshold
    adaptive_frame = cv2.add(frame1, frame2*0)
    frame = adaptive_frame

    # morphology
    sequence = ([-self.erode_factor, self.erode_factor]*1
                + [self.bloom_factor, -self.bloom_factor]*1)
    despeckled_frame = self.morphology(frame, sequence)
    frame = despeckled_frame
    self.debug_stream("despeckled", despeckled_frame)

    # collect edges
    # ROI_edge detection
    edge_frame = self.ROI_edge_detection(raw_frame, frame, self.edge_threshold, 0, True)

    # collect buoy candidates using hough circles
    self.raw_circles = []
    self.raw_buoys = []
    self.raw_circles = cv2.HoughCircles(
        image=edge_frame,
        method=cv2.cv.CV_HOUGH_GRADIENT,
        dp=self.inv_res_ratio,
        minDist=self.center_sep,
        param1=self.upper_canny_thresh,
        param2=self.acc_thresh,
        minRadius=self.min_radius,
        maxRadius=self.max_radius,
    )
    if self.raw_circles is not None:
        # HoughCircles returns shape (1, N, 3); take all N circles, not
        # just the first
        self.raw_circles = np.round(self.raw_circles[0]).astype(int)

    # create a new buoy object for every circle that is detected
    #print(self.raw_circles)
    if self.raw_circles is not None:
        #print self.confirmed
        for circle in self.raw_circles:
            (x, y, radius) = circle
            new_buoy = Buoy(x, y, radius, "unknown", self.next_id)
            self.next_id += 1
            self.raw_buoys.append(new_buoy)
            self.match_buoys(new_buoy)
            cv2.circle(self.debug_frame, (x, y), int(radius), (0, 255, 0), 5)

    # sort buoys among confirmed/candidates
    self.sort_buoys()

    # self.debug_frame = cv2.add(<HUD_FRAME>,
    #                            cv2.cvtColor(<annotated_frame>, cv2.COLOR_GRAY2BGR))

    # perform color detection
    self.hsv_frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2HSV)[:, :, :]
    if self.confirmed is not None and len(self.confirmed) > 0:
        # vvv start color detection
        for buoy in self.confirmed:
            self.debug_frame = self.detect_buoy(buoy, self.debug_frame, self.hsv_frame)
        """
        # draw a circle around the confirmed buoy
        cv2.circle(self.debug_frame,
                   (int(buoy.centerx), int(buoy.centery)),
                   int(buoy.radius) + 10, (255, 255, 255), 5)

        # sample the hue from a patch on the buoy
        color_pick_point = (int(buoy.centerx), int(buoy.centery - buoy.radius/2))
        _c = color_pick_point
        # ^^ offset a couple pixels upward for some reason
        (total_height, total_width, _) = self.hsv_frame.shape
        colorHue = np.mean(self.hsv_frame[
            in_range(_c[1]-buoy.radius/2, 0, total_width):
            in_range(_c[1]+buoy.radius/2, 0, total_width),
            in_range(_c[0]-buoy.radius/2, 0, total_height):
            in_range(_c[0]+buoy.radius/2, 0, total_height),
            0])
        print(_c[0], _c[1], buoy.radius/2)
        print(buoy.centery-20, buoy.centerx)
        if BUOY_COLOR_PRINTS:
            print("buoy%d has a hue of %d" % (buoy.id, int(colorHue)))

        # note: color wraps around at 180. Range is 0->180
        if (colorHue >= 0 and colorHue < 45) or colorHue >= 95:  # 105->180->45
            cv2.putText(self.debug_frame, str(buoy.id)+"RED",
                        (int(buoy.centerx), int(buoy.centery)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
            buoy.color = "red"
        elif (colorHue >= 80 and colorHue < 95):  # green is hardest to detect
            cv2.putText(self.debug_frame, str(buoy.id)+"GRE",
                        (int(buoy.centerx), int(buoy.centery)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
            #if buoy.color != "red" and buoy.color != "yellow":
            #print "switched from ", buoy.color
            buoy.color = "green"
        else:  # yellow is about 50->80
            cv2.putText(self.debug_frame, str(buoy.id)+"YEL",
                        (int(buoy.centerx), int(buoy.centery)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
            buoy.color = "yellow"

        #print(buoy.centerx)
        cv2.putText(self.debug_frame, "HUE="+str(int(colorHue)),
                    (int(buoy.centerx), int(buoy.centery-20)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
        cv2.putText(self.debug_frame, "last_seen="+str(int(buoy.lastseen)),
                    (int(buoy.centerx), int(buoy.centery-40)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
        cv2.putText(self.debug_frame, "candidate="+str(int(buoy in self.candidates)),
                    (int(buoy.centerx), int(buoy.centery-60)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
        # ^^^ end color detection
        """

    # debug frames
    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    #self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(adaptive_frame)
    #svr.debug("processed", self.numpy_to_cv)
    svr.debug("adaptive", self.adaptive_to_cv)
    svr.debug("debug", self.debug_to_cv)

    # generate vision output
    FOV_x = 71.0
    FOV_y = 40.0
    x_resolution = raw_frame.shape[1]
    y_resolution = raw_frame.shape[0]
    self.output.buoys = []
    if self.confirmed is not None and len(self.confirmed) > 0:
        for buoy in self.confirmed:
            # rough approximations
            buoy.theta = (buoy.centerx - x_resolution/2.0) / (x_resolution/2.0) * (FOV_x/2.0)
            buoy.phi = -(buoy.centery - y_resolution/2.0) / (y_resolution/2.0) * (FOV_y/2.0)
            buoy.id = buoy.id
            self.output.buoys.append(buoy)

    # publish output
    #print ("%d buoys currently confirmed." % len(self.confirmed))
    if self.output.buoys:
        self.return_output()
    return self.output
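# The theta/phi expressions above map a pixel offset from the image center to
# an angle via linear interpolation across the field of view. Factored out as
# a sketch (FOV defaults are the same rough values used above):
def pixel_to_angles(x, y, x_res, y_res, fov_x=71.0, fov_y=40.0):
    theta = (x - x_res / 2.0) / (x_res / 2.0) * (fov_x / 2.0)
    phi = -(y - y_res / 2.0) / (y_res / 2.0) * (fov_y / 2.0)
    return theta, phi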
def process_frame(self, frame):
    # This is equivalent to the old routine, but it isn't actually necessary
    #height, width, depth = libvision.cv_to_cv2(frame).shape
    #self.debug_frame = np.zeros((height, width, 3), np.uint8)

    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame2

    # Thresholding
    self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV,
                                             self.adaptive_thresh_blocksize,
                                             self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
    # kernel = np.ones((2,2), np.uint8)
    self.numpy_frame = cv2.erode(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel2)

    self.adaptive_frame = self.numpy_frame.copy()

    # Find contours
    contours, hierarchy = cv2.findContours(self.numpy_frame,
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_buoys = []

    if len(contours) > 0:
        cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3)

        for h, cnt in enumerate(contours):
            approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
            center, radius = cv2.minEnclosingCircle(cnt)
            x, y = center

            if len(approx) > 12:
                if (radius > 30):
                    new_buoy = Buoy(int(x), int(y), int(radius), "unknown")
                    new_buoy.id = self.recent_id
                    self.recent_id += 1
                    self.raw_buoys.append(new_buoy)
                    cv2.drawContours(self.numpy_frame, [cnt], 0, (0, 0, 255), -1)

    for buoy1 in self.raw_buoys[:]:
        for buoy2 in self.raw_buoys[:]:
            if buoy1 is buoy2:
                continue
            if buoy1 in self.raw_buoys and buoy2 in self.raw_buoys and \
               math.fabs(buoy1.centerx - buoy2.centerx) > self.mid_sep and \
               math.fabs(buoy1.centery - buoy2.centery) > self.mid_sep:
                if buoy1.radius < buoy2.radius:
                    self.raw_buoys.remove(buoy1)
                elif buoy2.radius < buoy1.radius:
                    self.raw_buoys.remove(buoy2)

    for buoy in self.raw_buoys:
        self.match_buoys(buoy)

    self.sort_buoys()
    self.draw_buoys()
    self.return_output()

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame)

    svr.debug("processed", self.numpy_to_cv)
    svr.debug("adaptive", self.adaptive_to_cv)
    svr.debug("debug", self.debug_to_cv)

    # Convert to output format
    self.output.buoys = []
    if self.raw_buoys is not None and len(self.raw_buoys) > 0:
        for buoy in self.raw_buoys:
            # build a fresh output container instead of reusing (and
            # shadowing) the loop variable
            out_buoy = Container()
            out_buoy.theta = buoy.centerx
            out_buoy.phi = buoy.centery
            out_buoy.id = 1
            self.output.buoys.append(out_buoy)

    if self.output.buoys:
        self.return_output()
    return self.output
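# Container above is assumed to be a plain attribute bag from the shared
# vision-output code; a minimal stand-in for reading this section:
class Container(object):
    """Empty object carrying ad-hoc attributes (theta, phi, id)."""
    pass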
def process_frame(self, frame):
    # This is equivalent to the old routine, but it isn't actually necessary
    #height, width, depth = libvision.cv_to_cv2(frame).shape
    #self.debug_frame = np.zeros((height, width, 3), np.uint8)

    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame3

    # Thresholding
    self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV,
                                             self.adaptive_thresh_blocksize,
                                             self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #kernel = np.ones((2,2), np.uint8)
    self.numpy_frame = cv2.erode(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel)

    self.adaptive_frame = self.numpy_frame.copy()

    # Find contours
    contours, hierarchy = cv2.findContours(self.numpy_frame,
                                           cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_bins = []

    if len(contours) > 1:
        cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3)

        for h, cnt in enumerate(contours):
            #hull = cv2.convexHull(cnt)
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)

            # test aspect ratio & area, create bin if matches
            (x, y), (w, h), theta = rect
            if w > 0 and h > 0:
                area = h * w
                if self.min_area < area < self.max_area:
                    aspect_ratio = float(h) / w
                    # Depending on the orientation of the bin, "width" may be
                    # flipped with height, thus needs 2 conditions for each case
                    if .4 < aspect_ratio < .6 or 1.8 < aspect_ratio < 2.2:
                        new_bin = Bin(tuple(box[0]), tuple(box[1]),
                                      tuple(box[2]), tuple(box[3]))
                        new_bin.id = self.recent_id
                        new_bin.area = area
                        new_bin.theta = -theta
                        self.recent_id += 1
                        self.raw_bins.append(new_bin)

    # Removes bins that have centers too close to others (to prevent bins
    # inside bins)
    for bin1 in self.raw_bins[:]:
        for bin2 in self.raw_bins[:]:
            if bin1 is bin2:
                continue
            if bin1 in self.raw_bins and bin2 in self.raw_bins and \
               math.fabs(bin1.midx - bin2.midx) < self.mid_sep and \
               math.fabs(bin1.midy - bin2.midy) < self.mid_sep:
                if bin1.area < bin2.area:
                    self.raw_bins.remove(bin1)
                elif bin2.area < bin1.area:
                    self.raw_bins.remove(bin2)

    for bin in self.raw_bins:
        self.match_bins(bin)

    self.sort_bins()
    self.draw_bins()
    self.return_output()

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame)

    svr.debug("processed", self.numpy_to_cv)
    svr.debug("adaptive", self.adaptive_to_cv)
    svr.debug("debug", self.debug_to_cv)
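# The nested loop above drops the smaller of two bins whose centers fall
# within mid_sep on both axes. The same idea with a true Euclidean distance,
# as a sketch (helper name is hypothetical; relies on the module's math
# import):
def dedup_by_center(bins, min_dist):
    kept = []
    for b in sorted(bins, key=lambda b: b.area, reverse=True):
        if all(math.hypot(b.midx - k.midx, b.midy - k.midy) >= min_dist
               for k in kept):
            kept.append(b)
    return kept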
def process_frame(self, frame):
    self.numpy_frame = libvision.cv_to_cv2(frame)
    self.debug_frame = self.numpy_frame.copy()
    self.test_frame = self.numpy_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 7)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (rf1, rf2, rf3) = cv2.split(self.numpy_frame)
    Rbinary = rf3
    Gbinary = rf1

    # Adaptive Threshold
    Rbinary = cv2.adaptiveThreshold(Rbinary, 255,
                                    cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY_INV,
                                    self.adaptive_thresh_blocksize,
                                    self.adaptive_thresh)
    Gbinary = cv2.adaptiveThreshold(Gbinary, 255,
                                    cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY_INV,
                                    self.Gadaptive_thresh_blocksize,
                                    self.Gadaptive_thresh)

    # Morphology
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    Rbinary = cv2.erode(Rbinary, kernel)
    Rbinary = cv2.dilate(Rbinary, kernel)
    Gbinary = cv2.erode(Gbinary, kernel)
    Gbinary = cv2.dilate(Gbinary, kernel)

    Rframe = cv2.cvtColor(Rbinary, cv2.COLOR_GRAY2RGB)
    Gframe = cv2.cvtColor(Gbinary, cv2.COLOR_GRAY2RGB)

    # Hough Transform (returns None when no line clears the threshold)
    raw_linesG = cv2.HoughLines(Gbinary,
                                rho=1,
                                theta=math.pi / 180,
                                threshold=self.hough_thresholdG)
    if raw_linesG is None:
        raw_linesG = []

    # Get vertical lines
    vertical_linesG = []
    if len(raw_linesG) > 0:
        for line in raw_linesG[0]:
            rho = line[0]
            theta = line[1]
            if theta < self.vertical_thresholdG or \
               theta > math.pi - self.vertical_thresholdG:
                vertical_linesG.append((abs(rho), theta))

    # Group vertical lines
    vertical_line_groupsG = []  # A list of line groups which are each a line list
    for line in vertical_linesG:
        group_found = False
        for line_group in vertical_line_groupsG:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            vertical_line_groupsG.append([line])

    # Average line groups into lines
    vertical_linesG = []
    for line_group in vertical_line_groupsG:
        rhos = map(lambda line: line[0], line_group)
        angles = map(lambda line: line[1], line_group)
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        vertical_linesG.append(line)

    # Get horizontal lines
    horizontal_lines = []
    if len(raw_linesG) > 0:
        for line in raw_linesG[0]:
            rho = line[0]
            theta = line[1]
            dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi
            if dist_from_horizontal < self.horizontal_threshold or \
               dist_from_horizontal > math.pi - self.horizontal_threshold:
                horizontal_lines.append((abs(line[0]), line[1]))

    # Group horizontal lines
    horizontal_line_groups = []  # A list of line groups which are each a line list
    print "Horizontal lines: ",
    for line in horizontal_lines:
        group_found = False
        for line_group in horizontal_line_groups:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            horizontal_line_groups.append([line])

    if len(horizontal_line_groups) == 1:
        self.seen_crossbar = True
        rhos = map(lambda line: line[0], horizontal_line_groups[0])
        angles = map(lambda line: line[1], horizontal_line_groups[0])
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        horizontal_lines = [line]
    else:
        self.seen_crossbar = False
        horizontal_lines = []

    self.left_pole = None
    self.right_pole = None

    Rframe = libvision.cv2_to_cv(Rframe)
    Gframe = libvision.cv2_to_cv(self.debug_frame)
    Rbinary = libvision.cv2_to_cv(Rbinary)
    self.debug_frame = libvision.cv2_to_cv(self.debug_frame)
    self.test_frame = libvision.cv2_to_cv(self.test_frame)
    Gbinary = libvision.cv2_to_cv(Gbinary)

    if len(vertical_linesG) == 2:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        self.left_pole = round(min(vertical_linesG[0][0],
                                   vertical_linesG[1][0]), 2) - width / 2
        self.right_pole = round(max(vertical_linesG[0][0],
                                    vertical_linesG[1][0]), 2) - width / 2
    # TODO: If one pole is seen, is it left or right pole?

    # Calculate planar distance r (assuming we are moving perpendicular to
    # the hedge)
    if self.left_pole and self.right_pole:
        theta = abs(self.left_pole - self.right_pole)
        self.r = 3 / math.tan(math.radians(theta / 2))
    else:
        self.r = None

    if self.r and self.seen_crossbar:
        bar_phi = (-1 * horizontal_lines[0][0] + Gframe.height / 2) / (Gframe.height / 2) * 32
        self.crossbar_depth = self.r * math.atan(math.radians(bar_phi))
    else:
        self.crossbar_depth = None

    # Line Finding on Red pvc
    # Hough Transform
    line_storage = cv.CreateMemStorage()
    raw_linesR = cv.HoughLines2(Rbinary,
                                line_storage,
                                cv.CV_HOUGH_STANDARD,
                                rho=1,
                                theta=math.pi / 180,
                                threshold=self.hough_thresholdR,
                                param1=0,
                                param2=0)

    # Get vertical lines
    vertical_linesR = []
    for line in raw_linesR:
        if line[1] < self.vertical_thresholdR or \
           line[1] > math.pi - self.vertical_thresholdR:
            vertical_linesR.append((abs(line[0]), line[1]))

    # Group vertical lines
    vertical_line_groupsR = []  # A list of line groups which are each a line list
    for line in vertical_linesR:
        group_found = False
        for line_group in vertical_line_groupsR:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            vertical_line_groupsR.append([line])

    # Average line groups into lines
    vertical_linesR = []
    for line_group in vertical_line_groupsR:
        rhos = map(lambda line: line[0], line_group)
        angles = map(lambda line: line[1], line_group)
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        vertical_linesR.append(line)

    '''
    for red_line in vertical_linesR:
        print "Red Line:", red_line[0], ", ", red_line[1]
    for green_line in vertical_linesG:
        print "Green Line:", green_line[0], ", ", green_line[1]
    '''

    for red_line in vertical_linesR:
        for green_line in vertical_linesG[:]:
            if math.fabs(green_line[0] - red_line[0]) < self.GR_Threshold0 and \
               math.fabs(green_line[1] - red_line[1]) < self.GR_Threshold1:
                vertical_linesG.remove(green_line)

    for red_line in vertical_linesR:
        print "New Red Line:", red_line[0], ", ", red_line[1]
    for green_line in vertical_linesG:
        print "New Green Line:", green_line[0], ", ", green_line[1]

    if len(vertical_linesR) == 0:
        print "No Red Found"
        self.left_pole = None
        self.right_pole = None

    if len(vertical_linesR) == 2:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        self.left_pole = round(min(vertical_linesR[0][0],
                                   vertical_linesR[1][0]), 2) - width / 2
        self.right_pole = round(max(vertical_linesR[0][0],
                                    vertical_linesR[1][0]), 2) - width / 2
    # TODO: If one pole is seen, is it left or right pole?

    # Calculate planar distance r (assuming we are moving perpendicular to
    # the hedge)
    if self.left_pole and self.right_pole:
        theta = abs(self.left_pole - self.right_pole)
        self.r = 3 / math.tan(math.radians(theta / 2))
    else:
        self.r = None

    for i in range(len(vertical_linesR[:])):
        if vertical_linesR[i][1] > math.pi / 2:
            vertical_linesR[i] = (vertical_linesR[i][0],
                                  -(math.pi - vertical_linesR[i][1]))
            print "Line changed to ", vertical_linesR[i]

    for line in vertical_linesR:
        print line
        if line[1] > math.pi / 2:
            line = (line[0], math.pi - line[1])
            print "Line changed to ", line

    libvision.misc.draw_lines(Gframe, vertical_linesG)
    libvision.misc.draw_lines(Gframe, horizontal_lines)
    libvision.misc.draw_lines(Rframe, vertical_linesR)
    # there was a merge error, these 3 lines conflicted b/c your copy out of date

    for line in vertical_linesR:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        x = line[0] * math.cos(line[1])
        y = line[0] * math.sin(line[1])
        cv.Circle(Rframe, (int(x), int(y)), 5, (0, 255, 0), -1, 8, 0)
        if x > width or y > height or x < 0 or y < 0:
            print "Lost point ", x

    svr.debug("Original", self.test_frame)
    svr.debug("Red", Rframe)
    svr.debug("Green", Gframe)
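# line_group_accept_test and circular_average are called throughout the hedge
# code but not defined in this section. Minimal sketches of what they
# plausibly do, inferred only from the call sites (both are assumptions):
def line_group_accept_test(line_group, line, max_range):
    """Accept a (rho, theta) line into a group when its rho lies within
    max_range of every line already in the group."""
    return all(abs(line[0] - member[0]) < max_range for member in line_group)

def circular_average(angles, period):
    """Average a list of angles that wrap around at `period`."""
    s = sum(math.sin(2 * math.pi * a / period) for a in angles)
    c = sum(math.cos(2 * math.pi * a / period) for a in angles)
    return (math.atan2(s, c) * period / (2 * math.pi)) % period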
def process_frame(self, frame):
    # This is equivalent to the old routine, but it isn't actually necessary
    #height, width, depth = libvision.cv_to_cv2(frame).shape
    #self.debug_frame = np.zeros((height, width, 3), np.uint8)

    inv_res_ratio = 2
    center_sep = 100
    upper_canny_thresh = 40  # 40
    acc_thresh = 10  # 20, 50 with green settings
    min_radius = 3
    max_radius = 50

    # Debug numpy is CV2
    debug_frame = libvision.cv_to_cv2(frame)
    svr.debug("original", frame)

    # CV2 Transforms
    numpy_frame = debug_frame.copy()
    numpy_frame = cv2.medianBlur(numpy_frame, 5)
    numpy_frame = cv2.cvtColor(numpy_frame, cv2.COLOR_BGR2HSV)

    # Kernel for erosion/dilation
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

    # Split into HSV frames
    (h_frame, s_frame, v_frame) = cv2.split(numpy_frame)

    # Run inverse adaptive thresh on saturation channel
    s_adapt_thresh = cv2.adaptiveThreshold(s_frame, 255,
                                           cv2.ADAPTIVE_THRESH_MEAN_C,
                                           cv2.THRESH_BINARY_INV, 47, 10)

    # Erode and dilate the saturation frame
    s_eroded = cv2.erode(s_adapt_thresh, kernel)
    s_dilated = cv2.dilate(s_eroded, kernel)

    # Threshold the value frame
    _, v_thresh = cv2.threshold(v_frame, 250, 255, cv2.THRESH_BINARY)

    # Erode and dilate the value frame
    v_eroded = cv2.erode(v_thresh, kernel)
    v_dilated = cv2.dilate(v_eroded, kernel)

    s_contours = s_dilated.copy()
    # Find contours on the dilated saturation channel
    s_cnt, hy = cv2.findContours(s_dilated, cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)

    v_contours = v_dilated.copy()
    # Find contours on the dilated value channel
    v_cnt, hy = cv2.findContours(v_dilated, cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)

    if len(s_contours) > 0:
        cv2.drawContours(s_contours, s_cnt, -1, (255, 255, 255), 3)
    if len(v_contours) > 0:
        cv2.drawContours(v_contours, v_cnt, -1, (255, 255, 255), 3)

    s_circles = cv2.HoughCircles(
        s_contours,
        cv2.cv.CV_HOUGH_GRADIENT,
        inv_res_ratio,
        center_sep,
        np.array([]),
        upper_canny_thresh,
        acc_thresh,
        min_radius,
        max_radius,
    )
    v_circles = cv2.HoughCircles(
        v_contours,
        cv2.cv.CV_HOUGH_GRADIENT,
        inv_res_ratio,
        center_sep,
        np.array([]),
        upper_canny_thresh,
        acc_thresh,
        min_radius,
        max_radius,
    )

    # HoughCircles returns None when nothing is found
    if s_circles is not None:
        for circle in s_circles[0]:
            (x, y, radius) = circle
            cv2.circle(debug_frame, (int(x), int(y)),
                       int(radius) + 10, (0, 255, 0), 5)

    # if v_circles is not None:
    #     for circle in v_circles[0]:
    #         (x, y, radius) = circle
    #         cv2.circle(debug_frame, (int(x), int(y)), int(radius) + 10, (0, 0, 255), 5)

    # debug_to_cv = libvision.cv2_to_cv(v_circles)
    # svr.debug("v_frame", debug_to_cv)
    # debug_to_cv = libvision.cv2_to_cv(s_circles)
    # svr.debug("s_frame", debug_to_cv)

    debug_to_cv = libvision.cv2_to_cv(debug_frame)
    svr.debug("debug_frame", debug_to_cv)
def process_frame(self, frame):
    self.numpy_frame = libvision.cv_to_cv2(frame)
    self.debug_frame = self.numpy_frame.copy()
    self.test_frame = self.numpy_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 7)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (rf1, rf2, rf3) = cv2.split(self.numpy_frame)
    Rbinary = rf3
    Gbinary = rf1

    # Adaptive Threshold
    Rbinary = cv2.adaptiveThreshold(Rbinary, 255,
                                    cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY_INV,
                                    self.adaptive_thresh_blocksize,
                                    self.adaptive_thresh)
    Gbinary = cv2.adaptiveThreshold(Gbinary, 255,
                                    cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY_INV,
                                    self.Gadaptive_thresh_blocksize,
                                    self.Gadaptive_thresh)

    # Morphology
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    Rbinary = cv2.erode(Rbinary, kernel)
    Rbinary = cv2.dilate(Rbinary, kernel)
    Gbinary = cv2.erode(Gbinary, kernel)
    Gbinary = cv2.dilate(Gbinary, kernel)

    Rframe = cv2.cvtColor(Rbinary, cv2.COLOR_GRAY2RGB)
    Gframe = cv2.cvtColor(Gbinary, cv2.COLOR_GRAY2RGB)

    # Hough Transform
    raw_linesG = cv2.HoughLines(Gbinary,
                                rho=1,
                                theta=math.pi / 180,
                                threshold=self.hough_thresholdG)
    if raw_linesG is None:
        raw_linesG = []

    # Get vertical lines
    vertical_linesG = []
    if len(raw_linesG) > 0:
        for line in raw_linesG[0]:
            rho = line[0]
            theta = line[1]
            if theta < self.vertical_thresholdG or \
               theta > (math.pi - self.vertical_thresholdG):
                vertical_linesG.append((rho, theta))

    # Group vertical lines
    vertical_line_groupsG = []  # A list of line groups which are each a line list
    for line in vertical_linesG:
        #print "Green Line Grouping Possibility:", line[0], ", ", line[1]
        group_found = False
        for line_group in vertical_line_groupsG:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            vertical_line_groupsG.append([line])

    # Average line groups into lines
    vertical_linesG = []
    for line_group in vertical_line_groupsG:
        rhos = map(lambda line: line[0], line_group)
        angles = map(lambda line: line[1], line_group)
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        vertical_linesG.append(line)

    # Get horizontal lines
    horizontal_lines = []
    if len(raw_linesG) > 0:
        for line in raw_linesG[0]:
            rho = line[0]
            theta = line[1]
            dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi
            if dist_from_horizontal < self.horizontal_threshold or \
               dist_from_horizontal > math.pi - self.horizontal_threshold:
                horizontal_lines.append((abs(line[0]), line[1]))

    # Group horizontal lines
    horizontal_line_groups = []  # A list of line groups which are each a line list
    for line in horizontal_lines:
        group_found = False
        for line_group in horizontal_line_groups:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            horizontal_line_groups.append([line])

    if len(horizontal_line_groups) == 1:
        self.seen_crossbar = True
        rhos = map(lambda line: line[0], horizontal_line_groups[0])
        angles = map(lambda line: line[1], horizontal_line_groups[0])
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        horizontal_lines = [line]
    else:
        self.seen_crossbar = False
        horizontal_lines = []

    self.left_pole = None
    self.right_pole = None

    Rframe = libvision.cv2_to_cv(Rframe)
    Gframe = libvision.cv2_to_cv(self.debug_frame)
    Rbinary = libvision.cv2_to_cv(Rbinary)
    self.debug_frame = libvision.cv2_to_cv(self.debug_frame)
    self.test_frame = libvision.cv2_to_cv(self.test_frame)
    Gbinary = libvision.cv2_to_cv(Gbinary)

    if len(vertical_linesG) == 2:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        self.left_pole = round(min(vertical_linesG[0][0],
                                   vertical_linesG[1][0]), 2) - width / 2
        self.right_pole = round(max(vertical_linesG[0][0],
                                    vertical_linesG[1][0]), 2) - width / 2
    # TODO: If one pole is seen, is it left or right pole?

    # Calculate planar distance r (assuming we are moving perpendicular to
    # the hedge)
    if self.left_pole and self.right_pole:
        theta = abs(self.left_pole - self.right_pole)
        self.r = 3 / math.tan(math.radians(theta / 2))
    else:
        self.r = None

    if self.r and self.seen_crossbar:
        bar_phi = (-1 * horizontal_lines[0][0] + Gframe.height / 2) / (Gframe.height / 2) * 32
        self.crossbar_depth = self.r * math.atan(math.radians(bar_phi))
    else:
        self.crossbar_depth = None

    # Line Finding on Red pvc
    # Hough Transform
    line_storage = cv.CreateMemStorage()
    raw_linesR = cv.HoughLines2(Rbinary,
                                line_storage,
                                cv.CV_HOUGH_STANDARD,
                                rho=1,
                                theta=math.pi / 180,
                                threshold=self.hough_thresholdR,
                                param1=0,
                                param2=0)

    # Get vertical lines
    vertical_linesR = []
    for line in raw_linesR:
        if line[1] < self.vertical_thresholdR or \
           line[1] > math.pi - self.vertical_thresholdR:
            vertical_linesR.append((abs(line[0]), line[1]))

    # Group vertical lines
    vertical_line_groupsR = []  # A list of line groups which are each a line list
    for line in vertical_linesR:
        group_found = False
        for line_group in vertical_line_groupsR:
            if line_group_accept_test(line_group, line, self.max_range):
                line_group.append(line)
                group_found = True
        if not group_found:
            vertical_line_groupsR.append([line])

    # Average line groups into lines
    vertical_linesR = []
    for line_group in vertical_line_groupsR:
        rhos = map(lambda line: line[0], line_group)
        angles = map(lambda line: line[1], line_group)
        line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
        vertical_linesR.append(line)

    '''
    for red_line in vertical_linesR:
        print "Red Line:", red_line[0], ", ", red_line[1]
    for green_line in vertical_linesG:
        print "Green Line:", green_line[0], ", ", green_line[1]
    '''

    for red_line in vertical_linesR:
        for green_line in vertical_linesG[:]:
            if math.fabs(green_line[0] - red_line[0]) < self.GR_Threshold0 and \
               math.fabs(green_line[1] - red_line[1]) < self.GR_Threshold1:
                vertical_linesG.remove(green_line)

    for red_line in vertical_linesR:
        print "New Red Line:", red_line[0], ", ", red_line[1]
    for green_line in vertical_linesG:
        print "New Green VLine:", green_line[0], ", ", green_line[1]
    for green_line in horizontal_lines:
        print "New Green HLine:", green_line[0], ", ", green_line[1]

    if len(vertical_linesR) == 0:
        print "No Red Found"
        self.left_pole = None
        self.right_pole = None

    if len(vertical_linesR) == 2:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        self.left_pole = round(min(vertical_linesR[0][0],
                                   vertical_linesR[1][0]), 2) - width / 2
        self.right_pole = round(max(vertical_linesR[0][0],
                                    vertical_linesR[1][0]), 2) - width / 2
    # TODO: If one pole is seen, is it left or right pole?

    # Calculate planar distance r (assuming we are moving perpendicular to
    # the hedge)
    if self.left_pole and self.right_pole:
        theta = abs(self.left_pole - self.right_pole)
        self.r = 3 / math.tan(math.radians(theta / 2))
    else:
        self.r = None

    for i in range(len(vertical_linesR[:])):
        if vertical_linesR[i][1] > math.pi / 2:
            vertical_linesR[i] = (vertical_linesR[i][0],
                                  -(math.pi - vertical_linesR[i][1]))
            print "Line changed to ", vertical_linesR[i]

    for line in vertical_linesR:
        print line
        if line[1] > math.pi / 2:
            line = (line[0], math.pi - line[1])
            print "Line changed to ", line

    libvision.misc.draw_lines(Gframe, vertical_linesG)
    libvision.misc.draw_lines(Gframe, horizontal_lines)
    libvision.misc.draw_lines(Rframe, vertical_linesR)
    # there was a merge error, these 3 lines conflicted b/c your copy out of date

    for line in vertical_linesR:
        roi = cv.GetImageROI(frame)
        width = roi[2]
        height = roi[3]
        x = line[0] * math.cos(line[1])
        y = line[0] * math.sin(line[1])
        cv.Circle(Rframe, (int(x), int(y)), 5, (0, 255, 0), -1, 8, 0)
        if x > width or y > height or x < 0 or y < 0:
            print "Lost point ", x

    svr.debug("Original", self.test_frame)
    svr.debug("Red", Rframe)
    svr.debug("Green", Gframe)
    svr.debug("Green Binary", Gbinary)
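# Both hedge variants end by flipping (rho, theta) pairs whose theta is past
# pi/2; as a reusable helper (a sketch of the in-place transform above,
# hypothetical name):
def normalize_line(line):
    rho, theta = line
    if theta > math.pi / 2:
        return (rho, -(math.pi - theta))
    return line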
def process_frame(self, frame):
    # This is equivalent to the old routine, but it isn't actually necessary
    #height, width, depth = libvision.cv_to_cv2(frame).shape
    #self.debug_frame = np.zeros((height, width, 3), np.uint8)

    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame2

    # Thresholding
    self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV,
                                             self.adaptive_thresh_blocksize,
                                             self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
    #kernel = np.ones((2,2), np.uint8)
    self.numpy_frame = cv2.erode(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel2)
    self.adaptive_frame = self.numpy_frame.copy()

    # Find contours
    contours, hierarchy = cv2.findContours(self.numpy_frame,
                                           cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_led = []
    self.raw_buoys = []

    if len(contours) > 1:
        cnt = contours[0]
        cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3)
        for h, cnt in enumerate(contours):
            approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True),
                                      True)
            center, radius = cv2.minEnclosingCircle(cnt)
            x, y = center
            # Roughly circular, sufficiently large contours become buoys
            if len(approx) > 10 and radius > 17:
                new_buoy = Buoy(int(x), int(y), int(radius))
                new_buoy.id = self.recent_id
                self.recent_id += 1
                self.raw_buoys.append(new_buoy)
                cv2.drawContours(self.numpy_frame, [cnt], 0, (0, 0, 255), -1)

    for buoy in self.raw_buoys:
        self.match_buoys(buoy)

    self.sort_buoys()
    self.draw_buoys()
    self.return_output()

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame)

    svr.debug("processed", self.numpy_to_cv)
    svr.debug("adaptive", self.adaptive_to_cv)
    svr.debug("debug", self.debug_to_cv)
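# ---------------------------------------------------------------------------
# Hedged sketch: the vertex-count test above (len(approx) > 10 plus a radius
# floor) is a cheap roundness heuristic. A more direct measure compares the
# contour's area to that of its minimum enclosing circle. This helper is
# illustrative only and is not part of the module above.
# ---------------------------------------------------------------------------
import math

import cv2


def circularity(cnt):
    # Ratio of contour area to enclosing-circle area: close to 1.0 for round
    # blobs, much lower for elongated or ragged contours.
    center, radius = cv2.minEnclosingCircle(cnt)
    circle_area = math.pi * radius ** 2
    if circle_area == 0:
        return 0.0
    return cv2.contourArea(cnt) / circle_area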
def process_frame(self, frame):
    # This is equivalent to the old routine, but it isn't actually necessary
    #height, width, depth = libvision.cv_to_cv2(frame).shape
    #self.debug_frame = np.zeros((height, width, 3), np.uint8)

    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame2

    # Thresholding
    self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV,
                                             self.adaptive_thresh_blocksize,
                                             self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
    #kernel = np.ones((2,2), np.uint8)
    self.numpy_frame = cv2.erode(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel2)
    self.adaptive_frame = self.numpy_frame.copy()

    # Find contours
    contours, hierarchy = cv2.findContours(self.numpy_frame,
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    self.raw_buoys = []

    if len(contours) > 0:
        cnt = contours[0]
        cv2.drawContours(self.numpy_frame, contours, -1, (255, 255, 255), 3)
        for h, cnt in enumerate(contours):
            approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True),
                                      True)
            center, radius = cv2.minEnclosingCircle(cnt)
            x, y = center
            # Roughly circular, sufficiently large contours become buoys
            if len(approx) > 12 and radius > 30:
                new_buoy = Buoy(int(x), int(y), int(radius), "unknown")
                new_buoy.id = self.recent_id
                self.recent_id += 1
                # Append once (the original appended the same buoy twice)
                self.raw_buoys.append(new_buoy)
                cv2.drawContours(self.numpy_frame, [cnt], 0, (0, 0, 255), -1)

    # When two detections are well separated, keep only the larger one
    for buoy1 in self.raw_buoys[:]:
        for buoy2 in self.raw_buoys[:]:
            if buoy1 is buoy2:
                continue
            if buoy1 in self.raw_buoys and buoy2 in self.raw_buoys and \
               math.fabs(buoy1.centerx - buoy2.centerx) > self.mid_sep and \
               math.fabs(buoy1.centery - buoy2.centery) > self.mid_sep:
                if buoy1.radius < buoy2.radius:
                    self.raw_buoys.remove(buoy1)
                elif buoy2.radius < buoy1.radius:
                    self.raw_buoys.remove(buoy2)

    for buoy in self.raw_buoys:
        self.match_buoys(buoy)

    self.sort_buoys()
    self.draw_buoys()
    self.return_output()

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame)

    svr.debug("processed", self.numpy_to_cv)
    svr.debug("adaptive", self.adaptive_to_cv)
    svr.debug("debug", self.debug_to_cv)

    # Convert to output format. out_buoy replaces the original's rebinding of
    # the loop variable, which shadowed the Buoy being read.
    self.output.buoys = []
    if self.raw_buoys is not None and len(self.raw_buoys) > 0:
        for buoy in self.raw_buoys:
            out_buoy = Container()
            out_buoy.theta = buoy.centerx
            out_buoy.phi = buoy.centery
            out_buoy.id = 1
            self.output.buoys.append(out_buoy)

    if self.output.buoys:
        self.return_output()
    return self.output
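# ---------------------------------------------------------------------------
# Hedged sketch: the output block above publishes raw pixel coordinates in
# the theta/phi fields. If angular bearings are wanted instead, a linear
# pixel-to-angle map like the one below could be used. The field-of-view
# defaults are assumptions (the hedge code earlier scales by 32, suggesting a
# 64-degree span), and the function name is hypothetical.
# ---------------------------------------------------------------------------
def pixel_to_angles(x, y, width, height, fov_x_deg=64.0, fov_y_deg=64.0):
    # Map pixel (x, y) to (theta, phi) in degrees, with (0, 0) at the frame
    # center, theta positive to the right and phi positive upward.
    theta = (x - width / 2.0) / (width / 2.0) * (fov_x_deg / 2.0)
    phi = (height / 2.0 - y) / (height / 2.0) * (fov_y_deg / 2.0)
    return theta, phi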
def process_frame(self, frame):
    # Debug numpy is CV2
    self.debug_frame = libvision.cv_to_cv2(frame)

    # CV2 Transforms
    self.numpy_frame = self.debug_frame.copy()
    self.numpy_frame = cv2.medianBlur(self.numpy_frame, 5)
    self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)
    self.hsv_frame = self.numpy_frame
    (self.frame1, self.frame2, self.frame3) = cv2.split(self.numpy_frame)

    # Change the frame number to determine what channel to focus on
    self.numpy_frame = self.frame2

    # Thresholding
    self.numpy_frame = cv2.adaptiveThreshold(self.numpy_frame, 255,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV,
                                             self.adaptive_thresh_blocksize,
                                             self.adaptive_thresh)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
    self.numpy_frame = cv2.erode(self.numpy_frame, kernel)
    self.numpy_frame = cv2.dilate(self.numpy_frame, kernel2)

    self.adaptive_frame = self.numpy_frame.copy()
    self.numpy_frame = cv2.Canny(self.adaptive_frame, 100, 250,
                                 apertureSize=3)

    self.raw_circles = []
    self.raw_buoys = []
    self.raw_circles = cv2.HoughCircles(self.numpy_frame,
                                        cv2.cv.CV_HOUGH_GRADIENT,
                                        self.inv_res_ratio,
                                        self.center_sep,
                                        np.array([]),
                                        self.upper_canny_thresh,
                                        self.acc_thresh,
                                        self.min_radius,
                                        self.max_radius)

    # The original tested len(self.raw_circles[0] > 0), which takes the
    # length of a boolean array; the parenthesis belongs around len().
    if self.raw_circles is not None and len(self.raw_circles[0]) > 0:
        for circle in self.raw_circles[0]:
            (x, y, radius) = circle
            new_buoy = Buoy(x, y, radius, "unknown", self.next_id)
            self.next_id += 1
            self.raw_buoys.append(new_buoy)
            self.match_buoys(new_buoy)

    self.sort_buoys()

    if self.confirmed is not None and len(self.confirmed) > 0:
        for buoy in self.confirmed:
            cv2.circle(self.debug_frame,
                       (int(buoy.centerx), int(buoy.centery)),
                       int(buoy.radius) + 10, (255, 255, 255), 5)

            # Sample the hue just below the buoy center. OpenCV stores
            # 8-bit hue in [0, 179], so the >= 300 test can never fire.
            colorHue = self.hsv_frame[buoy.centery + buoy.radius / 2,
                                      buoy.centerx][0]
            if (colorHue >= 0 and colorHue < 45) or colorHue >= 300:
                cv2.putText(self.debug_frame, str(buoy.id) + "RED",
                            (int(buoy.centerx), int(buoy.centery)),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
                buoy.color = "red"
            elif (colorHue >= 70 and colorHue < 180):
                cv2.putText(self.debug_frame, str(buoy.id) + "GRE",
                            (int(buoy.centerx), int(buoy.centery)),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
                # Only switch to green if not already locked as red/yellow
                if buoy.color != "red" and buoy.color != "yellow":
                    print "switched from ", buoy.color
                    buoy.color = "green"
            else:
                cv2.putText(self.debug_frame, str(buoy.id) + "YEL",
                            (int(buoy.centerx), int(buoy.centery)),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
                buoy.color = "yellow"

    self.debug_to_cv = libvision.cv2_to_cv(self.debug_frame)
    self.numpy_to_cv = libvision.cv2_to_cv(self.numpy_frame)
    self.adaptive_to_cv = libvision.cv2_to_cv(self.adaptive_frame)

    svr.debug("processed", self.numpy_to_cv)
    svr.debug("adaptive", self.adaptive_to_cv)
    svr.debug("debug", self.debug_to_cv)

    # Convert to output format (the original's no-op buoy.id = buoy.id is
    # dropped)
    self.output.buoys = []
    if self.confirmed is not None and len(self.confirmed) > 0:
        for buoy in self.confirmed:
            buoy.theta = buoy.centerx
            buoy.phi = buoy.centery
            self.output.buoys.append(buoy)

    if self.output.buoys:
        self.return_output()
    return self.output
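# ---------------------------------------------------------------------------
# Hedged sketch: since OpenCV 8-bit hue lives in [0, 179], the colorHue >=
# 300 branch above is dead code, and reds near the wrap-around (hue ~170+)
# fall through to yellow. A classifier written against the real range might
# look like this; the band edges are illustrative, not calibrated values.
# ---------------------------------------------------------------------------
def classify_hue(hue):
    # Red wraps around the top of OpenCV's hue range.
    if hue < 23 or hue >= 150:
        return "red"
    elif 35 <= hue < 90:
        return "green"
    else:
        return "yellow"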