def get_mask_with_contour(img, ret_img=False, ret_cont=False, with_init_mask=False,
                          cont_color=cv.RGB(255, 50, 50), normalize=True,
                          skin_version=1, strong=False):
    if normalize:
        img = normalize_rgb(img, aggressive=0.005)
    mask = skin_mask(img) if skin_version == 1 else skin_mask2(img)
    di_mask = image_empty_clone(mask)
    cv.Dilate(mask, di_mask)
    seqs = cv.FindContours(cv.CloneImage(di_mask), memory(), cv.CV_RETR_EXTERNAL)

    c_img = image_empty_clone(mask)
    cv.DrawContours(c_img, seqs, 255, 255, 10, -1)
    er_img = image_empty_clone(c_img)
    cv.Erode(c_img, er_img, iterations=2)

    seqs = cv.FindContours(cv.CloneImage(er_img), memory(), cv.CV_RETR_EXTERNAL)
    if not seqs:
        print "no areas"
        return img, None
    seqs = cv.ApproxPoly(seqs, memory(), cv.CV_POLY_APPROX_DP, parameter=3, parameter2=1)

    result = []
    if ret_img:
        # er_seq_img = cv.CreateImage(sizeOf(er_img), 8, 3)
        # cv.Zero(er_seq_img)
        er_seq_img = cv.CloneImage(img)
        if with_init_mask:
            cv.Merge(mask, mask, mask, None, er_seq_img)
        if strong:
            cv.DrawContours(er_seq_img, seqs, cont_color, 0, 10, thickness=3)
            cv.DrawContours(er_seq_img, seqs, cv.RGB(0, 0, 0), 0, 10, thickness=1)
        else:
            cv.DrawContours(er_seq_img, seqs, cont_color, 0, 10, thickness=1)
        result.append(er_seq_img)
    if ret_cont:
        result.append(seqs)
    return result
def lineScan(self, image, source=None):
    """ Performs contour detection on a single-channel image.
        Returns a list of 2d points containing the outer left line
        of each detected contour.
    """
    storage = cv.CreateMemStorage()
    contours = cv.FindContours(image, storage, mode=cv.CV_RETR_EXTERNAL,
                               method=cv.CV_CHAIN_APPROX_NONE, offset=(0, 0))
    points = []
    if contours:
        cv.DrawContours(source, contours, (0, 0, 0), (0, 0, 0), 7, -1)
    while contours:
        y_max = max([y for _, y in contours])
        seq = [(x, y) for x, y in contours]
        i = 0
        # extract only the left side of the polygon
        for i in range(0, len(seq) - 1):
            if seq[i][1] == y_max:
                break
        seq = seq[:i]
        if source:
            # draw the detected polygon line as feedback
            cv.PolyLine(source, [seq], False, (0, 255, 0), 1, 8, 0)
        points.extend(seq)
        contours = contours.h_next()
    return points
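# A minimal usage sketch for lineScan (an assumption, not part of the original
# module): "scanner" stands for whatever object carries the method, and the
# file name is a placeholder. The scan runs on a binary single-channel image
# and the feedback is drawn onto a colour copy.
def _line_scan_demo(scanner):
    image = cv.LoadImage("slice.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
    feedback = cv.CreateImage(cv.GetSize(image), 8, 3)
    cv.CvtColor(image, feedback, cv.CV_GRAY2BGR)
    points = scanner.lineScan(image, source=feedback)
    print len(points), "profile points"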
def motion_detector():
    global max_area, avg, prev_pos, largest_contour
    contour = cv.FindContours(temp_image, store, mode=cv.CV_RETR_EXTERNAL,
                              method=cv.CV_CHAIN_APPROX_NONE)  # finding contours
    cv.Copy(img, render_image)  # copy the frame so we can paint on it
    if len(contour) != 0:
        temp_contour = contour
        area = 0
        max_area_test = max_area
        while temp_contour != None:  # routine to find the largest contour
            area = cv.ContourArea(temp_contour)
            if area > max_area_test:
                largest_contour = temp_contour
                max_area_test = area
            temp_contour = temp_contour.h_next()
        rect = cv.BoundingRect(largest_contour)
        cv.DrawContours(render_image, largest_contour, (0, 255, 0), (0, 0, 255), 1, 3)
        cv.Rectangle(render_image, (rect[0], rect[1]),
                     (rect[0] + rect[2], rect[1] + rect[3]), (255, 0, 0))
        avg = rect[0] + rect[2] / 2
    else:
        avg = img.width / 2
def findRectPoints(self, oldRectPoints):
    hueRange = self.hueRange
    satRange = self.satRange
    valRange = self.valRange
    clone = cv.CloneImage(self.frame)
    hsv = cv.CloneImage(self.channels3)
    threshold = cv.CloneImage(self.channels1)
    threshold2 = cv.CloneImage(self.channels1)
    cv.Smooth(clone, clone, cv.CV_GAUSSIAN, 7, 7)
    cv.CvtColor(clone, hsv, cv.CV_BGR2HSV)
    # both ranges are (hue, sat, val) triplets
    cv.InRangeS(hsv, (hueRange[0], satRange[0], valRange[0]),
                (hueRange[1], satRange[1], valRange[1]), threshold)
    cv.InRangeS(hsv, (hueRange[2], satRange[0], valRange[0]),
                (hueRange[3], satRange[1], valRange[1]), threshold2)
    cv.Add(threshold, threshold2, threshold)
    cv.Erode(threshold, threshold, iterations=5)
    cv.Dilate(threshold, threshold, iterations=5)
    # cv.ShowImage(self.color, threshold)
    memory = cv.CreateMemStorage(0)
    clone2 = cv.CloneImage(threshold)
    contours = cv.FindContours(clone2, memory, cv.CV_RETR_LIST,
                               cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
    if not contours:
        rectPoints = oldRectPoints
    else:
        rectPoints = cv.BoundingRect(list(contours))
    return rectPoints
def find_biggest_region(self):
    """ this code should find the biggest region and then determine some
        of its characteristics, which will help direct the drone
    """
    # copy the thresholded image
    cv.Copy(self.threshed_image, self.copy)  # copy self.threshed_image

    # this is OpenCV's call to find all of the contours:
    contours = cv.FindContours(self.copy, self.storage,
                               cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)

    # next we want to find the *largest* contour
    if len(contours) > 0:
        biggest = contours
        biggestArea = cv.ContourArea(contours)
        while contours != None:
            nextArea = cv.ContourArea(contours)
            if biggestArea < nextArea:
                biggest = contours
                biggestArea = nextArea
            contours = contours.h_next()

        # use OpenCV to get a bounding rectangle for the largest contour
        self.br = cv.BoundingRect(biggest, update=0)

        # publish the data
        self.publishBoxData()
def detect_card(grey_image, grey_base, thresh=100):
    diff = cv.CloneImage(grey_image)
    cv.AbsDiff(grey_image, grey_base, diff)
    edges = cv.CloneImage(grey_image)
    cv.Canny(diff, edges, thresh, thresh)
    contours = cv.FindContours(edges, cv.CreateMemStorage(0))
    edge_pts = []
    c = contours
    while c is not None:
        if len(c) > 10:
            edge_pts += list(c)
        if len(c) == 0:  # because OpenCV is buggy and dumb
            break
        c = c.h_next()
    if len(edge_pts) == 0:
        return None
    hull = cv.ConvexHull2(edge_pts, cv.CreateMemStorage(0), cv.CV_CLOCKWISE, 1)
    lines = longest_lines(hull)
    perim = sum(l['len'] for l in lines)
    #print perim
    # likely to be a card...
    #if abs(perim - 1200) < 160:
    if perim > 700:
        # extrapolate the rectangle from the hull,
        # if our 4 longest lines make up most of our perimeter
        l = sum(l['len'] for l in lines[0:4])
        #print "l = ", l
        if l / perim > 0.7:
            # we probably have a high-quality rectangle. extrapolate!
            sides = sorted(lines[0:4], key=lambda x: x['angle'])
            # sides are in _some_ clockwise order
            corners = [None] * 4
            # TODO: figure out why we can get an IndexError on xrange(4)
            try:
                for n in xrange(4):
                    corners[n] = line_intersect(sides[n], sides[(n + 1) % 4])
                if not all(corners):
                    return None
            except IndexError:
                print >> sys.stderr, "detect_card() IndexError(), we should track down why this happens exactly"
                return None
            # rotate corners so the top-left corner is first,
            # so we're clockwise from top-left
            sorted_x = sorted(c[0] for c in corners)
            sorted_y = sorted(c[1] for c in corners)
            top_left = None
            for index, (x, y) in enumerate(corners):
                if sorted_x.index(x) < 2 and sorted_y.index(y) < 2:
                    top_left = index
            if top_left is None:
                return None
            # return rotated list
            return corners[top_left:] + corners[:top_left]
    return None
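# A hedged usage sketch for detect_card (not from the original source): the
# function compares the current grayscale frame against a stored background
# frame, so both images are placeholders here.
def _detect_card_demo():
    grey_base = cv.LoadImage("table_empty.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
    grey_image = cv.LoadImage("table_card.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
    corners = detect_card(grey_image, grey_base, thresh=100)
    if corners:
        print "card corners, clockwise from top-left:", corners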
def main():
    global val1, val2
    img = cv.LoadImage(sys.argv[1])
    if img:
        cv.NamedWindow("bar")
        img2 = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
        img21 = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
        img3 = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_16S, 1)
        img4 = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
        cv.CvtColor(img, img2, cv.CV_BGR2GRAY)
        cv.EqualizeHist(img2, img21)
        stor = cv.CreateMemStorage()
        cv.AdaptiveThreshold(img21, img4, 255, cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
                             cv.CV_THRESH_BINARY_INV, 7, 7)
        cont = cv.FindContours(img4, stor, cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_NONE)
        img5 = cv.CloneImage(img)
        while cont:
            if validate_contour(cont):
                cv.DrawContours(img5, cont, (255, 255, 255), (255, 255, 255), 0, 2, 8, (0, 0))
            cont = cont.h_next()
        cv.ShowImage("bar", img5)
        cv.WaitKey(0)
def findPoints(frame, oldRectPoints):
    imageSize = cv.GetSize(frame)
    original = cv.CloneImage(frame)
    hsv = cv.CreateImage(imageSize, 8, 3)
    threshold = cv.CreateImage(imageSize, 8, 1)

    # Do things to the image to isolate the red parts
    cv.CvtColor(original, hsv, cv.CV_RGB2HSV)
    cv.InRangeS(hsv, (110, 80, 80), (140, 255, 255), threshold)
    cv.Erode(threshold, threshold, iterations=5)
    cv.Dilate(threshold, threshold, iterations=5)
    cv.ShowImage("threshold", threshold)

    memory = cv.CreateMemStorage(0)
    clone = cv.CloneImage(threshold)
    contours = cv.FindContours(clone, memory, cv.CV_RETR_LIST,
                               cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
    # area = cv.ContourArea(contours)
    if not contours:
        # If there's no red on the screen
        rectPoints = oldRectPoints
    else:
        rectPoints = cv.BoundingRect(contours)
    # print rectPoints
    return rectPoints
def find_new_markers(self, do_canny=True):
    '''
    Find markers from scratch, without taking their previous positions into consideration
    @param do_canny: perform edge recognition
    '''
    for scale in range(self.max_scale, -1, -1):
        if len(self.not_found) == 0:
            return
        self.set_scale(scale)
        if do_canny:
            cv.Canny(self.gray_img, self.bw_img, 100, 300)
        cv.Copy(self.bw_img, self.tmp_img)
        self.tmp_img[1, 1] = 255
        cont = cv.FindContours(self.tmp_img, cv.CreateMemStorage(), cv.CV_RETR_TREE)
        db.DrawContours(self.canny_img, cont, (255, 255, 255), (128, 128, 128), 10)
        # db.show([self.canny_img, self.tmp_img, self.bw_img], 'show', 0, 1)
        # cv.ShowImage("name", self.canny_img)
        # cv.ShowImage("name1", self.tmp_img)
        self.set_scale(0)
        self.scale_factor = 1 << scale
        self.test_contours(cont)
    # markers = filter(lambda sq: sq.check_square(self.img, self.gray_img), rects)
    return self.markers
def find_corner_in_full_scale(self, point):
    point = self.m_d.scale_up(point)
    scale = self.m_d.scale
    self.m_d.set_scale(0)
    gray_img = self.m_d.gray_img
    canny_img = self.m_d.canny_img
    x, y = point
    cr = correct_rectangle((x - 5, y - 5, 10, 10), cv.GetSize(gray_img))
    for img in [gray_img, canny_img]:
        cv.SetImageROI(img, cr)
    cv.Canny(gray_img, canny_img, 300, 500)
    # offset so the contour points come back in full-image coordinates
    conts = cv.FindContours(canny_img, cv.CreateMemStorage(),
                            cv.CV_RETR_LIST, offset=(cr[0], cr[1]))
    db.DrawContours(self.m_d.tmp_img, conts, (255, 255, 255), (128, 128, 128), 10)
    min_dist = 10
    min_point = None
    while conts:
        for c in conts:
            vec = vector(point, c)
            dist = length(vec)
            if dist < min_dist:
                min_dist = dist
                min_point = c
        conts = conts.h_next()
    self.m_d.set_scale(scale)
    return min_point
def do1Image(self, image, prevpoints):
    #http://www.aishack.in/2010/07/tracking-colored-objects-in-opencv/
    #http://nashruddin.com/OpenCV_Region_of_Interest_(ROI)
    #http://opencv-users.1802565.n2.nabble.com/Python-cv-Moments-Need-Help-td6044177.html
    #http://stackoverflow.com/questions/5132874/change-elements-in-a-cvseq-in-python
    img = self.getThreshold(image)
    points = []
    for i in range(4):
        cv.SetImageROI(img, (int(self.RectanglePoints[i][0]),
                             int(self.RectanglePoints[i][1]),
                             int(self.RectanglePoints[i][2]),
                             int(self.RectanglePoints[i][3])))
        storage = cv.CreateMemStorage(0)
        contours = cv.FindContours(img, storage)
        moments = cv.Moments(contours)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        moment01 = cv.GetSpatialMoment(moments, 0, 1)
        area = cv.GetCentralMoment(moments, 0, 0)
        cv.ResetImageROI(img)
        if area != 0:
            x = self.RectanglePoints[i][0] + (moment10 / area)
            y = self.RectanglePoints[i][1] + (moment01 / area)
        else:
            if prevpoints[i][0] == 0:
                x = self.RectanglePoints[i][0]
                y = self.RectanglePoints[i][1]
            else:
                x = prevpoints[i][0]
                y = prevpoints[i][1]
        points.append([x, y])
    return points
def findRectPoints(self, oldRectPoints):
    hueRange = self.hueRange
    clone = cv.CloneImage(self.frame)
    hsv = cv.CloneImage(self.channels3)
    threshold = cv.CloneImage(self.channels1)
    threshold2 = cv.CloneImage(self.channels1)
    cv.CvtColor(clone, hsv, cv.CV_RGB2HSV)
    cv.InRangeS(hsv, (165, 100, 100), (180, 255, 255), threshold)
    cv.InRangeS(hsv, (0, 100, 100), (15, 255, 255), threshold2)
    cv.Add(threshold, threshold2, threshold)
    self.hue += 1
    print self.hue
    cv.Erode(threshold, threshold, iterations=5)
    cv.Dilate(threshold, threshold, iterations=5)
    cv.ShowImage(self.color, threshold)
    memory = cv.CreateMemStorage(0)
    clone2 = cv.CloneImage(threshold)
    contours = cv.FindContours(clone2, memory, cv.CV_RETR_LIST,
                               cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
    if not contours:
        rectPoints = oldRectPoints
    else:
        rectPoints = cv.BoundingRect(contours)
    return rectPoints
def __init__(self, BW):
    # Constructor. BW is a binary image in the form of a numpy array
    self.BW = BW
    cs = cv.FindContours(cv.fromarray(self.BW.astype(np.uint8)),
                         cv.CreateMemStorage(), mode=cv.CV_RETR_EXTERNAL)  # finds the contours
    counter = 0
    """ These are dynamic lists used to store variables """
    centroid = list()
    cHull = list()
    contours = list()
    cHullArea = list()
    contourArea = list()
    while cs:  # iterate through the CvSeq, cs
        if abs(cv.ContourArea(cs)) > 2000:  # filters out contours smaller than 2000 pixels in area
            contourArea.append(cv.ContourArea(cs))  # appends contourArea with the newest contour area
            m = cv.Moments(cs)  # finds all of the moments of the filtered contour
            try:
                m10 = int(cv.GetSpatialMoment(m, 1, 0))  # spatial moment m10
                m00 = int(cv.GetSpatialMoment(m, 0, 0))  # spatial moment m00
                m01 = int(cv.GetSpatialMoment(m, 0, 1))  # spatial moment m01
                centroid.append((int(m10 / m00), int(m01 / m00)))  # appends the centroid list with the newest centre of gravity of the contour
                convexHull = cv.ConvexHull2(cs, cv.CreateMemStorage(), return_points=True)  # finds the convex hull of cs as a CvSeq
                cHullArea.append(cv.ContourArea(convexHull))  # adds the area of the convex hull to the cHullArea list
                cHull.append(list(convexHull))  # adds the list form of the convex hull to the cHull list
                contours.append(list(cs))  # adds the list form of the contour to the contours list
                counter += 1  # increments the counter to track how many blobs there are
            except:
                pass
        cs = cs.h_next()  # go to the next contour in the cs CvSeq
    """ Below, the variables are made into fields for later reference """
    self.centroid = centroid
    self.counter = counter
    self.cHull = cHull
    self.contours = contours
    self.cHullArea = cHullArea
    self.contourArea = contourArea
def process(self):
    def seq_to_iter(seq):
        while seq:
            yield seq
            seq = seq.h_next()

    def score_rect(r, p):
        x, y, w, h = r
        return w * h

    cv.Resize(self.frame, self.resized_frame)
    cv.CvtColor(self.resized_frame, self.hsv_frame, cv.CV_RGB2HSV)
    cv.Smooth(self.hsv_frame, self.smooth_frame, cv.CV_GAUSSIAN, 31)
    for p in self.pompon:
        if p.calibration_done:
            self.in_range(p, self.smooth_frame, self.bin_frame)
            cv.Erode(self.bin_frame, self.mask, None, self.dilatation)
            if self.show_binary:
                self.mask2 = cv.CloneImage(self.mask)  # for miniature
            contour = seq_to_iter(cv.FindContours(self.mask, cv.CreateMemStorage(),
                                                  cv.CV_RETR_EXTERNAL,
                                                  cv.CV_CHAIN_APPROX_SIMPLE))
            rects = map((lambda c: cv.BoundingRect(c, 0)), contour)
            if rects:
                x, y, w, h = max(rects, key=lambda r: score_rect(r, p))
                p.pos = self.proc2sym(x + w / 2, y + h / 2)
def __refresh_canny(self):
    cv.Canny(self.res_smooth, self.canny, self.__canny_lo, self.__canny_hi,
             self.__canny_apeture * 2 + 3)
    #cv.Threshold(self.res_smooth, self.canny, self.__canny_lo * 2, 255, cv.CV_THRESH_BINARY)
    cv.ShowImage('Canny', self.canny)
    cv.Copy(self.canny, self.contour_in)
    self.contours = cv.FindContours(self.contour_in, self.c_storage,
                                    cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_NONE)
    self.__refresh_poly()
def find_leds(thresh_img):
    """ Given a binary image showing the brightest pixels in an image,
        returns a result image with each detected LED enclosed in a rectangle,
        along with the list of detected regions. """
    contours = cv.FindContours(thresh_img, cv.CreateMemStorage(),
                               mode=cv.CV_RETR_EXTERNAL,
                               method=cv.CV_CHAIN_APPROX_NONE, offset=(0, 0))
    regions = []
    while contours:
        pts = [pt for pt in contours]
        x, y = zip(*pts)
        min_x, min_y = min(x), min(y)
        width, height = max(x) - min_x + 1, max(y) - min_y + 1
        regions.append((min_x, min_y, width, height))
        contours = contours.h_next()

    # output image the same size as the input mask
    out_img = cv.CreateImage(cv.GetSize(thresh_img), 8, 3)
    for x, y, width, height in regions:
        pt1 = x, y
        pt2 = x + width, y + height
        color = (0, 0, 255, 0)
        cv.Rectangle(out_img, pt1, pt2, color, 2)
    return out_img, regions
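# A minimal usage sketch for find_leds (not part of the original module):
# threshold a grayscale frame so only the brightest pixels survive, then hand
# the binary mask to find_leds(). The file name and the 250 cut-off are
# placeholders.
def _find_leds_demo():
    grey = cv.LoadImage("frame.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
    thresh_img = cv.CreateImage(cv.GetSize(grey), 8, 1)
    cv.Threshold(grey, thresh_img, 250, 255, cv.CV_THRESH_BINARY)
    out_img, regions = find_leds(thresh_img)
    print len(regions), "LED candidates"
    cv.ShowImage("leds", out_img)
    cv.WaitKey(0)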
def _get_pos_countour(self, th_img):
    storage = cv.CreateMemStorage(0)
    contour = None
    ci = cv.CreateImage(cv.GetSize(th_img), 8, 1)
    ci = cv.CloneImage(th_img)
    contour = cv.FindContours(th_img, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
    points = []
    while contour:
        bound_rect = cv.BoundingRect(list(contour))
        contour = contour.h_next()
        pt1 = (bound_rect[0], bound_rect[1])
        pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
        points.append(pt1)
        points.append(pt2)
        #cv.Rectangle(img, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)
    center_point = None
    if len(points):
        center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
        #cv.Circle(depth, center_point, 10, cv.CV_RGB(255, 255, 255), 1)
    return center_point
def _computeContours(self):
    cvMask = self._fgMask.asOpenCVBW()
    cvdst = cv.CloneImage(cvMask)  # because cv.FindContours may alter the source image
    contours = cv.FindContours(cvdst, cv.CreateMemStorage(),
                               cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
    self._contours = contours
def get_contours(frame, approx=True):
    """Get contours from an image

    :: iplimage -> CvSeq
    """
    # A workaround for an OpenCV 2.0 crash on receiving a (nearly) black image
    nonzero = cv.CountNonZero(frame)
    logging.debug("Segmentation got an image with %d nonzero pixels", nonzero)
    if nonzero < 20 or nonzero > 10000:
        return []
    storage = cv.CreateMemStorage(0)
    # find the contours
    contours = cv.FindContours(frame, storage, cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_SIMPLE)
    if contours is None:
        return []
    res = []
    while contours:
        if not approx:
            result = contours
        else:
            result = cv.ApproxPoly(contours, storage, cv.CV_POLY_APPROX_DP,
                                   cv.ArcLength(contours) * 0.02, 1)
        res.append(result)
        contours = contours.h_next()
    return res
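# A hedged usage sketch for get_contours (not from the original source): feed
# a small binary mask to the function and print the points of each returned
# (optionally polygon-approximated) contour. "mask.png" is a placeholder path.
def _get_contours_demo():
    frame = cv.LoadImage("mask.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
    for contour in get_contours(frame, approx=True):
        print list(contour)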
def main():
    """Parse the command line and set off processing."""
    # parse command line
    opts, args = getopt(sys.argv[1:], "i")
    if len(args) != 1:
        syntax()
        return 1

    # grab options
    invert = False
    for n, v in opts:
        if n == '-i':
            invert = True

    # load image
    grey = prepare_image(args[0], invert)
    size = cv.GetSize(grey)

    # a bit of smoothing to reduce noise
    smoothed = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    cv.Smooth(grey, smoothed, cv.CV_GAUSSIAN, 5, 5)

    # adaptive thresholding finds the letters against the numberplate
    # background
    thresholded = cv.CloneImage(grey)
    cv.AdaptiveThreshold(smoothed, thresholded, 255,
                         cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
                         cv.CV_THRESH_BINARY_INV, 19, 9)

    # use a hough transform to find straight edges in the image and then
    # remove them - removes number plate edges to ensure that characters
    # don't join with the edges of the plate
    storage = cv.CreateMemStorage()
    #lines = cv.HoughLines2(thresholded, storage, cv.CV_HOUGH_PROBABILISTIC,
    #                       1, math.pi/180, 50, 50, 2)
    #for line in lines:
    #    cv.Line(thresholded, line[0], line[1], 0, 3, 4)

    # grab the contours from the image
    cont = cv.FindContours(thresholded, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_NONE)

    # grab 'good' contours
    col = 128
    validated = []
    while cont:
        v = validate_contour(cont, grey)
        if v is not None:
            validated.append(v)
        cont = cont.h_next()

    # overlay bounding boxes of 'good' contours on the original image
    result = cv.LoadImage(args[0])
    clusters = cluster_fuck(set(validated))
    for cluster in clusters:
        cv.Rectangle(result,
                     (int(min([c.x1 for c in cluster])), int(min([c.y1 for c in cluster]))),
                     (int(max([c.x2 for c in cluster])), int(max([c.y2 for c in cluster]))),
                     (0, 0, 255))
        for bbox in cluster:
            cv.Rectangle(result, (int(bbox.x1), int(bbox.y1)),
                         (int(bbox.x2), int(bbox.y2)), (255, 0, 0))
    quick_show(result)
def __init__(self, BW):
    # Constructor. BW is a binary image in the form of a numpy array
    self.BW = BW
    cs = cv.FindContours(cv.fromarray(self.BW.astype(np.uint8)),
                         cv.CreateMemStorage(), mode=cv.CV_RETR_EXTERNAL)  # finds the contours
    counter = 0
    """ These are dynamic lists used to store variables """
    centroid = list()
    cHull = list()
    contours = list()
    cHullArea = list()
    contourArea = list()
    while cs:  # iterate through the CvSeq, cs
        if abs(cv.ContourArea(cs)) > 2000:  # filters out contours smaller than 2000 pixels in area
            contourArea.append(cv.ContourArea(cs))  # appends contourArea with the newest contour area
            m = cv.Moments(cs)  # finds all of the moments of the filtered contour
            try:
                m10 = int(cv.GetSpatialMoment(m, 1, 0))  # spatial moment m10
                m00 = int(cv.GetSpatialMoment(m, 0, 0))  # spatial moment m00
                m01 = int(cv.GetSpatialMoment(m, 0, 1))  # spatial moment m01
                centroid.append((int(m10 / m00), int(m01 / m00)))  # appends the centroid list with the newest coordinates of the centroid of the contour
                convexHull = cv.ConvexHull2(cs, cv.CreateMemStorage(), return_points=True)  # finds the convex hull of cs in type CvSeq
                cHullArea.append(cv.ContourArea(convexHull))  # adds the area of the convex hull to the cHullArea list
                cHull.append(list(convexHull))  # adds the list form of the convex hull to the cHull list
                contours.append(list(cs))  # adds the list form of the contour to the contours list
                counter += 1  # adds to the counter to see how many blobs there are
            except:
                pass
        cs = cs.h_next()  # goes to the next contour in the cs CvSeq
    """ Below the variables are made into fields for referencing later """
    self.centroid = centroid
    self.counter = counter
    self.cHull = cHull
    self.contours = contours
    self.cHullArea = cHullArea
    self.contourArea = contourArea
def edge_threshold(image, roi=None, debug=0):
    thresholded = cv.CloneImage(image)
    horizontal = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_16S, 1)
    magnitude32f = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
    vertical = cv.CloneImage(horizontal)
    v_edge = cv.CloneImage(image)
    magnitude = cv.CloneImage(horizontal)
    storage = cv.CreateMemStorage(0)
    mag = cv.CloneImage(image)

    cv.Sobel(image, horizontal, 0, 1, 1)
    cv.Sobel(image, vertical, 1, 0, 1)
    cv.Pow(horizontal, horizontal, 2)
    cv.Pow(vertical, vertical, 2)
    cv.Add(vertical, horizontal, magnitude)
    cv.Convert(magnitude, magnitude32f)
    cv.Pow(magnitude32f, magnitude32f, 0.5)
    cv.Convert(magnitude32f, mag)
    if roi:
        cv.And(mag, roi, mag)
    cv.Normalize(mag, mag, 0, 255, cv.CV_MINMAX, None)
    cv.Threshold(mag, mag, 122, 255, cv.CV_THRESH_BINARY)

    draw_image = cv.CloneImage(image)
    and_image = cv.CloneImage(image)
    results = []

    threshold_start = 17
    for window_size in range(threshold_start, threshold_start + 1, 1):
        r = 20
        for threshold in range(0, r):
            cv.AdaptiveThreshold(image, thresholded, 255,
                                 cv.CV_ADAPTIVE_THRESH_MEAN_C,
                                 cv.CV_THRESH_BINARY_INV, window_size, threshold)
            contour_image = cv.CloneImage(thresholded)
            contours = cv.FindContours(contour_image, storage, cv.CV_RETR_LIST)
            cv.Zero(draw_image)
            cv.DrawContours(draw_image, contours, (255, 255, 255), (255, 255, 255), 1, 1)
            if roi:
                cv.And(draw_image, roi, draw_image)
            cv.And(draw_image, mag, and_image)
            m1 = np.asarray(cv.GetMat(draw_image))
            m2 = np.asarray(cv.GetMat(mag))
            total = mag.width * mag.height  # cv.Sum(draw_image)[0]
            coverage = cv.Sum(and_image)[0] / (mag.width * mag.height)
            if debug:
                print threshold, coverage
                cv.ShowImage("main", draw_image)
                cv.ShowImage("main2", thresholded)
                cv.WaitKey(0)
            results.append((coverage, threshold, window_size))

    results.sort(lambda x, y: cmp(y, x))
    _, threshold, window_size = results[0]
    cv.AdaptiveThreshold(image, thresholded, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         cv.CV_THRESH_BINARY, window_size, threshold)
    return thresholded
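# A hedged usage sketch for edge_threshold (not from the original source): the
# function searches for the adaptive-threshold setting whose contours best
# cover the Sobel edge magnitude and returns the binarised result. The file
# name is a placeholder.
def _edge_threshold_demo():
    image = cv.LoadImage("plate.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
    binary = edge_threshold(image, roi=None, debug=0)
    cv.ShowImage("binary", binary)
    cv.WaitKey(0)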
def fitEllipse(cvImg):
    """
    Use OpenCV to find the contours of the input cvImg and then find the best
    fit ellipses around those contours. This is a rough approach to identifying
    the primary 'objects of interest' within an image. The results are rendered
    as ellipses onto a copy of the input image, which is returned.

    @param cvImg: input OpenCV image in which to find best fit ellipses.
    @return: a copy of the input image with the ellipse results rendered onto it.
    """
    def contourIterator(contour):
        """ Helper method to iterate over cvContours. """
        while contour:
            yield contour
            contour = contour.h_next()

    # Find all contours.
    stor = cv.CreateMemStorage()
    cont = cv.FindContours(cvImg, stor, cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_NONE, (0, 0))

    cimg = cv.CreateImage((cvImg.width, cvImg.height), cv.IPL_DEPTH_8U, 3)
    cv.CvtColor(cvImg, cimg, cv.CV_GRAY2BGR)

    # clen = 0
    # for c in contourIterator(cont):
    #     clen += len(c)
    # ptMat = cv.CreateMat(1, clen, cv.CV_32FC2)
    # ci = 0
    for c in contourIterator(cont):
        # for (i, (x, y)) in enumerate(c):
        #     ptMat[0, i + ci] = (x, y)
        # ci += len(c)

        # The number of points must be at least 6 for cv.FitEllipse2
        if len(c) >= 6:
            # Copy the contour into an array of (x, y)s
            ptMat = cv.CreateMat(1, len(c), cv.CV_32FC2)
            for (i, (x, y)) in enumerate(c):
                ptMat[0, i] = (x, y)

            # Draw the current contour in gray
            gray = cv.CV_RGB(150, 150, 150)
            cv.DrawContours(cimg, c, gray, gray, 0, 1, 8, (0, 0))

            # Fit an ellipse to the current contour.
            (center, size, angle) = cv.FitEllipse2(ptMat)

            # Convert ellipse data from float to integer representation.
            center = (cv.Round(center[0]), cv.Round(center[1]))
            size = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5))
            #angle = -angle

            # Draw the ellipse in blue
            color = cv.CV_RGB(0, 0, 255)
            cv.Ellipse(cimg, center, size, angle, 0, 360, color, 1, cv.CV_AA, 0)

    return cimg
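# A hedged usage sketch for fitEllipse (not from the original source):
# fitEllipse expects a single-channel 8-bit image, so threshold a grayscale
# input first. The file name and the 128 threshold are placeholders.
def _fit_ellipse_demo():
    grey = cv.LoadImage("cells.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
    cv.Threshold(grey, grey, 128, 255, cv.CV_THRESH_BINARY)
    rendered = fitEllipse(grey)
    cv.ShowImage("ellipses", rendered)
    cv.WaitKey(0)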
def get_elements(filename, treshold=50, minheight=15, minarea=200, elements=6):
    src = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE)
    test = cv.CreateImage(cv.GetSize(src), 32, 3)
    dst = cv.CreateImage(cv.GetSize(src), 8, 1)
    storage = cv.CreateMemStorage(0)
    cv.Canny(src, dst, treshold, treshold * 3, 3)
    storage = cv.CreateMemStorage(0)
    seqs = cv.FindContours(dst, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_NONE, (0, 0))

    res = []
    c = seqs.h_next()
    while True:
        if not c:
            break
        box = cv.BoundingRect(c)
        area = box[2] * box[3]
        # and (area > minarea)
        if box[3] > minheight:
            res.append(box)
        c = c.h_next()

    if len(res) < elements:
        while len(res) < elements:
            m = 0
            c = 0
            for i, e in enumerate(res):
                if e[3] > m:
                    m = e[3]
                    c = i
            big = res.pop(c)
            res.append((big[0], big[1], int(big[2] * 1.0 / 2), big[3]))
            res.append((big[0] + int(big[2] * 1.0 / 2), big[1], int(big[2] * 1.0 / 2), big[3]))

    #for box in res:
    #    cv.Rectangle(dst, (box[0], box[1]), (box[0] + box[2], box[1] + box[3]), cv.RGB(255, 255, 255))
    #cv.ShowImage('Preview2', dst)
    #cv.WaitKey()

    imgs = []
    print len(res)
    for box in res:
        cv.SetImageROI(src, box)
        tmp = cv.CreateImage((box[2], box[3]), 8, 1)
        cv.Copy(src, tmp)
        hq.heappush(imgs, (box[0], tmp))
        cv.ResetImageROI(src)
    res = [hq.heappop(imgs)[1] for i in xrange(len(res))]
    return res
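# A hedged usage sketch for get_elements (not from the original source): split
# an image into its sub-elements, ordered left to right, and preview them. The
# file name and window titles are placeholders.
def _get_elements_demo():
    pieces = get_elements("captcha.png", treshold=50, elements=6)
    for n, piece in enumerate(pieces):
        cv.ShowImage("element %d" % n, piece)
    cv.WaitKey(0)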
def blob_statistics(binary_image, max_area=99999.0, max_dim=99999.0):  # , show=False):
    statistics = []
    storage = cv.CreateMemStorage(0)
    #FindContours(image, storage, mode=CV_RETR_LIST, method=CV_CHAIN_APPROX_SIMPLE, offset=(0, 0))
    contours = cv.FindContours(binary_image, storage, cv.CV_RETR_TREE,
                               cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
    #number_contours, contours = cv.FindContours(binary_image, storage, cv.sizeof_CvContour,
    #                                            cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
    #TODO: FIGURE OUT WHAT THE EQUIV OF SIZEOF IS IN OPENCV2
    #import pdb
    #pdb.set_trace()
    original_ptr = contours
    while contours != None:
        try:
            bx, by, bwidth, bheight = cv.BoundingRect(contours, 0)
            bounding_rect = Rect(bx, by, bwidth, bheight)
            moments = cv.Moments(contours, 0)
            #area = moments.m00
            # approximation to area, since cvMoments' area seems broken
            area = bounding_rect.width * bounding_rect.height
            if False:
                #TODO: NOT WORKING!!
                if moments.m00 == 0.0:
                    centroid = (bounding_rect.x, bounding_rect.y)
                else:
                    centroid = (moments.m10 / moments.m00, moments.m01 / moments.m00)
            else:
                if bwidth > 0:
                    cx = bx + bwidth / 2.
                else:
                    cx = bx
                if bheight > 0:
                    cy = by + bheight / 2.
                else:
                    cy = by
                centroid = (cx, cy)
            #if show:
            #    print 'area is', area, bounding_rect.width, bounding_rect.height
            if area > max_area or bounding_rect.width > max_dim or bounding_rect.height > max_dim:
                cv.DrawContours(binary_image, contours, cv.Scalar(0), cv.Scalar(0), 0, cv.CV_FILLED)
            else:
                stats = {'area': area, 'centroid': centroid, 'rect': bounding_rect}
                statistics.append(stats)
            contours = contours.h_next()
        except Exception, e:
            # this is due to an OpenCV bug and not being able to see inside the contour object
            pass
            break
    return statistics
def loop(self, lock):
    print('starting thread - ProcessContours')
    self.active = True
    self.loops = 0
    while self.active:
        self.loops += 1
        print('.....contours thread... locking, convert scale...')
        lock.acquire()
        #lock.release()    # bug was here
        cv.CvtColor(self.depth8, self.DEPTH640, cv.CV_GRAY2RGB)
        cv.Resize(self.DEPTH640, self.DEPTH240, False)
        lock.release()     # fixed Dec 6th 2011
        print('.....contours thread ok.....')

        # blur helps?
        #cv.Smooth(self.depth8, self.depth8, cv.CV_BLUR, 16, 16, 0.1, 0.1)
        #cv.Smooth(self.depth8, self.depth8, cv.CV_GAUSSIAN, 13, 13, 0.1, 0.1)

        thresh = Kinect.sweep_begin
        index = 0
        #for img in self.sweep_thresh:
        for i in range(int(Kinect.sweeps)):
            if thresh >= 255:
                break
            img = self.sweep_images[i]
            cv.ClearMemStorage(self.storage)
            cv.Threshold(self.depth8, img, thresh, 255, cv.CV_THRESH_BINARY_INV)
            #cv.Canny(img, img, 0, 255, 3)    # too slow
            seq = cv.CvSeq()
            contours = ctypes.pointer(seq.POINTER)
            cv.FindContours(img, self.storage, contours, ctypes.sizeof(cv.Contour.CSTRUCT),
                            cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
            #print( contours.contents.contents.total )
            _total = 0
            try:
                _total = contours.contents.contents.total
            except ValueError:
                # ValueError: NULL pointer access
                thresh += int(Kinect.sweep_step)
                continue
            P = ReducedPolygon(contours.contents, index, thresh)
            lock.acquire()
            Kinect.BUFFER.append(P)
            lock.release()
            index += 1
            thresh += int(Kinect.sweep_step)

        print('==========proc shapes.iterate============')
        self.proc_shapes.iterate(lock)
        time.sleep(0.01)
    print('thread exit - ProcessContours', self.loops)
def find_characters(grey, bw):
    """Find character contours in a 1-bit image."""
    # detect contours
    storage = cv.CreateMemStorage()
    contour_iter = cv.FindContours(bw, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_NONE)

    # filter the detected contours
    while contour_iter:
        contour = Contour(contour_iter, grey)
        if contour.valid:
            yield contour
        contour_iter = contour_iter.h_next()
def get_blobs(bin_arr):
    ''' Find all contiguous nonzero blobs in the image, and return a list of
    Blob objects sorted by area, largest first. '''
    bin_img = cv.fromarray(bin_arr.copy())
    storage = cv.CreateMemStorage(0)
    contours = cv.FindContours(bin_img, storage, cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_NONE)
    blobs = []
    while contours:
        blobs.append(Blob(contours))
        contours = contours.h_next()
    return sorted(blobs, key=Blob.get_area, reverse=True)
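# A minimal usage sketch for get_blobs (not part of the original module): label
# a synthetic binary numpy array and read back the largest blob's area. Assumes
# the Blob class from this module exposes get_area, as the sort key above does;
# the numpy import is local to keep the sketch self-contained.
def _get_blobs_demo():
    import numpy as np
    bin_arr = np.zeros((240, 320), dtype=np.uint8)
    bin_arr[50:100, 50:100] = 255      # small square blob
    bin_arr[120:220, 40:300] = 255     # larger rectangular blob
    blobs = get_blobs(bin_arr)
    print "largest blob area:", blobs[0].get_area()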
def getBotCoord():
    image = cv.QueryFrame(capture)
    imageTreshold = thresholded_image(image, botTreshold[0], botTreshold[1])
    current_contour = cv.FindContours(cv.CloneImage(imageTreshold),
                                      cv.CreateMemStorage(),
                                      cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
    mypos = (0, 0)
    if len(current_contour) != 0:
        mypos = contourCenter(largestContour(current_contour))
    imagehsv = hsv_image(image)
    s = cv.Get2D(imagehsv, mypos[0], mypos[1])
    return (mypos, s)
def find_contours(im):
    """
    @param im IplImage: an input gray image
    @return cvseq contours using cv.FindContours
    """
    storage = cv.CreateMemStorage(0)
    try:
        contours = cv.FindContours(im, storage, cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)
        #contours = cv.ApproxPoly(contours,
        #                         storage,
        #                         cv.CV_POLY_APPROX_DP, 3, 1)
    except cv.error, e:
        return None
    return contours
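# A hedged usage sketch for find_contours (not from the original source): the
# function returns a CvSeq, so walk it with h_next() as the other snippets in
# this collection do. "mask.png" is a placeholder path.
def _find_contours_demo():
    im = cv.LoadImage("mask.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
    contour = find_contours(im)
    while contour:
        print cv.ContourArea(contour), cv.BoundingRect(contour)
        contour = contour.h_next()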