def findImageContour(img, frame):
    """Find the largest external contour in *img*, fit an ellipse to it and
    draw that ellipse on *frame*.

    img   : binary single-channel image; qualifying contours are also drawn
            back onto it in white.
    frame : colour image the winning ellipse is drawn on (red, thickness 3).
    """
    storage = cv.CreateMemStorage()
    cont = cv.FindContours(img, storage, cv.CV_RETR_EXTERNAL,
                           cv.CV_CHAIN_APPROX_NONE, (0, 0))
    # Best ellipse so far as (center, size, angle, area); area starts at 0 so
    # any real ellipse beats it.
    best = (None, None, None, 0)
    for c in contour_iterator(cont):
        # cv.FitEllipse2 needs >= 6 points; the 60 floor also acts as a
        # minimum object size filter so tiny specks are ignored.
        if len(c) >= 60:
            # Copy the contour into a 1xN matrix of (x, y) float pairs.
            points = cv.CreateMat(1, len(c), cv.CV_32FC2)
            for i, (x, y) in enumerate(c):
                points[0, i] = (x, y)
            # Fit an ellipse to the current contour.
            (center, size, angle) = cv.FitEllipse2(points)
            # Keep the ellipse covering the biggest area. (The original code
            # compared the (w, h) tuple directly against a number, which is
            # not a meaningful size ordering, and it drew the final ellipse
            # with the angle of the LAST contour instead of the biggest one.)
            # Adapt here for multiple-object tracking.
            area = size[0] * size[1]
            if area > best[3]:
                best = (center, size, angle, area)
            # Draw the current contour in white for visual feedback.
            white = cv.CV_RGB(255, 255, 255)
            cv.DrawContours(img, c, white, white, 0, 1, 8, (0, 0))
    if best[3] > 0:
        center, size, angle, _ = best
        # Convert ellipse data from float to integer representation;
        # cv.Ellipse expects half-axes, hence the 0.5 factor.
        center = (cv.Round(center[0]), cv.Round(center[1]))
        axes = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5))
        cv.Ellipse(frame, center, axes, angle, 0, 360,
                   cv.CV_RGB(255, 0, 0), 3, cv.CV_AA, 0)
def getContours(im, approx_value=1):
    """Return the polygon-approximated contours of binary image *im*.

    A clone of *im* is searched, so the caller's image is left untouched.
    approx_value is passed to cv.ApproxPoly as both the accuracy and the
    recursion parameter.
    """
    mem = cv.CreateMemStorage(0)
    work = cv.CloneImage(im)  # FindContours modifies its input
    raw = cv.FindContours(work, mem, cv.CV_RETR_CCOMP,
                          cv.CV_CHAIN_APPROX_SIMPLE)
    return cv.ApproxPoly(raw, mem, cv.CV_POLY_APPROX_DP,
                         approx_value, approx_value)
def find_rectangles(self, input_img):
    """Find contours in *input_img* (a binary image).

    Returns a new single-channel image of the same size with the detected
    contours drawn on it.
    """
    # Destination image the contours are rendered into.
    contours_img = cv.CreateMat(input_img.height, input_img.width, cv.CV_8UC1)
    # FindContours destroys its input, so work on a copy.
    copied_img = cv.CreateMat(input_img.height, input_img.width, input_img.type)
    cv.Copy(input_img, copied_img)
    found = cv.FindContours(copied_img, cv.CreateMemStorage(),
                            cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
    cv.DrawContours(contours_img, found, 255, 0, 10)
    return contours_img
def getContour(cvImg):
    """Return the (x, y) pixel coordinates of the near-black region of *cvImg*.

    Pixels whose channels all fall in [0, 3] are selected with cv.InRangeS;
    the mask's nonzero coordinates are returned as a pair of numpy arrays
    (x, y).
    """
    lowerBound = cv.Scalar(0, 0, 0)
    upperBound = cv.Scalar(3, 3, 3)
    size = cv.GetSize(cvImg)  # (width, height)
    # cv.CreateMat takes (rows, cols) = (height, width); the original passed
    # (width, height), producing a mis-sized mask for non-square images.
    output = cv.CreateMat(size[1], size[0], cv.CV_8UC1)
    cv.InRangeS(cvImg, lowerBound, upperBound, output)
    # FindContours is kept for parity with the original behaviour (it labels
    # the mask in place) even though only the nonzero mask is used below.
    storage = cv.CreateMemStorage(0)
    seq = cv.FindContours(output, storage, cv.CV_RETR_LIST,
                          cv.CV_CHAIN_APPROX_SIMPLE)
    mask = np.array(output)
    y, x = np.nonzero(mask)
    return (x, y)
def process_image(self, slider_pos):
    """Threshold the source image at *slider_pos*, find every contour, draw
    each in gray and overlay its fitted ellipse in a random colour."""
    stor = cv.CreateMemStorage()
    # Destination images: a zeroed clone for the threshold result and a
    # 3-channel canvas for the drawing.
    thresh_img = cv.CloneImage(self.source_image)
    cv.Zero(thresh_img)
    canvas = cv.CreateImage(cv.GetSize(self.source_image), cv.IPL_DEPTH_8U, 3)
    cv.Zero(canvas)
    # cv.FindContours needs a binary image.
    cv.Threshold(self.source_image, thresh_img, slider_pos, 255,
                 cv.CV_THRESH_BINARY)
    cont = cv.FindContours(thresh_img, stor, cv.CV_RETR_LIST,
                           cv.CV_CHAIN_APPROX_NONE, (0, 0))
    for c in contour_iterator(cont):
        # cv.FitEllipse2 requires at least 6 points.
        if len(c) < 6:
            continue
        # Copy the contour into a 1xN matrix of (x, y) float pairs.
        pts = cv.CreateMat(1, len(c), cv.CV_32FC2)
        for i, (x, y) in enumerate(c):
            pts[0, i] = (x, y)
        # Draw the current contour in gray.
        gray = cv.CV_RGB(100, 100, 100)
        cv.DrawContours(canvas, c, gray, gray, 0, 1, 8, (0, 0))
        # Fit an ellipse and convert its parameters to integers;
        # cv.Ellipse wants half-axes, hence the 0.5 factor.
        (center, size, angle) = cv.FitEllipse2(pts)
        center = (cv.Round(center[0]), cv.Round(center[1]))
        size = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5))
        color = cv.CV_RGB(random.randrange(256), random.randrange(256),
                          random.randrange(256))
        cv.Ellipse(canvas, center, size, angle, 0, 360, color, 2, cv.CV_AA, 0)
    # Show image. HighGUI use.
    cv.ShowImage("Result", canvas)
def find_squares_from_binary(gray):
    """Use contour search to find squares in a binary image.

    Returns a list of point sequences, each holding the 4 corner points of a
    detected square.
    """
    found = []
    contour = cv.FindContours(gray, cv.CreateMemStorage(0), cv.CV_RETR_TREE,
                              cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
    poly_storage = cv.CreateMemStorage(0)
    while contour:
        # Approximate with accuracy proportional to the contour perimeter.
        perimeter = cv.ArcLength(contour)
        approx = cv.ApproxPoly(contour, poly_storage, cv.CV_POLY_APPROX_DP,
                               perimeter * 0.02, 0)
        if is_square(approx):
            found.append(approx[0:4])
        contour = contour.h_next()
    return found
def findImage(img):
    """Locate the tracked colour marker in *img*.

    Returns (annotated_image, threshold_mask) when drawCircles finds the
    marker, otherwise None.
    """
    # Set up storage for the working images.
    frame_size = cv.GetSize(img)
    img2 = cv.CreateImage(frame_size, 8, 3)
    tmp = cv.CreateImage(frame_size, 8, cv.CV_8U)
    h = cv.CreateImage(frame_size, 8, 1)
    # Copy the original so the input frame is not modified by the blur/convert.
    cv.Copy(img, img2)
    # Blur, then move to HSV for more robust colour thresholding.
    cv.Smooth(img2, img2, cv.CV_BLUR, 3)
    cv.CvtColor(img2, img2, cv.CV_BGR2HSV)
    # Make sure the mask starts empty.
    cv.Zero(tmp)
    # Detection based on HSV value. Previously-tuned ranges kept for reference:
    #cv.InRangeS(img2,cv.Scalar(25,100,87),cv.Scalar(50,255,255),tmp)
    # Range for green plate dot in my Living room:
    #cv.InRangeS(img2,cv.Scalar(55,80,60),cv.Scalar(65,95,90),tmp)
    # Classroom:
    #cv.InRangeS(img2,cv.Scalar(55,80,60),cv.Scalar(70,110,70),tmp)
    # Kutztowns Gym:
    #cv.InRangeS(img2,cv.Scalar(65,75,58),cv.Scalar(75,59,61),tmp)
    cv.InRangeS(img2, cv.Scalar(90, 90, 185), cv.Scalar(100, 100, 200), tmp)
    # Dilate the mask with a rectangular kernel to close holes in the blob.
    elmt_shape = cv.CV_SHAPE_RECT
    pos = 6
    element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1,
                                            pos, pos, elmt_shape)
    cv.Dilate(tmp, tmp, element, 6)
    cv.Split(tmp, h, None, None, None)
    storage = cv.CreateMemStorage()
    scan = cv.FindContours(h, storage)
    xyImage = drawCircles(scan, img)
    # Idiom fix: identity comparison with None uses "is not", not "!=".
    if xyImage is not None:
        return (xyImage, tmp)
    return None
def somethingHasMoved(self):
    """Return True when the moving area found in the gray frame exceeds
    the configured threshold (expressed as a percentage of the frame
    surface)."""
    storage = cv.CreateMemStorage(0)
    contour = cv.FindContours(self.gray_frame, storage,
                              cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)
    self.currentcontours = contour  # keep the chain for later drawing
    # Accumulate the area of every contour in the chain.
    while contour:
        self.currentsurface += cv.ContourArea(contour)
        contour = contour.h_next()
    # Percentage of the frame covered by changed pixels.
    avg = (self.currentsurface * 100) / self.surface
    self.currentsurface = 0  # reset the accumulator for the next frame
    return avg > self.threshold
def verificaMovimento(self):
    """Sum the contour areas of the gray image to check for a difference.

    Returns True when the summed contour area (as a percentage of the total
    frame area) is greater than 0, otherwise False.
    """
    # Find the contours of the objects in the gray image.
    contour = cv.FindContours(self.imagem_cinza, cv.CreateMemStorage(0))
    while contour:
        self.area_corrente += cv.ContourArea(contour)
        contour = contour.h_next()
    # Express the accumulated area as a percentage of the frame area.
    movimentos = (self.area_corrente * 100) / self.area
    self.area_corrente = 0
    return movimentos > 0
def on_trackbar(position): ''' position is the value of the track bar ''' img_result = cv.CreateImage(src_img_size, 8, 1) cv.Canny(img_gray, img_result, position, position*2, 3) cv.ShowImage("contours", img_result) storage = cv.CreateMemStorage() contours = cv.FindContours(img_result, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE) print contours # draw contours in red and green cv.DrawContours (img_result, #dest image contours, #input contours _red, #color of external contour _green, #color of internal contour levels, #maxlevel of contours to draw _contour_thickness, cv.CV_AA, #line type (0, 0)) #offset pass
def motionDetect(self, img):
    """Detect motion in *img* against the running-average background and
    draw a yellow bounding box around each moving region.

    Returns the annotated input image.
    """
    cv.Smooth(img, img, cv.CV_GAUSSIAN, 3, 0)
    # Fold the new frame into the running background average.
    cv.RunningAvg(img, self.movingAvg, 0.020, None)
    cv.ConvertScale(self.movingAvg, self.tmp, 1.0, 0.0)
    # Difference between the current frame and the background model.
    cv.AbsDiff(img, self.tmp, self.diff)
    cv.CvtColor(self.diff, self.grayImage, cv.CV_RGB2GRAY)
    cv.Threshold(self.grayImage, self.grayImage, 70, 255, cv.CV_THRESH_BINARY)
    # Dilate heavily then erode to merge blobs and drop specks.
    cv.Dilate(self.grayImage, self.grayImage, None, 18)
    cv.Erode(self.grayImage, self.grayImage, None, 10)
    contour = cv.FindContours(self.grayImage, cv.CreateMemStorage(0),
                              cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
    # Box every blob found.
    while contour:
        x, y, w, hgt = cv.BoundingRect(list(contour))
        contour = contour.h_next()
        cv.Rectangle(img, (x, y), (x + w, y + hgt),
                     cv.CV_RGB(255, 255, 0), 1)
    return img
def find(self, img):
    """Locate DataMatrix barcodes in colour image *img*.

    Two-phase search: (1) a cheap Sobel-energy "cull" that keeps only
    regions with dense edges, computed via an integral image; (2) brute
    decoding of each surviving region through self.dm (presumably a pydmtx
    DataMatrix decoder — TODO confirm).

    Returns {decoded_symbol: [(x, y) corner points in full-image coords]}.
    """
    started = time.time()
    # Grayscale the frame (self.Cached reuses a named scratch image).
    gray = self.Cached('gray', img.height, img.width, cv.CV_8UC1)
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    # Combined horizontal + vertical Sobel response.
    sobel = self.Cached('sobel', img.height, img.width, cv.CV_16SC1)
    sobely = self.Cached('sobely', img.height, img.width, cv.CV_16SC1)
    cv.Sobel(gray, sobel, 1, 0)
    cv.Sobel(gray, sobely, 0, 1)
    cv.Add(sobel, sobely, sobel)
    # Normalize to 8 bits and keep only the strong edges.
    sobel8 = self.Cached('sobel8', sobel.height, sobel.width, cv.CV_8UC1)
    absnorm8(sobel, sobel8)
    cv.Threshold(sobel8, sobel8, 128.0, 255.0, cv.CV_THRESH_BINARY)
    # Integral image lets us sum edge pixels over any d x d window in O(1).
    sobel_integral = self.Cached('sobel_integral', img.height + 1,
                                 img.width + 1, cv.CV_32SC1)
    cv.Integral(sobel8, sobel_integral)
    d = 16  # window size for the edge-density test
    _x1y1 = cv.GetSubRect(
        sobel_integral,
        (0, 0, sobel_integral.cols - d, sobel_integral.rows - d))
    _x1y2 = cv.GetSubRect(
        sobel_integral,
        (0, d, sobel_integral.cols - d, sobel_integral.rows - d))
    _x2y1 = cv.GetSubRect(
        sobel_integral,
        (d, 0, sobel_integral.cols - d, sobel_integral.rows - d))
    _x2y2 = cv.GetSubRect(
        sobel_integral,
        (d, d, sobel_integral.cols - d, sobel_integral.rows - d))
    # summation(x, y) = edge mass of the d x d window at (x, y):
    # I(x2,y2) - I(x1,y2) - I(x2,y1) + I(x1,y1).
    summation = cv.CloneMat(_x2y2)
    cv.Sub(summation, _x1y2, summation)
    cv.Sub(summation, _x2y1, summation)
    cv.Add(summation, _x1y1, summation)
    sum8 = self.Cached('sum8', summation.height, summation.width, cv.CV_8UC1)
    absnorm8(summation, sum8)
    cv.Threshold(sum8, sum8, 32.0, 255.0, cv.CV_THRESH_BINARY)
    cv.ShowImage("sum8", sum8)
    # Candidate regions = external contours of the dense-edge mask.
    seq = cv.FindContours(sum8, cv.CreateMemStorage(), cv.CV_RETR_EXTERNAL)
    subimg = cv.GetSubRect(img, (d / 2, d / 2, sum8.cols, sum8.rows))
    t_cull = time.time() - started
    seqs = []
    while seq:
        seqs.append(seq)
        seq = seq.h_next()
    started = time.time()
    found = {}
    print 'seqs', len(seqs)
    for seq in seqs:
        area = cv.ContourArea(seq)
        # Skip tiny regions — too small to hold a symbol.
        if area > 1000:
            rect = cv.BoundingRect(seq)
            # Estimated symbol edge length; currently only used in the
            # commented-out decoder hints below.
            edge = int((14 / 14.) * math.sqrt(area) / 2 + 0.5)
            candidate = cv.GetSubRect(subimg, rect)
            sym = self.dm.decode(
                candidate.width, candidate.height,
                buffer(candidate.tostring()),
                max_count=1,
                #min_edge = 6,
                #max_edge = int(edge) # Units of 2 pixels
            )
            if sym:
                # Map decoder corner stats back to full-image coordinates
                # (the subimg is offset by d/2 in both axes).
                onscreen = [(d / 2 + rect[0] + x, d / 2 + rect[1] + y)
                            for (x, y) in self.dm.stats(1)[1]]
                found[sym] = onscreen
            else:
                print "FAILED"
    t_brute = time.time() - started
    print "cull took", t_cull, "brute", t_brute
    return found
# --- Contour extraction script fragment (script beginning not shown) ---
# Convert the working image to grayscale and binarise it.
cv.CvtColor(im, image, cv.CV_BGR2GRAY)
threshold = 51
colour = 255
cv.Threshold(image, image, threshold, colour, cv.CV_THRESH_BINARY)
# Create the window for the contours.
cv.NamedWindow("contours", cv.CV_WINDOW_NORMAL)
# Create the trackbar, to enable the change of the displayed level.
cv.CreateTrackbar("levels+3", "contours", 3, 7, on_contour)
# Create the storage area for the contour image.
storage = cv.CreateMemStorage(0)
# Find the contours.
contours = cv.FindContours(image, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
# Polygon approximation of the contour chain.
contours = cv.ApproxPoly(contours, storage, cv.CV_POLY_APPROX_DP, 3, 1)
# Call the callback once so the contour window shows its first display.
on_contour(_DEFAULT_LEVEL)
cv.WaitKey(0)
# Save the rendered contour image (contours_image is a module-level global
# drawn by on_contour — TODO confirm against the rest of the script).
cv.SaveImage('C:\\3d-Model\\bin\\segmentation_files\\pic_contours.jpg', contours_image)
cv.DestroyAllWindows()
# Log contour progress to file. NOTE(review): the body of this `with`
# statement lies beyond this chunk.
with open('C:\\3d-Model\\bin\\segmentation_files\\progress.txt', 'w') as myFile:
def process_image(self, slider_pos):
    """Find contours at threshold *slider_pos* and fit ellipses or circles.

    Heavy use of module globals: cimg and source_image1 are drawing targets,
    ellipse_size is the minimum contour length, eoc selects ellipse (0) vs
    circle (1) fitting, and `iter` selects the active processing pass.
    """
    global cimg, source_image1, ellipse_size, maxf, maxs, eoc, lastcx, lastcy, lastr
    stor = cv.CreateMemStorage()
    # Create the destination images.
    cimg = cv.CloneImage(self.source_image)
    cv.Zero(cimg)
    image02 = cv.CloneImage(self.source_image)
    cv.Zero(image02)
    image04 = cv.CreateImage(cv.GetSize(self.source_image), cv.IPL_DEPTH_8U, 3)
    cv.Zero(image04)
    # Threshold the source image; cv.FindContours() needs a binary image.
    cv.Threshold(self.source_image, image02, slider_pos, 255, cv.CV_THRESH_BINARY)
    # Find all contours.
    cont = cv.FindContours(image02, stor, cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_NONE, (0, 0))
    maxf = 0
    maxs = 0
    size1 = 0
    for c in contour_iterator(cont):
        if len(c) > ellipse_size:
            # Copy the contour into an array of (x, y)s for the fitters.
            PointArray2D32f = cv.CreateMat(1, len(c), cv.CV_32FC2)
            for (i, (x, y)) in enumerate(c):
                PointArray2D32f[0, i] = (x, y)
            # Draw the current contour in gray.
            gray = cv.CV_RGB(100, 100, 100)
            cv.DrawContours(image04, c, gray, gray, 0, 1, 8, (0, 0))
            # NOTE(review): `iter` is never assigned in this function; unless
            # a module-level global shadows the builtin, these comparisons
            # test the builtin function against an int — confirm.
            if iter == 0:
                strng = segF + '/' + 'contour1.png'
                cv.SaveImage(strng, image04)
            color = (255, 255, 255)
            # Fit an ellipse to the current contour.
            (center, size, angle) = cv.FitEllipse2(PointArray2D32f)
            # Convert ellipse data from float to integer representation.
            center = (cv.Round(center[0]), cv.Round(center[1]))
            size = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5))
            if iter == 1:
                # Track the largest half-axis seen across all contours.
                if size[0] > size[1]:
                    size2 = size[0]
                else:
                    size2 = size[1]
                if size2 > size1:
                    size1 = size2
                    size3 = size
            if eoc == 0 and iter == 2:
                # Ellipse mode: accept near-circular ellipses whose mean
                # radius jumped by more than 20 from the last accepted one.
                rand_val = abs((lastr - ((size[0] + size[1]) / 2)))
                if rand_val > 20 and float(max(size[0], size[1])) / float(
                        min(size[0], size[1])) < 1.5:
                    lastcx = center[0]
                    lastcy = center[1]
                    lastr = (size[0] + size[1]) / 2
                if rand_val > 20 and float(max(size[0], size[1])) / float(
                        min(size[0], size[1])) < 1.4:
                    cv.Ellipse(cimg, center, size, angle, 0, 360, color, 2, cv.CV_AA, 0)
                    cv.Ellipse(source_image1, center, size, angle, 0, 360, color, 2, cv.CV_AA, 0)
            elif eoc == 1 and iter == 2:
                # Circle mode. NOTE(review): the first unpacked element
                # shadows the builtin `int` inside this function.
                (int, cntr, rad) = cv.MinEnclosingCircle(PointArray2D32f)
                cntr = (cv.Round(cntr[0]), cv.Round(cntr[1]))
                rad = (cv.Round(rad))
                if maxf == 0 and maxs == 0:
                    # First accepted circle.
                    cv.Circle(cimg, cntr, rad, color, 1, cv.CV_AA, shift=0)
                    cv.Circle(source_image1, cntr, rad, color, 2, cv.CV_AA, shift=0)
                    maxf = rad
                elif (maxf > 0 and maxs == 0) and abs(rad - maxf) > 30:
                    # Second circle, only if clearly different in radius.
                    cv.Circle(cimg, cntr, rad, color, 2, cv.CV_AA, shift=0)
                    cv.Circle(source_image1, cntr, rad, color, 2, cv.CV_AA, shift=0)
                    maxs = len(c)
    # After the scan: switch to circle mode when the best ellipse is too
    # eccentric. NOTE(review): nesting reconstructed from a collapsed
    # source line — size3 is only bound when a contour qualified; confirm.
    if iter == 1:
        temp3 = 2 * abs(size3[1] - size3[0])
        if (temp3 > 40):
            eoc = 1
def run(self):
    """Main surveillance loop.

    Grabs frames, maintains a running-average background, thresholds the
    frame difference, and triggers IntrusionAlarm when the changed area
    exceeds self.ceil percent of the frame surface. Exits on ESC/ENTER.
    """
    # Capture first frame to get size.
    frame = cv.QueryFrame(self.capture)
    frame_size = cv.GetSize(frame)
    width = frame.width
    height = frame.height
    surface = width * height  # surface area of the image
    cursurface = 0  # accumulated area of changed regions this frame
    grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
    difference = None
    while True:
        color_image = cv.QueryFrame(self.capture)
        # Smooth to remove false positives.
        cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)
        if not difference:
            # First iteration: seed difference, temp and moving_average.
            difference = cv.CloneImage(color_image)
            temp = cv.CloneImage(color_image)
            cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
        else:
            # Fold the new frame into the background average.
            cv.RunningAvg(color_image, moving_average, 0.020, None)
        # Convert the scale of the moving average.
        cv.ConvertScale(moving_average, temp, 1.0, 0.0)
        # Subtract the current frame from the moving average.
        cv.AbsDiff(color_image, temp, difference)
        # Convert the image so that it can be thresholded.
        cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
        cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)
        cv.Dilate(grey_image, grey_image, None, 18)  # to get object blobs
        cv.Erode(grey_image, grey_image, None, 10)
        # Find contours.
        storage = cv.CreateMemStorage(0)
        contours = cv.FindContours(grey_image, storage, cv.CV_RETR_EXTERNAL,
                                   cv.CV_CHAIN_APPROX_SIMPLE)
        backcontours = contours  # save the chain head for drawing later
        # For all contours, accumulate the area.
        while contours:
            cursurface += cv.ContourArea(contours)
            contours = contours.h_next()
        # Percentage of the frame covered by changed pixels.
        avg = (cursurface * 100) / surface
        if avg > self.ceil:
            print("Something is moving !")
            ring = IntrusionAlarm()
            ring.run()
        cursurface = 0  # reset the accumulator for the next frame
        # Draw the contours on the image.
        _red = (0, 0, 255)  # red for external contours
        _green = (0, 255, 0)  # green for internal contours
        levels = 1  # 1: external only, 2: internal as well, 3: ...
        cv.DrawContours(color_image, backcontours, _red, _green, levels, 2,
                        cv.CV_FILLED)
        cv.ShowImage("Virtual Eye", color_image)
        # Listen for ESC or ENTER key; 'c' closes the warning window.
        c = cv.WaitKey(7) % 0x100
        if c == 27 or c == 10:
            break
        elif c == 99:
            cv2.destroyWindow('Warning!!!')
def somethingHasMoved(self):
    """Detect motion in self.gray_frame.

    Splits the frame into four quadrants, publishes a quadrant's on/off
    state over MQTT whenever it changes, redraws a console mini-map, and
    returns True when the summed contour area exceeds self.threshold
    percent of the total frame surface.
    """
    # Find contours.
    storage = cv.CreateMemStorage(0)
    contours = cv.FindContours(self.gray_frame, storage, cv.CV_RETR_EXTERNAL,
                               cv.CV_CHAIN_APPROX_SIMPLE)
    #cv.ShowImage("Image", self.gray_frame)
    #print(self.gray_frame.width)
    #print(self.gray_frame.height)
    alto = self.gray_frame.height
    ancho = self.gray_frame.width
    # NOTE(review): cv mats index as [row, col] = [y, x]; slicing with ancho
    # (width) on the first axis looks transposed for non-square frames — confirm.
    q0 = self.gray_frame[ :ancho/2, :alto/2 ]
    q1 = self.gray_frame[ ancho/2:, :alto/2 ]
    q2 = self.gray_frame[ :ancho/2, alto/2: ]
    q3 = self.gray_frame[ ancho/2:, alto/2: ]
    # Count of changed (non-zero) pixels per quadrant.
    q0nz = np.count_nonzero(q0)
    q1nz = np.count_nonzero(q1)
    q2nz = np.count_nonzero(q2)
    q3nz = np.count_nonzero(q3)
    # Rewind the console three lines so the mini-map redraws in place.
    sys.stdout.write("\033[F")  # back to previous line
    sys.stdout.write("\033[K")  # clear line
    sys.stdout.write("\033[F")
    sys.stdout.write("\033[K")
    sys.stdout.write("\033[F")
    sys.stdout.write("\033[K")
    if q0nz or q1nz or q2nz or q3nz:
        print("Algo se mueve !")
        # Publish a quadrant only when its state changed.
        # NOTE(review): m2 publishes with topic id "3" and m3 with "2" —
        # looks swapped; confirm against the subscriber's quadrant layout.
        if q0nz != self.m0:
            self.m0 = q0nz
            self.mqtt.publish("0:%i" % (self.m0), 'camaras')
        if q1nz != self.m1:
            self.m1 = q1nz
            self.mqtt.publish("1:%i" % (self.m1), 'camaras')
        if q2nz != self.m2:
            self.m2 = q2nz
            self.mqtt.publish("3:%i" % (self.m2), 'camaras')
        if q3nz != self.m3:
            self.m3 = q3nz
            self.mqtt.publish("2:%i" % (self.m3), 'camaras')
    else:
        print("")
    # Console mini-map of which quadrants are currently active.
    print("{0} {1}\n{2} {3}".format(blockchar(q0nz), blockchar(q2nz), blockchar(q1nz), blockchar(q3nz)))
    self.currentcontours = contours  # save contours for later drawing
    # For all contours, accumulate the area.
    while contours:
        self.currentsurface += cv.ContourArea(contours)
        contours = contours.h_next()
    # Percentage of the frame covered by changed pixels.
    avg = (self.currentsurface*100)/self.surface
    self.currentsurface = 0  # reset the accumulator for the next frame
    #print(avg)
    if avg > self.threshold:
        return True
    else:
        return False
def run(self):
    """Main tracking loop.

    Grabs frames via self.get_image2(), maintains a running-average
    background, boxes each moving blob in red and draws concentric target
    circles at the averaged centre of all boxes. Exits on ESC.
    """
    # Capture first frame to get size.
    frame = self.get_image2()
    frame_size = cv.GetSize(frame)
    color_image = cv.CreateImage(frame_size, 8, 3)
    grey_image = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    moving_average = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 3)
    first = True
    while True:
        # NOTE(review): these hold the frame width/height but are never
        # updated or read again below — likely vestigial.
        closest_to_left = cv.GetSize(frame)[0]
        closest_to_right = cv.GetSize(frame)[1]
        print "getting Image"
        color_image = self.get_image2()
        print "got image"
        # Smooth to get rid of false positives.
        cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)
        if first:
            # First iteration: seed difference, temp and the average.
            difference = cv.CloneImage(color_image)
            temp = cv.CloneImage(color_image)
            cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
            first = False
        else:
            # Fold the new frame into the background average.
            cv.RunningAvg(color_image, moving_average, 0.30, None)
        # Convert the scale of the moving average.
        cv.ConvertScale(moving_average, temp, 1.0, 0.0)
        # Subtract the current frame from the moving average.
        cv.AbsDiff(color_image, temp, difference)
        # Convert the image to grayscale.
        cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
        # Convert the image to black and white.
        cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)
        # Dilate and erode to get people blobs.
        cv.Dilate(grey_image, grey_image, None, 18)
        cv.Erode(grey_image, grey_image, None, 10)
        storage = cv.CreateMemStorage(0)
        contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP,
                                  cv.CV_CHAIN_APPROX_SIMPLE)
        points = []
        while contour:
            # Bounding box of each moving blob, drawn in red.
            bound_rect = cv.BoundingRect(list(contour))
            contour = contour.h_next()
            pt1 = (bound_rect[0], bound_rect[1])
            pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
            points.append(pt1)
            points.append(pt2)
            cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)
        if len(points):
            # Pairwise-average all box corners to get a rough target centre.
            center_point = reduce(
                lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
            cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
            cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
            cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
            cv.Circle(color_image, center_point, 10, cv.CV_RGB(255, 100, 0), 1)
        cv.ShowImage("Target", color_image)
        # Listen for ESC key.
        c = cv.WaitKey(7) % 0x100
        if c == 27:
            break
# --- Watershed segmentation interaction loop (script fragment) ---
# The Sketcher lets the user paint seed markers into marker_mask.
sk = Sketcher("image", [img, marker_mask])
while True:
    c = cv.WaitKey(0) % 0x100
    # ESC or 'q' quits.
    if c == 27 or c == ord('q'):
        break
    # 'r' resets the markers and restores the original image.
    if c == ord('r'):
        cv.Zero(marker_mask)
        cv.Copy(img0, img)
        cv.ShowImage("image", img)
    # 'w' runs the watershed from the painted markers.
    if c == ord('w'):
        storage = cv.CreateMemStorage(0)
        #cv.SaveImage("wshed_mask.png", marker_mask)
        #marker_mask = cv.LoadImage("wshed_mask.png", 0)
        contours = cv.FindContours(marker_mask, storage, cv.CV_RETR_CCOMP,
                                   cv.CV_CHAIN_APPROX_SIMPLE)

        # Walk the h_next chain of a contour sequence.
        def contour_iterator(contour):
            while contour:
                yield contour
                contour = contour.h_next()

        # Label each painted component with a distinct integer id; those
        # labels seed cv.Watershed.
        cv.Zero(markers)
        comp_count = 0
        for c in contour_iterator(contours):
            cv.DrawContours(markers, c,
                            cv.ScalarAll(comp_count + 1),
                            cv.ScalarAll(comp_count + 1), -1, -1, 8)
            comp_count += 1
        cv.Watershed(img0, markers)
def compare_2_formes(Image1, Image2):
    """Compare the shapes of the largest forms found in two images.

    Each image is thresholded, its contours extracted, and every contour
    larger than `mincontour` is polygon-approximated; the last such
    approximation from each image is compared with cv.MatchShapes.
    Returns the match score (lower means more similar), or 0 when either
    image yields no contours.
    """
    mincontour = 500  # minimum size of a form to be detected
    CVCONTOUR_APPROX_LEVEL = 5  # accuracy parameter for cv.ApproxPoly
    img_edge1 = cv.CreateImage(cv.GetSize(Image1), 8, 1)  # edge image
    # img1_8uc3=cv.CreateImage(cv.GetSize(Image1),8,3)
    img_edge2 = cv.CreateImage(cv.GetSize(Image2), 8, 1)
    # img2_8uc3=cv.CreateImage(cv.GetSize(Image2),8,3)
    cv.Threshold(Image1, img_edge1, 123, 255, cv.CV_THRESH_BINARY)  # filter threshold
    cv.Threshold(Image2, img_edge2, 123, 255, cv.CV_THRESH_BINARY)
    storage1 = cv.CreateMemStorage()
    storage2 = cv.CreateMemStorage()
    first_contour1 = cv.FindContours(
        img_edge1, storage1)  # pointer to the first edge of form 1
    first_contour2 = cv.FindContours(
        img_edge2, storage2)  # pointer to the first edge of form 2
    newseq = first_contour1
    newseq2 = first_contour2
    if not (first_contour1) or not (first_contour2):
        return 0
    current_contour = first_contour1
    while 1:
        # Walk the h_next chain of the first form's contours.
        current_contour = current_contour.h_next(
        )
        if (not (current_contour)
            ):  # stop condition once the contour pointer is NULL
            break
        if cv.ContourArea(current_contour) > mincontour:
            newseq = cv.ApproxPoly(current_contour, storage1,
                                   cv.CV_POLY_APPROX_DP,
                                   CVCONTOUR_APPROX_LEVEL, 0)
    # cv.CvtColor(Image1,img1_8uc3,cv.CV_GRAY2BGR );
    # cv.DrawContours(img1_8uc3,newseq,cv.CV_RGB(0,255,0),cv.CV_RGB(255,0,0),0,2,8);
    # cv.NamedWindow("ContourImage2",cv.CV_WINDOW_AUTOSIZE)
    # cv.ShowImage("ContourImage2",img1_8uc3)
    current_contour = first_contour2  # walk the second form's contours
    while 1:
        current_contour = current_contour.h_next()
        if (not (current_contour)):
            break
        if cv.ContourArea(current_contour) > mincontour:
            newseq2 = cv.ApproxPoly(current_contour, storage2,
                                    cv.CV_POLY_APPROX_DP,
                                    CVCONTOUR_APPROX_LEVEL, 0)
    # cv.CvtColor(Image2,img2_8uc3,cv.CV_GRAY2BGR);
    # cv.DrawContours(img2_8uc3,newseq2,cv.CV_RGB(0,255,0),cv.CV_RGB(255,0,0),0,2,8);
    # cv.NamedWindow("ContourImage",cv.CV_WINDOW_AUTOSIZE)
    # NOTE(review): the next line references img2_8uc3, which is only created
    # in commented-out code above — it raises NameError if executed; it was
    # probably meant to stay commented out like its neighbours. Confirm.
    cv.ShowImage("ContourImage",img2_8uc3)
    matchresult = 1
    matchresult = cv.MatchShapes(newseq, newseq2, 1, 2)
    return matchresult
def procImg(img, sideName, dispFlag):
    """Threshold *img* for the tracked colour, box the qualifying blob and
    return its centroid as (x, y); (0, 0) means nothing was detected.

    sideName labels the console output and preview windows; dispFlag turns
    on debug printing and the preview windows.
    """
    # Working images: a zeroed drawing canvas and the smoothed frame.
    imdraw = cv.CreateImage(cv.GetSize(img), 8, 3)
    imgSmooth = cv.CreateImage(cv.GetSize(img), 8, 3)
    cv.SetZero(imdraw)
    # Gaussian filter the image, then colour-threshold it to a binary mask.
    cv.Smooth(img, imgSmooth, cv.CV_GAUSSIAN, 3, 0)
    imgbluethresh = getthresholdedimg(imgSmooth)
    # Erode specks away, then dilate to solidify the blob.
    cv.Erode(imgbluethresh, imgbluethresh, None, 3)
    cv.Dilate(imgbluethresh, imgbluethresh, None, 10)
    storage = cv.CreateMemStorage(0)
    contour = cv.FindContours(imgbluethresh, storage, cv.CV_RETR_CCOMP,
                              cv.CV_CHAIN_APPROX_SIMPLE)
    centroidx = 0
    centroidy = 0
    prevArea = 0
    pt1 = (0, 0)
    pt2 = (0, 0)
    while contour:
        # Bounding box of this blob. Only blobs above the 3000-pixel area
        # floor update the centroid (prevArea is never raised, so each
        # qualifying blob overwrites the previous result).
        box = cv.BoundingRect(list(contour))
        contour = contour.h_next()
        area = box[2] * box[3]
        if dispFlag:
            print("Area= " + str(area))
        if (area > prevArea and area > 3000):
            pt1 = (box[0], box[1])
            pt2 = (box[0] + box[2], box[1] + box[3])
            # Draw the bounding rectangle and compute its centre.
            cv.Rectangle(img, pt1, pt2, cv.CV_RGB(255, 0, 0), 3)
            centroidx = cv.Round((pt1[0] + pt2[0]) / 2)
            centroidy = cv.Round((pt1[1] + pt2[1]) / 2)
    if (centroidx == 0 or centroidy == 0):
        print("no blimp detected from " + sideName)
    else:
        print(sideName + " centroid x:" + str(centroidx))
        print(sideName + " centroid y:" + str(centroidy))
        print("")
    if dispFlag:
        # Quarter-size previews of the threshold mask and the HSV frame.
        small_thresh = cv.CreateImage(
            (int(0.25 * cv.GetSize(imgbluethresh)[0]),
             int(0.25 * cv.GetSize(imgbluethresh)[1])), 8, 1)
        cv.Resize(imgbluethresh, small_thresh)
        cv.ShowImage(sideName + "_threshold", small_thresh)
        cv.WaitKey(100)
        small_hsv = cv.CreateImage(
            (int(0.25 * cv.GetSize(imghsv)[0]),
             int(0.25 * cv.GetSize(imghsv)[1])), 8, 3)
        cv.Resize(imghsv, small_hsv)
        cv.ShowImage(sideName + "_hsv", small_hsv)
        cv.WaitKey(100)
    return (centroidx, centroidy)
import cv2.cv as cv

# Load the source image; keep the colour original so contours can be drawn
# on it at the end.
orig = cv.LoadImage('meinv.jpg', cv.CV_LOAD_IMAGE_COLOR)
im = cv.CreateImage(cv.GetSize(orig), 8, 1)
cv.CvtColor(orig, im, cv.CV_BGR2GRAY)
cv.Threshold(im, im, 128, 255, cv.CV_THRESH_BINARY)
cv.ShowImage("Threshold 1", im)
# Open then close with an 11x11 rectangular kernel to make contours appear,
# then invert the binarisation.
element = cv.CreateStructuringElementEx(5*2+1, 5*2+1, 5, 5, cv.CV_SHAPE_RECT)
cv.MorphologyEx(im, im, None, element, cv.CV_MOP_OPEN)
cv.MorphologyEx(im, im, None, element, cv.CV_MOP_CLOSE)
cv.Threshold(im, im, 128, 255, cv.CV_THRESH_BINARY_INV)
cv.ShowImage("After MorphologyEx", im)
# --------------------------------
vals = cv.CloneImage(im)  # clone because FindContours can modify the image
contours=cv.FindContours(vals, cv.CreateMemStorage(0), cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_SIMPLE, (0,0))
_red = (0, 0, 255);  # red for external contours
_green = (0, 255, 0);  # green for internal contours
levels=2  # 1: external contours only, 2: internal as well, 3: ...
# Draw the contours on the colour image and display it.
cv.DrawContours (orig, contours, _red, _green, levels, 2, cv.CV_FILLED)
cv.ShowImage("Image", orig)
cv.WaitKey(0)
def try_thresh(self):
    """Threshold the image at self.cur_thresh, find contours, and feed each
    one to self.try_contour(), optionally dumping an annotated image per
    threshold into thresh_dir."""
    dbg('', level=2)
    dbg('thres %d, best so far %s' % (self.cur_thresh, self.best_diff), level=1)
    line_len = vertex_len(self.line[0], self.line[1])
    # Needs to at least squiggle back and forth.
    self.min_len = 2 * line_len
    self.total_area = self.image.width * self.image.height
    # Select this by some metric with tagged features, say smallest - 10%.
    self.min_area = 100.0
    self.max_area = self.total_area * 0.9
    dbg('Size: %dw X %dh = %g' % (self.image.width, self.image.height, self.total_area), level=3)
    size = cv.GetSize(self.image)
    dbg('Size: %s' % (size, ), level=3)
    self.gray_img = cv.CreateImage(size, 8, 1)
    storage = cv.CreateMemStorage(0)
    cv.CvtColor(self.image, self.gray_img, cv.CV_BGR2GRAY)
    cv.Threshold(self.gray_img, self.gray_img, self.cur_thresh, 255, cv.CV_THRESH_BINARY)
    # Disabled debug dump of the intermediate black & white image.
    if 0 and self.cur_thresh == 70:
        dbg('Saving intermediate B&W')
        cv.SaveImage('img_thresh.png', self.gray_img)
    # Reference: the underlying C signature for FindContours.
    '''
    int cvFindContours( IplImage* img, CvMemStorage* storage, CvSeq** firstContour, int headerSize = sizeof(CvContour), CvContourRetrievalMode mode = CV_RETR_LIST, CvChainApproxMethod method = CV_CHAIN_APPROX_SIMPLE );
    '''
    self.contour_begin = cv.FindContours(self.gray_img, storage)
    if draw_thresh_contours:
        '''
        B&W background but color highlighting
        actually the image is missing...but looks fine so w/e
        '''
        self.contours_map = cv.CreateImage(cv.GetSize(self.image), 8, 3)
        # Sort of works although not very well...does not look like the gray image
        #cv.Zero(self.contours_map)
        cv.CvtColor(self.gray_img, self.contours_map, cv.CV_GRAY2BGR)
    # Evaluate every contour at this threshold; try_contour reads
    # self.cur_contour / self.contouri as implicit arguments.
    self.contouri = -1
    for self.cur_contour in icontours(self.contour_begin):
        self.try_contour()
    if draw_thresh_contours:
        # TODO: save image instead
        if 0:
            cv.ShowImage("Contours", self.contours_map)
            cv.WaitKey()
        else:
            cv.SaveImage(
                os.path.join(thresh_dir, '%03d.png' % self.cur_thresh),
                self.contours_map)
def findSquares4(img, storage):
    """Port of the OpenCV squares.c sample: collect 4-point squares found in
    *img* across all colour planes and N threshold levels.

    NOTE(review): this port looks incomplete/broken — `sz` is a tuple but is
    later accessed as sz.width/sz.height, and cv.Rect, sizeof_CvSeq,
    sizeof_CvPoint, sizeof_CvContour and CvSeq_CvPoint are not part of the
    legacy cv Python API. Verify it actually runs before relying on it.
    """
    N = 11
    # Width/height rounded down to even numbers.
    sz = (img.width & -2, img.height & -2)
    timg = cv.CloneImage(img); # make a copy of input image
    gray = cv.CreateImage(sz, 8, 1)
    pyr = cv.CreateImage((sz.width/2, sz.height/2), 8, 3)
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.CreateSeq(0, sizeof_CvSeq, sizeof_CvPoint, storage)
    squares = CvSeq_CvPoint.cast(squares)
    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.GetSubRect(timg, cv.Rect(0, 0, sz.width, sz.height))
    # down-scale and upscale the image to filter out the noise
    cv.PyrDown(subimage, pyr, 7)
    cv.PyrUp(pyr, subimage, 7)
    tgray = cv.CreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cv.Split(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if(l == 0):
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.Canny(tgray, gray, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.Dilate(gray, gray, None, 1)
            else:
                # apply threshold if l!=0:
                # tgray(x, y) = gray(x, y) < (l+1)*255/N ? 255 : 0
                cv.Threshold(tgray, gray, (l+1)*255/N, 255, cv.CV_THRESH_BINARY)
            # find contours and store them all as a list
            count, contours = cv.FindContours(gray, storage, sizeof_CvContour,
                cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
            if not contours:
                continue
            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.ApproxPoly(contour, sizeof_CvContour, storage,
                    cv.CV_POLY_APPROX_DP, cv.ContourPerimeter(contours)*0.02, 0)
                # square contours should have 4 vertices after approximation
                # relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of an area is used because
                # area may be positive or negative - in accordance with the
                # contour orientation
                if(result.total == 4 and
                        abs(cv.ContourArea(result)) > 1000 and
                        cv.CheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if(i >= 2):
                            t = abs(angle(result[i], result[i-2], result[i-1]))
                            if s<t:
                                s=t
                    # if cosines of all angles are small
                    # (all angles are ~90 degree) then write quandrange
                    # vertices to resultant sequence
                    if(s < 0.3):
                        for i in range(4):
                            squares.append(result[i])
    return squares
def run(self): # Initialize # log_file_name = "tracker_output.log" # log_file = file( log_file_name, 'a' ) print "hello" frame = cv.QueryFrame(self.capture) frame_size = cv.GetSize(frame) # Capture the first frame from webcam for image properties display_image = cv.QueryFrame(self.capture) # Greyscale image, thresholded to create the motion mask: grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1) # The RunningAvg() function requires a 32-bit or 64-bit image... running_average_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3) # ...but the AbsDiff() function requires matching image depths: running_average_in_display_color_depth = cv.CloneImage(display_image) # RAM used by FindContours(): mem_storage = cv.CreateMemStorage(0) # The difference between the running average and the current frame: difference = cv.CloneImage(display_image) target_count = 1 last_target_count = 1 last_target_change_t = 0.0 k_or_guess = 1 codebook = [] frame_count = 0 last_frame_entity_list = [] t0 = time.time() # For toggling display: image_list = ["camera", "difference", "threshold", "display", "faces"] image_index = 3 # Index into image_list # Prep for text drawing: text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1, cv.CV_AA) text_coord = (5, 15) text_color = cv.CV_RGB(255, 255, 255) # Set this to the max number of targets to look for (passed to k-means): max_targets = 5 while True: # Capture frame from webcam camera_image = cv.QueryFrame(self.capture) frame_count += 1 frame_t0 = time.time() # Create an image with interactive feedback: display_image = cv.CloneImage(camera_image) # Create a working "color image" to modify / blur color_image = cv.CloneImage(display_image) # Smooth to get rid of false positives cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0) # Use the Running Average as the static background # a = 0.020 leaves artifacts lingering way too long. # a = 0.320 works well at 320x240, 15fps. (1/a is roughly num frames.) 
cv.RunningAvg(color_image, running_average_image, 0.320, None) # cv.ShowImage("background ", running_average_image) # Convert the scale of the moving average. cv.ConvertScale(running_average_image, running_average_in_display_color_depth, 1.0, 0.0) # Subtract the current frame from the moving average. cv.AbsDiff(color_image, running_average_in_display_color_depth, difference) cv.ShowImage("difference ", difference) # Convert the image to greyscale. cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY) # Threshold the image to a black and white motion mask: cv.Threshold(grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY) # Smooth and threshold again to eliminate "sparkles" cv.Smooth(grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0) cv.Threshold(grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY) grey_image_as_array = numpy.asarray(cv.GetMat(grey_image)) non_black_coords_array = numpy.where(grey_image_as_array > 3) # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples: non_black_coords_array = zip(non_black_coords_array[1], non_black_coords_array[0]) points = [ ] # Was using this to hold either pixel coords or polygon coords. 
bounding_box_list = [] # Now calculate movements using the white pixels as "motion" data contour = cv.FindContours(grey_image, mem_storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE) levels = 10 while contour: bounding_rect = cv.BoundingRect(list(contour)) point1 = (bounding_rect[0], bounding_rect[1]) point2 = (bounding_rect[0] + bounding_rect[2], bounding_rect[1] + bounding_rect[3]) bounding_box_list.append((point1, point2)) polygon_points = cv.ApproxPoly(list(contour), mem_storage, cv.CV_POLY_APPROX_DP) # To track polygon points only (instead of every pixel): # points += list(polygon_points) # Draw the contours: cv.DrawContours(color_image, contour, cv.CV_RGB(255, 0, 0), cv.CV_RGB(0, 255, 0), levels, 3, 0, (0, 0)) cv.FillPoly(grey_image, [ list(polygon_points), ], cv.CV_RGB(255, 255, 255), 0, 0) cv.PolyLine(display_image, [ polygon_points, ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0) # cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1) contour = contour.h_next() # Find the average size of the bbox (targets), then # remove any tiny bboxes (which are prolly just noise). # "Tiny" is defined as any box with 1/10th the area of the average box. # This reduces false positives on tiny "sparkles" noise. 
box_areas = [] for box in bounding_box_list: box_width = box[right][0] - box[left][0] box_height = box[bottom][0] - box[top][0] box_areas.append(box_width * box_height) # cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1) average_box_area = 0.0 if len(box_areas): average_box_area = float(sum(box_areas)) / len(box_areas) trimmed_box_list = [] for box in bounding_box_list: box_width = box[right][0] - box[left][0] box_height = box[bottom][0] - box[top][0] # Only keep the box if it's not a tiny noise box: if (box_width * box_height) > average_box_area * 0.1: trimmed_box_list.append(box) # Draw the trimmed box list: # for box in trimmed_box_list: # cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 ) bounding_box_list = merge_collided_bboxes(trimmed_box_list) # Draw the merged box list: for box in bounding_box_list: cv.Rectangle(display_image, box[0], box[1], cv.CV_RGB(0, 255, 0), 1) # Here are our estimate points to track, based on merged & trimmed boxes: estimated_target_count = len(bounding_box_list) # Don't allow target "jumps" from few to many or many to few. # Only change the number of targets up to one target per n seconds. # This fixes the "exploding number of targets" when something stops moving # and the motion erodes to disparate little puddles all over the place. if frame_t0 - last_target_change_t < .350: # 1 change per 0.35 secs estimated_target_count = last_target_count else: if last_target_count - estimated_target_count > 1: estimated_target_count = last_target_count - 1 if estimated_target_count - last_target_count > 1: estimated_target_count = last_target_count + 1 last_target_change_t = frame_t0 # Clip to the user-supplied maximum: estimated_target_count = min(estimated_target_count, max_targets) # The estimated_target_count at this point is the maximum number of targets # we want to look for. If kmeans decides that one of our candidate # bboxes is not actually a target, we remove it from the target list below. 
# Using the numpy values directly (treating all pixels as points): points = non_black_coords_array center_points = [] if len(points): # If we have all the "target_count" targets from last frame, # use the previously known targets (for greater accuracy). k_or_guess = max(estimated_target_count, 1) # Need at least one target to look for. if len(codebook) == estimated_target_count: k_or_guess = codebook # points = vq.whiten(array( points )) # Don't do this! Ruins everything. codebook, distortion = vq.kmeans(array(points), k_or_guess) # Convert to tuples (and draw it to screen) for center_point in codebook: center_point = (int(center_point[0]), int(center_point[1])) center_points.append(center_point) # cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2) # cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3) # Now we have targets that are NOT computed from bboxes -- just # movement weights (according to kmeans). If any two targets are # within the same "bbox count", average them into a single target. # # (Any kmeans targets not within a bbox are also kept.) trimmed_center_points = [] removed_center_points = [] for box in bounding_box_list: # Find the centers within this box: center_points_in_box = [] for center_point in center_points: if center_point[0] < box[right][0] and center_point[0] > box[left][0] and \ center_point[1] < box[bottom][1] and center_point[1] > box[top][1] : # This point is within the box. center_points_in_box.append(center_point) # Now see if there are more than one. If so, merge them. 
if len(center_points_in_box) > 1: # Merge them: x_list = y_list = [] for point in center_points_in_box: x_list.append(point[0]) y_list.append(point[1]) average_x = int(float(sum(x_list)) / len(x_list)) average_y = int(float(sum(y_list)) / len(y_list)) trimmed_center_points.append((average_x, average_y)) # Record that they were removed: removed_center_points += center_points_in_box if len(center_points_in_box) == 1: trimmed_center_points.append( center_points_in_box[0]) # Just use it. # If there are any center_points not within a bbox, just use them. # (It's probably a cluster comprised of a bunch of small bboxes.) for center_point in center_points: if (not center_point in trimmed_center_points) and ( not center_point in removed_center_points): trimmed_center_points.append(center_point) # Draw what we found: # for center_point in trimmed_center_points: # center_point = ( int(center_point[0]), int(center_point[1]) ) # cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1) # cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1) # cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2) # cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3) # Determine if there are any new (or lost) targets: actual_target_count = len(trimmed_center_points) last_target_count = actual_target_count # Now build the list of physical entities (objects) this_frame_entity_list = [] # An entity is list: [ name, color, last_time_seen, last_known_coords ] for target in trimmed_center_points: # Is this a target near a prior entity (same physical entity)? 
entity_found = False entity_distance_dict = {} for entity in last_frame_entity_list: entity_coords = entity[3] delta_x = entity_coords[0] - target[0] delta_y = entity_coords[1] - target[1] distance = sqrt(pow(delta_x, 2) + pow(delta_y, 2)) entity_distance_dict[distance] = entity # Did we find any non-claimed entities (nearest to furthest): distance_list = entity_distance_dict.keys() distance_list.sort() for distance in distance_list: # Yes; see if we can claim the nearest one: nearest_possible_entity = entity_distance_dict[distance] # Don't consider entities that are already claimed: if nearest_possible_entity in this_frame_entity_list: # print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] ) continue # print "Target %s: USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1]) # Found the nearest entity to claim: entity_found = True nearest_possible_entity[ 2] = frame_t0 # Update last_time_seen nearest_possible_entity[ 3] = target # Update the new location this_frame_entity_list.append(nearest_possible_entity) # log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_t0, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1] ) ) break if entity_found == False: # It's a new entity. color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) name = hashlib.md5(str(frame_t0) + str(color)).hexdigest()[:6] last_time_seen = frame_t0 new_entity = [name, color, last_time_seen, target] this_frame_entity_list.append(new_entity) # log_file.write( "%.3f FOUND %s %d %d\n" % ( frame_t0, new_entity[0], new_entity[3][0], new_entity[3][1] ) ) # Now "delete" any not-found entities which have expired: entity_ttl = 1.0 # 1 sec. for entity in last_frame_entity_list: last_time_seen = entity[2] if frame_t0 - last_time_seen > entity_ttl: # It's gone. 
# log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_t0, entity[0], entity[3][0], entity[3][1] ) ) pass else: # Save it for next time... not expired yet: this_frame_entity_list.append(entity) # For next frame: last_frame_entity_list = this_frame_entity_list # Draw the found entities to screen: for entity in this_frame_entity_list: center_point = entity[3] c = entity[1] # RGB color tuple cv.Circle(display_image, center_point, 20, cv.CV_RGB(c[0], c[1], c[2]), 1) cv.Circle(display_image, center_point, 15, cv.CV_RGB(c[0], c[1], c[2]), 1) cv.Circle(display_image, center_point, 10, cv.CV_RGB(c[0], c[1], c[2]), 2) cv.Circle(display_image, center_point, 5, cv.CV_RGB(c[0], c[1], c[2]), 3) # print "min_size is: " + str(min_size) # Listen for ESC or ENTER key c = cv.WaitKey(7) % 0x100 if c == 27 or c == 10: break # Toggle which image to show # if chr(c) == 'd': # image_index = ( image_index + 1 ) % len( image_list ) # # image_name = image_list[ image_index ] # # # Display frame to user # if image_name == "camera": # image = camera_image # cv.PutText( image, "Camera (Normal)", text_coord, text_font, text_color ) # elif image_name == "difference": # image = difference # cv.PutText( image, "Difference Image", text_coord, text_font, text_color ) # elif image_name == "display": # image = display_image # cv.PutText( image, "Targets (w/AABBs and contours)", text_coord, text_font, text_color ) # elif image_name == "threshold": # # Convert the image to color. 
# cv.CvtColor( grey_image, display_image, cv.CV_GRAY2RGB ) # image = display_image # Re-use display image here # cv.PutText( image, "Motion Mask", text_coord, text_font, text_color ) # elif image_name == "faces": # # Do face detection # detect_faces( camera_image, haar_cascade, mem_storage ) # image = camera_image # Re-use camera image here # cv.PutText( image, "Face Detection", text_coord, text_font, text_color ) # cv.ShowImage( "Target", image ) image1 = display_image cv.ShowImage("Target 1", image1) # if self.writer: # cv.WriteFrame( self.writer, image ); # log_file.flush() # If only using a camera, then there is no time.sleep() needed, # because the camera clips us to 15 fps. But if reading from a file, # we need this to keep the time-based target clipping correct: frame_t1 = time.time() # If reading from a file, put in a forced delay: if not self.writer: delta_t = frame_t1 - frame_t0 if delta_t < (1.0 / 15.0): time.sleep((1.0 / 15.0) - delta_t) t1 = time.time() time_delta = t1 - t0 processed_fps = float(frame_count) / time_delta print "Got %d frames. %.1f s. %f fps." % (frame_count, time_delta, processed_fps)
def run(self):
    """Capture loop: background-subtraction blob tracker that draws the
    min-area rotated rectangle around each sufficiently large blob.

    Per frame: running-average background, AbsDiff + threshold into a
    motion mask, dilate/erode, then for each contour draw its approximated
    polygon and min-area box, and mark the last blob's center.
    Exits on ESC.
    """
    # Capture first frame to get size
    frame = cv.QueryFrame(self.capture)
    #nframes =+ 1
    frame_size = cv.GetSize(frame)
    color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
    grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)

    def totuple(a):
        # Recursively convert a nested numpy array into nested tuples.
        try:
            return tuple(totuple(i) for i in a)
        except TypeError:
            return a

    first = True
    while True:
        closest_to_left = cv.GetSize(frame)[0]
        closest_to_right = cv.GetSize(frame)[1]
        color_image = cv.QueryFrame(self.capture)
        # Smooth to get rid of false positives
        cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)
        if first:
            difference = cv.CloneImage(color_image)
            temp = cv.CloneImage(color_image)
            cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
            first = False
        else:
            cv.RunningAvg(color_image, moving_average, .1, None)
            cv.ShowImage("BG", moving_average)
        # Convert the scale of the moving average.
        cv.ConvertScale(moving_average, temp, 1, 0.0)
        # Minus the current frame from the moving average.
        cv.AbsDiff(color_image, temp, difference)
        #cv.ShowImage("BG",difference)
        # Convert the image to grayscale.
        cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
        cv.ShowImage("BG1", grey_image)
        # Convert the image to black and white.
        cv.Threshold(grey_image, grey_image, 40, 255, cv.CV_THRESH_BINARY)
        #cv.ShowImage("BG2", grey_image)
        # Dilate and erode to get people blobs
        cv.Dilate(grey_image, grey_image, None, 8)
        cv.Erode(grey_image, grey_image, None, 3)
        cv.ShowImage("BG3", grey_image)
        storage = cv.CreateMemStorage(0)
        global contour
        contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP,
                                  cv.CV_CHAIN_APPROX_SIMPLE)
        points = []
        while contour:
            global bound_rect
            bound_rect = cv.BoundingRect(list(contour))
            polygon_points = cv.ApproxPoly(list(contour), storage,
                                           cv.CV_POLY_APPROX_DP)
            contour = contour.h_next()
            global pt1, pt2
            pt1 = (bound_rect[0], bound_rect[1])
            pt2 = (bound_rect[0] + bound_rect[2],
                   bound_rect[1] + bound_rect[3])
            # size control: only keep blobs wider and taller than 10px.
            # FIX: the original tested bound_rect[0]-bound_rect[2] (x minus
            # width) and bound_rect[1]-bound_rect[3] (y minus height), which
            # is a position test, not a size test; bound_rect is (x,y,w,h).
            if (bound_rect[2] > 10) and (bound_rect[3] > 10):
                points.append(pt1)
                points.append(pt2)
                #points += list(polygon_points)
                global box, box2, box3, box4, box5
                box = cv.MinAreaRect2(polygon_points)
                box2 = cv.BoxPoints(box)
                box3 = np.int0(np.around(box2))
                box4 = totuple(box3)
                # Close the polygon by repeating the first vertex.
                box5 = box4 + (box4[0], )
                cv.FillPoly(grey_image, [list(polygon_points), ],
                            cv.CV_RGB(255, 255, 255), 0, 0)
                cv.PolyLine(color_image, [polygon_points, ], 0,
                            cv.CV_RGB(255, 255, 255), 1, 0, 0)
                cv.PolyLine(color_image, [list(box5)], 0, (0, 0, 255), 2)
                #cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)
        if len(points):
            # Mark the center of the LAST blob processed this frame.
            #center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
            center1 = (pt1[0] + pt2[0]) / 2
            center2 = (pt1[1] + pt2[1]) / 2
            #print center1, center2, center_point
            #cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
            #cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
            #cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
            cv.Circle(color_image, (center1, center2), 5,
                      cv.CV_RGB(0, 0, 255), -1)
        cv.ShowImage("Target", color_image)
        # Listen for ESC key
        c = cv.WaitKey(7) % 0x100
        if c == 27:
            #cv.DestroyAllWindows()
            break
middle_w = 2 #Cross Center width 十字中心歸零校正,數字越大線往左 middle_h = 2 #Cross Center height 十字中心歸零校正,數字越大線往上 cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, width) cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height) while True: img = cv.QueryFrame(capture) cv.Smooth(img, img, cv.CV_BLUR, 3) hue_img = cv.CreateImage(cv.GetSize(img), 8, 3) cv.CvtColor(img, hue_img, cv.CV_BGR2HSV) threshold_img = cv.CreateImage(cv.GetSize(hue_img), 8, 1) cv.InRangeS(hue_img, (38, 120, 60), (75, 255, 255), threshold_img) storage = cv.CreateMemStorage(0) contour = cv.FindContours(threshold_img, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE) points = [] while contour: rect = cv.BoundingRect(list(contour)) contour = contour.h_next() size = (rect[2] * rect[3]) if size > 100: pt1 = (rect[0], rect[1]) pt2 = (rect[0] + rect[2], rect[1] + rect[3]) cv.Rectangle(img, pt1, pt2, cv.CV_RGB(255, 0, 0), 3) ######################################## contour center #################################################### cx = (rect[2] / 2 + rect[0]) #(0,0)################ cy = (rect[3] / 2 + rect[1]) ###################### print cx, cy ######################<--160x120 pix
def findBlob(rgbimage, hsvimage, maskimage, blobimage, hsvcolorrange,
             hsvmin, hsvmax):
    """Track the largest blob whose HSV values fall in the given range.

    rgbimage      -- input BGR frame; bounding boxes / the tracked point
                     are drawn onto it as a side effect
    hsvimage      -- scratch image that receives the HSV conversion
    maskimage     -- scratch binary mask (consumed by FindContours)
    blobimage     -- receives a copy of the mask before contour extraction
    hsvcolorrange -- (h, s, v) tolerance widened around hsvmin/hsvmax
    hsvmin/hsvmax -- nominal lower/upper HSV bounds (not mutated)

    Returns (x, y, 1.0) for the center of the largest matching blob, or
    None when no contour is found.
    """
    cv.CvtColor(rgbimage, hsvimage, cv.CV_BGR2HSV)
    # Widen the range by the tolerance and clamp each channel to [0, 255].
    # (Rebinds the local names only; the caller's lists are untouched.)
    hsvmin = [max(0, hsvmin[i] - hsvcolorrange[i]) for i in range(3)]
    hsvmax = [min(255, hsvmax[i] + hsvcolorrange[i]) for i in range(3)]
    cv.InRangeS(hsvimage,
                cv.Scalar(hsvmin[0], hsvmin[1], hsvmin[2]),
                cv.Scalar(hsvmax[0], hsvmax[1], hsvmax[2]), maskimage)
    # Morphological open (erode then dilate) to drop speckle noise.
    element = cv.CreateStructuringElementEx(5, 5, 2, 2, cv.CV_SHAPE_RECT)
    cv.Erode(maskimage, maskimage, element, 1)
    cv.Dilate(maskimage, maskimage, element, 1)
    storage = cv.CreateMemStorage(0)
    cv.Copy(maskimage, blobimage)  # FindContours modifies maskimage in place
    contour = cv.FindContours(maskimage, storage, cv.CV_RETR_CCOMP,
                              cv.CV_CHAIN_APPROX_SIMPLE)
    trackedpoint = None
    maxtrackedpoint = None
    # You can tune this value to improve tracking
    maxareasize = 0
    areasize = 0
    while contour:
        bound_rect = cv.BoundingRect(list(contour))
        contour = contour.h_next()
        pt1 = (bound_rect[0], bound_rect[1])
        pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
        areasize = fabs(bound_rect[2] * bound_rect[3])
        if (areasize > maxareasize):
            maxareasize = areasize
            maxtrackedpoint = (int((pt1[0] + pt2[0]) / 2),
                               int((pt1[1] + pt2[1]) / 2), 1.0)
        # FIX: draw on the rgbimage parameter -- the original referenced an
        # undefined global name `rgb_image`, raising NameError.
        cv.Rectangle(rgbimage, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)
    trackedpoint = maxtrackedpoint
    if (trackedpoint != None):
        cv.Circle(rgbimage, (trackedpoint[0], trackedpoint[1]), 5,
                  cv.CV_RGB(255, 0, 0), 1)
    return trackedpoint
# Top-level script listing: thresholds frames to yellow (via the external
# getthresholdedimg helper), then prints the corners of each blob's
# bounding rectangle. Relies on module-level `frame`, `capture` and
# getthresholdedimg() defined elsewhere.
frame_size = cv.GetSize(frame)
grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
test = cv.CreateImage(cv.GetSize(frame), 8, 3)
cv.NamedWindow("Real")
cv.NamedWindow("Threshold")
while (1):
    color_image = cv.QueryFrame(capture)
    imdraw = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.Flip(color_image, color_image, 1)  # mirror for a webcam-style view
    cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)
    imgyellowthresh = getthresholdedimg(color_image)
    # Erode then dilate to clean up the binary mask before contouring.
    cv.Erode(imgyellowthresh, imgyellowthresh, None, 3)
    cv.Dilate(imgyellowthresh, imgyellowthresh, None, 10)
    storage = cv.CreateMemStorage(0)
    contour = cv.FindContours(imgyellowthresh, storage,
                              cv.CV_RETR_EXTERNAL,
                              cv.CV_CHAIN_APPROX_SIMPLE)
    points = []
    # This is the new part here. ie Use of cv.BoundingRect()
    while contour:
        # Draw bounding rectangles
        bound_rect = cv.BoundingRect(list(contour))
        contour = contour.h_next()
        '''if contour!=None and contour.h_next()!=None: contour=contour.h_next()[0] print contour.h_next()[0]'''
        # for more details about cv.BoundingRect,see documentation
        pt1 = (bound_rect[0], bound_rect[1])
        print pt1
        pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
        print pt2
# ?? imdraw = cv.CreateImage(cv.GetSize(frame), 8, 3) # ?? cv.SetZero(imdraw) cv.Flip(color_image,color_image, 1) cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0) # ?? imgbluethresh = getthresholdedimg(color_image) cv.Erode(imgbluethresh, imgbluethresh, None, 3) cv.Dilate(imgbluethresh, imgbluethresh, None, 10) # ?? img2 = cv.CloneImage(imgbluethresh) # ?? storage = cv.CreateMemStorage(0) contour = cv.FindContours(imgbluethresh, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE) # blank list into which points for bounding rectangles around blobs are appended points = [] # this is the new part here. ie use of cv.BoundingRect() while contour: # Draw bounding rectangles bound_rect = cv.BoundingRect(list(contour)) contour = contour.h_next() #print contour # not sure why print contour # for more details about cv.BoundingRect,see documentation pt1 = (bound_rect[0], bound_rect[1]) pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
def run(self):
    """Thread body: tracks one color range in the shared webcam feed and
    broadcasts the centroid list to the clients registered for that color.

    Uses an adaptive ROI: full view while no blob is tracked (flag == 0),
    then a 100x100 window around the last centroid (flag == 1). Shares
    `capture`, `samecolorclient`, `centroidList` and the lock objects with
    the other threads via module globals.

    NOTE(review): the many bare `lock4.acquire` / `lock4.release` lines
    (without parentheses) are attribute accesses, not calls -- they do
    nothing at runtime. Only the parenthesized acquire()/release() pairs
    actually lock. Confirm which of the two was intended; as written most
    of the lock4 "protection" in this loop is a no-op.
    """
    logging.debug(' starting run ')
    global samecolorclient
    global capture
    global centroidList  #abh
    global lock  #abh
    global lock2  #abh
    global lock3  #abh
    global lock4  #abh
    mydata = threading.local()  # per-thread scratch state
    #window1=" Color Detection"
    mydata.window2 = str(self.name) + " Threshold"
    #cv.NamedWindow(window1,0)
    lock4.acquire()  #abh
    cv.NamedWindow(mydata.window2, 0)
    lock4.release()  #abh
    mydata.centroidold = [0, 0]
    mydata.flag = 0  # 0 = search whole view, 1 = search ROI around last hit
    mydata.roi = [100, 22, 390, 390]
    #mydata.roi=[95,40,380,350]
    while True:
        lock2.acquire()  #abh
        lock4.acquire()  #abh
        mydata.color_image = cv.QueryFrame(capture)
        lock4.release()  #abh
        lock2.release()  #abh
        if (mydata.flag == 0):
            # No blob tracked yet: crop to the full working area.
            lock4.acquire  #abh  (NOTE(review): no-op, missing parentheses)
            lock4.release  #abh
            mydata.color_image = cv.GetSubRect(mydata.color_image,
                                               (100, 22, 390, 390))
            lock4.release  #abh
        else:
            # Blob tracked last frame: crop to the ROI around its centroid.
            lock4.acquire  #abh  (NOTE(review): no-op, missing parentheses)
            lock4.release  #abh
            mydata.color_image = cv.GetSubRect(
                mydata.color_image,
                (int(mydata.roi[0]), int(mydata.roi[1]),
                 int(mydata.roi[2]), int(mydata.roi[3])))
            lock4.release  #abh
        lock4.acquire  #abh
        lock4.release  #abh
        cv.Flip(mydata.color_image, mydata.color_image, 1)
        cv.Smooth(mydata.color_image, mydata.color_image, cv.CV_MEDIAN, 3, 0)
        #logging.debug(' Starting getthresholdedimg ')
        mydata.imghsv = cv.CreateImage(cv.GetSize(mydata.color_image), 8, 3)
        # Convert image from RGB to HSV
        # NOTE(review): the conversion code actually used is CV_BGR2YCrCb,
        # i.e. YCrCb, not HSV -- confirm which color space self.color's
        # bounds are expressed in.
        cv.CvtColor(mydata.color_image, mydata.imghsv, cv.CV_BGR2YCrCb)
        mydata.imgnew = cv.CreateImage(cv.GetSize(mydata.color_image),
                                       cv.IPL_DEPTH_8U, 1)
        mydata.imgthreshold = cv.CreateImage(
            cv.GetSize(mydata.color_image), 8, 1)
        lock4.release  #abh
        # self.color is a (min, max) pair of channel triples.
        mydata.c = self.color[0]
        mydata.minc = (float(mydata.c[0]), float(mydata.c[1]),
                       float(mydata.c[2]))
        mydata.c = self.color[1]
        mydata.maxc = (float(mydata.c[0]), float(mydata.c[1]),
                       float(mydata.c[2]))
        lock4.acquire  #abh
        lock4.release  #abh
        cv.InRangeS(mydata.imghsv, cv.Scalar(*(mydata.minc)),
                    cv.Scalar(*(mydata.maxc)), mydata.imgnew)
        cv.Add(mydata.imgnew, mydata.imgthreshold, mydata.imgthreshold)
        #logging.debug(' Exiting getthreasholdedimg')
        #logging.debug('function returned from thresholdedimg')
        cv.Erode(mydata.imgthreshold, mydata.imgthreshold, None, 1)
        cv.Dilate(mydata.imgthreshold, mydata.imgthreshold, None, 4)
        mydata.img2 = cv.CloneImage(mydata.imgthreshold)
        mydata.storage = cv.CreateMemStorage(0)
        mydata.contour = cv.FindContours(mydata.imgthreshold,
                                         mydata.storage,
                                         cv.CV_RETR_EXTERNAL,
                                         cv.CV_CHAIN_APPROX_SIMPLE)
        lock4.release  #abh
        mydata.points = []
        #logging.debug('Starting while contour')
        while mydata.contour:
            # Draw bounding rectangles
            lock4.acquire  #abh
            lock4.release  #abh
            mydata.bound_rect = cv.BoundingRect(list(mydata.contour))
            lock4.release  #abh
            mydata.contour = mydata.contour.h_next()
            mydata.pt1 = (mydata.bound_rect[0], mydata.bound_rect[1])
            mydata.pt2 = (mydata.bound_rect[0] + mydata.bound_rect[2],
                          mydata.bound_rect[1] + mydata.bound_rect[3])
            mydata.points.append(mydata.pt1)
            mydata.points.append(mydata.pt2)
            lock4.acquire  #abh
            lock4.release  #abh
            cv.Rectangle(
                mydata.color_image, mydata.pt1, mydata.pt2,
                cv.CV_RGB(mydata.maxc[0], mydata.maxc[1], mydata.maxc[2]),
                1)
            lock4.release  #abh
            # Calculating centroids (only for blobs smaller than 3500 px^2)
            if (((mydata.bound_rect[2]) * (mydata.bound_rect[3])) < 3500):
                #logging.debug('Inside iffffffffffffffffffffffff')
                lock4.acquire  #abh
                lock4.release  #abh
                mydata.centroidx = cv.Round(
                    (mydata.pt1[0] + mydata.pt2[0]) / 2)
                mydata.centroidy = cv.Round(
                    (mydata.pt1[1] + mydata.pt2[1]) / 2)
                lock4.release  #abh
                if (mydata.flag == 1):
                    #logging.debug("inside flag1")
                    # ROI mode: convert ROI-relative centroid back to
                    # full-frame coordinates.
                    mydata.centroidx = mydata.roi[0] + mydata.centroidx
                    mydata.centroidy = mydata.roi[1] + mydata.centroidy
                mydata.centroidnew = [mydata.centroidx, mydata.centroidy]
                #logging.debug('mydataroi[0] '+str(mydata.roi[0]) + ';centroidx ' + str(mydata.centroidx))
                #logging.debug('mydataroi[1] '+str(mydata.roi[1]) + ';centroidy ' + str(mydata.centroidy))
                #print mydata.centroidx  #abh
                #print mydata.centroidy  #abh
                mydata.tmpclient = []
                lock3.acquire()  #abh
                mydata.tmpclient = samecolorclient[self.i]
                lock3.release()  #abh
                mydata.distance = math.sqrt(
                    math.pow((mydata.centroidnew[0] -
                              mydata.centroidold[0]), 2) +
                    math.pow((mydata.centroidnew[1] -
                              mydata.centroidold[1]), 2))
                #lock.acquire()  #abh  #abh commented
                # Push the updated centroid list to every registered client.
                for mydata.j in range(len(mydata.tmpclient)):
                    mydata.client_socket = mydata.tmpclient[mydata.j]
                    #logging.debug('before centroid send...')
                    if (mydata.distance >= 1.50):
                        # Moved far enough: publish the NEW centroid.
                        print 'inside 1.50 '
                        #self.server_socket.sendto(str(mydata.centroidnew),mydata.client_socket)  #abh
                        lock.acquire()  #abh
                        centroidList[colorlist.index(
                            self.color)] = mydata.centroidnew  #abh
                        del mydata.centroidold[:]
                        #logging.debug(str(centroidList))
                        self.server_socket.sendto(
                            str(centroidList), mydata.client_socket)  #abh
                        lock.release()  #abh
                        #logging.debug ('updating done.')  #abh
                        #print centroidList  #abh
                        mydata.centroidold = mydata.centroidnew[:]
                    else:
                        # Barely moved: re-publish the OLD centroid.
                        #self.server_socket.sendto(str(mydata.centroidold),mydata.client_socket)  #abh
                        lock.acquire()  #abh
                        centroidList[colorlist.index(
                            self.color)] = mydata.centroidold  #abh
                        #logging.debug(str(centroidList))
                        self.server_socket.sendto(
                            str(centroidList), mydata.client_socket)  #abh
                        lock.release()  #abh
                        #logging.debug ('updating done2.')  #abh
                        #print centroidList  #abh
                    # logging.debug('byte sent to client')
                #lock.release()  #abh
                # Re-center a 100x100 ROI on the centroid, clamped to the
                # working area (x in [95, 475], y in [40, 390]).
                mydata.roi[0] = mydata.centroidx - 50
                mydata.roi[1] = mydata.centroidy - 50
                if (mydata.roi[0] < 95):
                    mydata.roi[0] = 95
                if (mydata.roi[1] < 40):
                    mydata.roi[1] = 40
                mydata.roi[2] = 100
                mydata.roi[3] = 100
                if ((mydata.roi[0] + mydata.roi[2]) > 475):
                    mydata.roi[0] = mydata.roi[0] - (
                        (mydata.roi[0] + mydata.roi[2]) - 475)
                if ((mydata.roi[1] + mydata.roi[3]) > 390):
                    mydata.roi[1] = mydata.roi[1] - (
                        (mydata.roi[1] + mydata.roi[3]) - 390)
                #del mydata.centroidnew[:]
                mydata.flag = 1
        # No contours at all this frame: fall back to full-view search.
        if mydata.contour is None:
            mydata.flag = 0
        #cv.ShowImage(window1,mydata.color_image)
        lock4.acquire  #abh
        lock4.release  #abh
        cv.ShowImage(mydata.window2, mydata.img2)
        lock4.release  #abh
        if cv.WaitKey(33) == 27:  #here it was 33 instead of 10
            #cv.DestroyWindow(mydata.window1)
            #cv.DestroyWindow(mydata.window2)
            break