def countSendShift(contours):
    """Find the largest contour, draw its min-area box and midpoint on the
    global `img`, and convert the midpoint's horizontal offset from x=280
    into a steering code.

    Returns:
        0 when the global `sendstatus` is 0,
        1 when the midpoint is within +/-140 px of x=280,
        2 when it is left of that band, 3 when right of it,
        None when `midpointCalc` yields no midpoint.

    Uses globals: img, sendstatus, midpointCalc, and legacy cv.BoxPoints.
    """
    c = max(contours, key=cv2.contourArea)
    rect = cv2.minAreaRect(c)
    box = cv.BoxPoints(rect)
    box = np.int0(box)
    cv2.drawContours(img, [box], 0, (0, 0, 255))
    midpoint = midpointCalc(box)
    # BUGFIX: was `midpoint != None` — identity check is required; `!=` on a
    # numpy array compares elementwise and is ambiguous in a boolean context.
    if midpoint is not None:
        # midpoint layout (from usage): [x1, y1, x2, y2, cx, cy]
        cv2.line(img, (midpoint[0], midpoint[1]),
                 (midpoint[2], midpoint[3]), (255, 0, 0), 3)
        cv2.circle(img, (midpoint[4], midpoint[5]), 5, (0, 255, 0), -1)
        shift = midpoint[4] - 280  # signed offset from the target column
        if sendstatus == 0:
            return 0
        elif -140 <= shift <= 140:
            return 1
        elif shift < -140:
            return 2
        elif shift > 140:
            return 3
        else:
            return 4  # unreachable; kept as a defensive fallback
def checkgp(target): hsv = cv2.cvtColor(target, cv2.COLOR_BGR2HSV) maskedgreen = cv2.inRange(hsv, lower_green, upper_green) greencontours, _ = cv2.findContours(maskedgreen, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) if len(greencontours) != 0: greenc = max(greencontours, key=cv2.contourArea) greenrect = cv2.minAreaRect(greenc) greenbox = cv.BoxPoints(greenrect) greenbox = np.int0(greenbox) cv2.drawContours(img, [greenbox], 0, (0, 0, 255), 2) if greenbox != None: if (greenbox[0][0] >= 250 and greenbox[1][0] <= 310): print 'in goal' sendInt(9, car_address) time.sleep(1) sendInt(0, car_address) time.sleep(2) sendInt(4, car_address) p.ChangeDutyCycle(3.2) time.sleep(4) return 8 else: print 'not in goal' return 9 else: print 'no find green ' return 9
def Track(frame, allRoiPts, allRoiHist):
    # Track one ROI per histogram with CamShift, clamping negative box
    # coordinates to 0, and return the (mutated in place) allRoiPts list.
    hsv = cv2.cvtColor(frame, cv.CV_BGR2HSV)
    trackFaces = []       # collects drawn box points; computed but not returned
    tempAllRoiPts = []    # unused in this function
    i = 0
    # Skin-tone style mask: hue 0-20, sat >= 40, val >= 80 —
    # presumably tuned for face tracking; verify against the detector.
    mask = cv2.inRange(hsv, np.array((0., 40., 80.)),
                       np.array((20., 255., 255.)))
    for roiHist in allRoiHist:
        backProj = cv2.calcBackProject([hsv], [0], roiHist, [0, 180], 1)
        backProj &= mask  # suppress back-projection outside the color mask
        (x, y, w, h) = allRoiPts[i]
        if x > 0 and y > 0 and w > 0 and h > 0:
            # `termination` is a module-level CamShift criteria tuple.
            (r, allRoiPts[i]) = cv2.CamShift(
                backProj, (int(x), int(y), int(w), int(h)), termination)
            for j in range(0, 4):
                if allRoiPts[i][j] < 0:
                    allRoiPts[i][j] = 0
                    print 'Lose track'
        # NOTE(review): if the ROI above was degenerate (if-branch skipped),
        # `r` here is unbound on the first iteration or stale afterwards —
        # this line would raise NameError / reuse the previous rect. Confirm
        # whether degenerate ROIs can reach this point in practice.
        pts = np.int0(cv.BoxPoints(r))
        #cv2.polylines(frame, [pts], True, (0, 255,255), 1)
        i += 1
        trackFaces.append(pts)
    return allRoiPts
def draw(self, imageOut, drawDetail=False):
    """Render this tracked object onto imageOut.

    With drawDetail=False only the axis-aligned bounding box is drawn;
    with drawDetail=True the motion trace and the rotated min-area
    rectangle are drawn instead. Active objects use the bright colors,
    inactive ones the dark variants.
    """
    boxColor = self.colorBlue if self.active else self.colorDarkBlue

    if not drawDetail:
        # Just the bounding box
        topLeft = (self.bbox[0], self.bbox[1])
        bottomRight = (self.bbox[0] + self.bbox[2],
                       self.bbox[1] + self.bbox[3])
        cv2.rectangle(imageOut, topLeft, bottomRight, boxColor, 2)
        return

    # Contours
    #cv2.drawContours(imageOut, [self.contour], 0, self.colorGreen if self.active else self.colorDarkGreen, 2)

    # Motion trace: line from last center to current one, or a circle
    # when the two centers are (almost) coincident.
    if self.active and self.lastCenter is not None:
        deltaX = self.center[0] - self.lastCenter[0]
        deltaY = self.center[1] - self.lastCenter[1]
        if hypot(deltaX, deltaY) < 10:
            # Too close to draw a meaningful line; mark with a circle.
            cv2.circle(imageOut, self.center, 10, self.colorMagenta, 2)
        else:
            cv2.line(imageOut, self.center, self.lastCenter,
                     self.colorMagenta, 2)

    # Rotated min-area rectangle
    corners = np.int_(cv.BoxPoints(self.rect))
    #print corners  # [debug]
    cv2.drawContours(imageOut, [corners], 0, boxColor, 2)
    #cv2.polylines(imageOut, [corners], True, boxColor, 2)  # alt. method; TODO test which one of polylines and drawContours is faster (?)
def draw_common(points):
    """Draw, on the global `img`, the minimum enclosing circle (yellow)
    and the minimum-area rotated rectangle (cyan) of `points`."""
    ok, circle_center, circle_radius = cv.MinEnclosingCircle(points)
    if ok:
        cv.Circle(img, roundxy(circle_center), cv.Round(circle_radius),
                  cv.CV_RGB(255, 255, 0), 1, cv.CV_AA, 0)

    rect = cv.MinAreaRect2(points)
    vertices = []
    for corner in cv.BoxPoints(rect):
        vertices.append(roundxy(corner))
    cv.PolyLine(img, [vertices], 1, cv.CV_RGB(0, 255, 255), 1, cv.CV_AA)
def computeSkewAngle(dilation): dilation_copy = dilation.copy() contours,hierarchy = cv2.findContours(dilation_copy,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) # Detect the contours of all objects leng = 0 for i in range(len(contours)): #print len(contours[i]) leng = leng + len(contours[i]) print leng + 1 points = [] pointMat = cv.CreateMat(leng, 1, cv.CV_32SC2) try: for i in range(len(contours[i])): cnt = contours[i] if cv2.contourArea(cnt) > 10000: print 'Found bigggggg contour' #### check why it is going list out of index OR EXCEPTION HANDLING ###### #print cnt else: for i in range(len(cnt)): pointMat[i, 0] = tuple(cnt[i][0]) points.append(tuple(cnt[i][0])) #print pointMat except: print 'Exception raised' print "Unexpected error:", sys.exc_info()[0] finally: print 'Hi' box = cv.MinAreaRect2(points) box_vtx = [roundxy(p) for p in cv.BoxPoints(box)] #box_vtx = [cv.Round(pt[0]), cv.Round(pt[1]) for p in cv.BoxPoints(box)] print box[2] if box[2] < -45: skew_angle = box[2] + 90 else: skew_angle = box[2] print 'Skew Angle : ',skew_angle return skew_angle,box_vtx
def getRotatedRoi_xywh(contour):
    """Return the integer corner points of the min-area rotated rectangle
    around `contour`.

    BUGFIX: the original returned `np.int(Box)`; `np.int` is the builtin
    `int`, which raises TypeError on a tuple of points. `np.int0` (as used
    everywhere else in this file) converts the corner list to an integer
    ndarray.
    """
    rect = cv2.minAreaRect(contour)
    box = cv.BoxPoints(rect)
    return np.int0(box)
def run(self):
    """Motion-detection loop: maintain a running-average background,
    threshold the per-frame difference, extract blob contours, and draw
    their bounding geometry and centers until ESC is pressed."""
    # Capture first frame to get size
    frame = cv.QueryFrame(self.capture)
    #nframes =+ 1
    frame_size = cv.GetSize(frame)
    color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
    grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    # 32-bit float accumulator required by cv.RunningAvg.
    moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)

    def totuple(a):
        # Recursively convert nested sequences/arrays into nested tuples.
        try:
            return tuple(totuple(i) for i in a)
        except TypeError:
            return a

    first = True
    while True:
        # NOTE(review): these two are frame dimensions, not distances;
        # they are never read afterwards.
        closest_to_left = cv.GetSize(frame)[0]
        closest_to_right = cv.GetSize(frame)[1]
        color_image = cv.QueryFrame(self.capture)
        # Smooth to get rid of false positives
        cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)
        if first:
            # Bootstrap the background model from the first frame.
            difference = cv.CloneImage(color_image)
            temp = cv.CloneImage(color_image)
            cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
            first = False
        else:
            # Blend the current frame into the background (alpha = 0.1).
            cv.RunningAvg(color_image, moving_average, .1, None)
            cv.ShowImage("BG", moving_average)
        # Convert the scale of the moving average.
        cv.ConvertScale(moving_average, temp, 1, 0.0)
        # Minus the current frame from the moving average.
        cv.AbsDiff(color_image, temp, difference)
        #cv.ShowImage("BG",difference)
        # Convert the image to grayscale.
        cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
        cv.ShowImage("BG1", grey_image)
        # Convert the image to black and white.
        cv.Threshold(grey_image, grey_image, 40, 255, cv.CV_THRESH_BINARY)
        #cv.ShowImage("BG2", grey_image)
        # Dilate and erode to get people blobs
        cv.Dilate(grey_image, grey_image, None, 8)
        cv.Erode(grey_image, grey_image, None, 3)
        cv.ShowImage("BG3", grey_image)
        storage = cv.CreateMemStorage(0)
        global contour
        contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP,
                                  cv.CV_CHAIN_APPROX_SIMPLE)
        points = []
        # Walk the contour chain via h_next.
        while contour:
            global bound_rect
            bound_rect = cv.BoundingRect(list(contour))
            polygon_points = cv.ApproxPoly(list(contour), storage,
                                           cv.CV_POLY_APPROX_DP)
            contour = contour.h_next()
            global pt1, pt2
            pt1 = (bound_rect[0], bound_rect[1])
            pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
            #size control
            # NOTE(review): this compares x - w and y - h against 10, which
            # looks like it was meant to be a width/height (> 10) filter —
            # confirm the intended size condition.
            if (bound_rect[0] - bound_rect[2] > 10) and (bound_rect[1] - bound_rect[3] > 10):
                points.append(pt1)
                points.append(pt2)
                #points += list(polygon_points)
                global box, box2, box3, box4, box5
                box = cv.MinAreaRect2(polygon_points)
                box2 = cv.BoxPoints(box)
                box3 = np.int0(np.around(box2))
                box4 = totuple(box3)
                # Close the polygon by repeating the first vertex.
                box5 = box4 + (box4[0], )
                cv.FillPoly(grey_image, [
                    list(polygon_points),
                ], cv.CV_RGB(255, 255, 255), 0, 0)
                cv.PolyLine(color_image, [
                    polygon_points,
                ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0)
                cv.PolyLine(color_image, [list(box5)], 0, (0, 0, 255), 2)
                #cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)
        if len(points):
            # Center of the LAST accepted bounding box (pt1/pt2 are the
            # most recent values from the loop above).
            #center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
            center1 = (pt1[0] + pt2[0]) / 2
            center2 = (pt1[1] + pt2[1]) / 2
            #print center1, center2, center_point
            #cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
            #cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
            #cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
            cv.Circle(color_image, (center1, center2), 5, cv.CV_RGB(0, 0, 255), -1)
        cv.ShowImage("Target", color_image)
        # Listen for ESC key
        c = cv.WaitKey(7) % 0x100
        if c == 27:
            #cv.DestroyAllWindows()
            break
cv2.CHAIN_APPROX_NONE) yellowcontours, _ = cv2.findContours(maskedyellow, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) greencontours, _ = cv2.findContours(maskedgreen, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) purple_contours, _ = cv2.findContours(maskedpurple, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) if (len(purple_contours) != 0 and limitdetectpurple == 0): sendInt(4, car_address) time.sleep(0.1) sendInt(0, car_address) cpurple = max(purple_contours, key=cv2.contourArea) purplerect = cv2.minAreaRect(cpurple) purplebox = cv.BoxPoints(purplerect) purplebox = np.int0(purplebox) purplemidpoint = midpointCalc(purplebox) print 'detected' if purplemidpoint != None: print 'calculated' action = 1 if purplemidpoint[4] > 280: rdir = 2 elif purplemidpoint[4] < 280: rdir = 1 elif (len(yellowcontours) != 0): if (lastcolorblack == 1): noblack = 1
def test_detect_ui(imgname='base1'):
    """Interactive experiment: try every (color transform, morphology op)
    pair on `<imgname>.png`, Canny-edge + threshold + contour the result,
    and draw convex hulls / bounding boxes of plausible UI elements,
    previewing each stage with cv2.imshow."""
    img = cv2.imread('%s.png' % imgname)
    origin = img
    # Color-space variants; looked up later by NAME via locals().get(tran).
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hsv_full = cv2.cvtColor(img, cv2.COLOR_BGR2HSV_FULL)
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    hls_full = cv2.cvtColor(img, cv2.COLOR_BGR2HLS_FULL)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ## this is very slow
    # gray_denoised = cv2.fastNlMeansDenoising(gray, None, 20, 7, 21)
    # img_denoised = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
    # kernel = np.ones((5,5), np.uint8)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
    print kernel
    # Morphology ops, also resolved by NAME via locals().get(method) below.
    nochange = lambda img: img
    erosion = lambda img: cv2.erode(img, kernel, iterations=3)
    dilation = lambda img: cv2.dilate(img, kernel, iterations=3)
    opening = lambda img: cv2.morphologyEx(
        img, cv2.MORPH_OPEN, kernel, iterations=3)
    closing = lambda img: cv2.morphologyEx(
        img, cv2.MORPH_CLOSE, kernel, iterations=3)
    gradient = lambda img: cv2.morphologyEx(
        img, cv2.MORPH_GRADIENT, kernel, iterations=3)
    blackhat = lambda img: cv2.morphologyEx(
        img, cv2.MORPH_BLACKHAT, kernel, iterations=3)
    tophat = lambda img: cv2.morphologyEx(
        img, cv2.MORPH_TOPHAT, kernel, iterations=3)
    # laplacian = lambda img: cv2.Laplacian(gray, cv2.CV_8U)
    # sobelx = lambda img: cv2.Sobel(gray,cv2.CV_8U,1,0,ksize=3)
    # sobely = lambda img: cv2.Sobel(gray,cv2.CV_8U,0,1,ksize=3)
    # Reverse transforms so drawing happens in BGR for display.
    revtrans = {
        'hsv': cv2.COLOR_HSV2BGR,
        'hls': cv2.COLOR_HLS2BGR,
        'hsv_full': cv2.COLOR_HSV2BGR,
        'hls_full': cv2.COLOR_HLS2BGR
    }
    for tran in ('origin', 'gray', 'hsv', 'hsv_full', 'hls', 'hls_full',
                 'gray_denoised', 'img_denoised'):
        # Names with no matching local (e.g. the commented-out denoised
        # variants) simply get skipped.
        sample = locals().get(tran)
        if sample is None:
            continue
        # sample = cv2.GaussianBlur(sample, (3,3), 1)
        # sample = cv2.bilateralFilter(sample,9,70,70)
        for method in ('nochange', 'erosion', 'dilation', 'opening',
                       'closing', 'gradient', 'blackhat', 'tophat',
                       'laplacian', 'sobelx', 'sobely'):
            func = locals().get(method)
            if func is None:
                continue
            print tran, method
            mat = func(sample.copy())
            edges = cv2.Canny(mat, 80, 200)
            revtran = revtrans.get(tran)
            if revtran:
                mat = cv2.cvtColor(mat, revtran)
            # edges = cv2.bilateralFilter(edges, 31, 30, 30)
            cv2.imshow('preview', edges)
            cv2.waitKey()
            _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_OTSU)
            contours, _ = cv2.findContours(thresh, cv2.RETR_CCOMP,
                                           cv2.CHAIN_APPROX_SIMPLE)
            # Largest (most points) contours first.
            contours.sort(key=lambda cnt: len(cnt), reverse=True)
            for cnt in contours:
                area = cv2.contourArea(cnt)
                length = cv2.arcLength(cnt, True)
                # if len(cnt) < 10:
                #     continue
                # if area < 20:# or area > 300:
                #     continue
                # if length < 100:# or length > 400:
                #     continue
                # print len(cnt), int(area), int(length)
                # epsilon = 0.2*length
                # poly = cv2.approxPolyDP(cnt,epsilon,True)
                hull = cv2.convexHull(cnt)
                hull_area = cv2.contourArea(hull)
                x, y, w, h = cv2.boundingRect(cnt)
                rect_area = float(w * h)
                # Filter out tiny regions and hulls that fill less than 65%
                # of their bounding box (non-rectangular shapes).
                if w < 20 or h < 20 or rect_area < 100:
                    continue
                if hull_area / rect_area < 0.65:
                    continue
                cv2.drawContours(mat, [hull], 0, 255, -1)
                cv2.rectangle(mat, (x, y), (x + w, y + h), (0, 255, 0), 2)
                cnt = hull
                # Alternative drawing styles; style = -1 disables them all.
                style = -1
                if style == 1:
                    lb, lt, rt, rb = cv.BoxPoints(cv2.minAreaRect(cnt))
                    lt = tuple(map(int, lt))
                    rb = tuple(map(int, rb))
                    cv2.rectangle(mat, lt, rb, (0, 255, 0), 2)
                elif style == 2:
                    (x, y), radius = cv2.minEnclosingCircle(cnt)
                    center = (int(x), int(y))
                    radius = int(radius)
                    cv2.circle(mat, center, radius, (255, 255, 0), 2)
                elif style == 3:
                    ellipse = cv2.fitEllipse(cnt)
                    cv2.ellipse(mat, ellipse, (0, 255, 0), 2)
                # cv2.imshow('preview', mat)
                # cv2.waitKey()
                # break
            cv2.imshow('preview', mat)
            # cv2.imwrite('%s-%s-%s.png' % (imgname, tran, method), mat)
            cv2.waitKey()