def circ(n):
    pt = cv.cvPoint(int(round(n[0, 0])), int(round(n[1, 0])))
    cv.cvCircle(self.disp.buffer, pt, size, color_scalar, cv.CV_FILLED, cv.CV_AA)
    pt2 = cv.cvPoint(pt.x + 2, pt.y + 2)
    cv.cvPutText(self.disp.buffer, text, pt, self.disp.font, cv.cvScalar(255, 255, 255))
    cv.cvPutText(self.disp.buffer, text, pt2, self.disp.font, cv.cvScalar(50, 50, 50))

def clear(self):
    cv.cvRectangle(self.buffer, cv.cvPoint(0, 0),
                   cv.cvPoint(self.buffer.width, self.buffer.height),
                   cv.cvScalar(255, 255, 255), cv.CV_FILLED)
    if self.draw_grid:
        line_color = 230
        lc = cv.cvScalar(line_color, line_color, line_color)
        for i in xrange(1, as_int(self.meters_disp) + 3):
            cv.cvCircle(self.buffer, cv.cvPoint(self.w / 2, self.h),
                        as_int(self.pixels_per_meter * (i - .5)),
                        #lc, 1)
                        lc, 1, cv.CV_AA)
            cv.cvCircle(self.buffer, cv.cvPoint(self.w / 2, self.h),
                        as_int(self.pixels_per_meter * i),
                        #lc, 1)
                        lc, 1, cv.CV_AA)
        for i in xrange(360 / 30):
            x = (self.w / 2) + math.cos(math.radians(i * 30)) * self.pixels_per_meter * (self.meters_disp + 2)
            y = self.h + math.sin(math.radians(i * 30)) * self.pixels_per_meter * (self.meters_disp + 2)
            cv.cvLine(self.buffer, cv.cvPoint(self.w / 2, self.h),
                      cv.cvPoint(as_int(x), as_int(y)), lc, 1, cv.CV_AA)
    if self.draw_center:
        cv.cvCircle(self.buffer, cv.cvPoint(self.w / 2, self.h), 3,
                    cv.cvScalar(0, 0, 200), cv.CV_FILLED, cv.CV_AA)

def draw_weighted_Pose2D(display, max_weight, particles):
    for p in particles:
        if type(p) is types.TupleType:
            part, weight = p
            rpos = part.pos
        else:
            part = p
            rpos = p.pos
        x = mt.cos(part.angle) * .07
        y = mt.sin(part.angle) * .07
        dir = rpos.copy()
        dir[0, 0] = dir[0, 0] + x
        dir[1, 0] = dir[1, 0] + y
        pos = display.to_screen(rpos)
        dirp = display.to_screen(dir)
        if type(p) is types.TupleType:
            color = round(255.0 * (weight / max_weight))
            cv.cvCircle(display.buffer,
                        cv.cvPoint(int(pos[0, 0]), int(pos[1, 0])), 2,
                        cv.cvScalar(255, 255 - color, 255), cv.CV_FILLED, cv.CV_AA)
            cv.cvCircle(display.buffer,
                        cv.cvPoint(int(pos[0, 0]), int(pos[1, 0])), 2,
                        cv.cvScalar(200, 200, 200), 8, cv.CV_AA)
        else:
            cv.cvCircle(display.buffer,
                        cv.cvPoint(int(pos[0, 0]), int(pos[1, 0])), 2,
                        cv.cvScalar(150, 150, 150), cv.CV_FILLED, cv.CV_AA)
        cv.cvLine(display.buffer,
                  cv.cvPoint(int(pos[0, 0]), int(pos[1, 0])),
                  cv.cvPoint(int(dirp[0, 0]), int(dirp[1, 0])),
                  cv.cvScalar(100, 200, 100), 1, cv.CV_AA, 0)

def __findContour(self, filename):
    # find the contour of images, and save all points in self.allcurve
    self.img = highgui.cvLoadImage(filename)
    self.grayimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8, 1)
    self.drawimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8, 3)
    cv.cvCvtColor(self.img, self.grayimg, cv.CV_BGR2GRAY)
    cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
    cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
    cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
    cv.cvThreshold(self.grayimg, self.grayimg, self.threshold,
                   self.threshold + 100, cv.CV_THRESH_BINARY)
    cv.cvZero(self.drawimg)
    storage = cv.cvCreateMemStorage(0)
    nb_contours, cont = cv.cvFindContours(self.grayimg, storage,
                                          cv.sizeof_CvContour,
                                          cv.CV_RETR_LIST,
                                          cv.CV_CHAIN_APPROX_NONE,
                                          cv.cvPoint(0, 0))
    cv.cvDrawContours(self.drawimg, cont, cv.cvScalar(255, 255, 255, 0),
                      cv.cvScalar(255, 255, 255, 0), 1, 1, cv.CV_AA,
                      cv.cvPoint(0, 0))
    self.allcurve = []
    idx = 0
    for c in cont.hrange():
        PointArray = cv.cvCreateMat(1, c.total, cv.CV_32SC2)
        PointArray2D32f = cv.cvCreateMat(1, c.total, cv.CV_32FC2)
        cv.cvCvtSeqToArray(c, PointArray, cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
        fpoints = []
        for i in range(c.total):
            kp = myPoint()
            kp.x = cv.cvGet2D(PointArray, 0, i)[0]
            kp.y = cv.cvGet2D(PointArray, 0, i)[1]
            kp.index = idx
            idx += 1
            fpoints.append(kp)
        self.allcurve.append(fpoints)
    self.curvelength = idx

def DrawKeyPoints(self):
    if (not self.drawimg):
        self.drawimg = cv.cvCloneImage(self.img)
    myfont = cv.cvInitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5)
    ic = 0
    for c in self.points:
        cv.cvPutText(self.drawimg, str(ic), cv.cvPoint(int(c.x), int(c.y)),
                     myfont, cv.cvScalar(255, 255, 0, 0))
        ic += 1
        cv.cvDrawCircle(self.drawimg, c, 4, cv.cvScalar(255, 255, 0, 0))

def get_nearest_feature(image, this_point, n=2000):
    """
    Get the n-nearest features to a specified image coordinate.
    Features are determined using cvGoodFeaturesToTrack.
    """
    _red = cv.cvScalar(0, 0, 255, 0)
    _green = cv.cvScalar(0, 255, 0, 0)
    _blue = cv.cvScalar(255, 0, 0, 0)
    _white = cv.cvRealScalar(255)
    _black = cv.cvRealScalar(0)

    quality = 0.01
    min_distance = 4
    N_best = n
    win_size = 11

    grey = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)
    eig = cv.cvCreateImage(cv.cvGetSize(image), 32, 1)
    temp = cv.cvCreateImage(cv.cvGetSize(image), 32, 1)

    # create a grey version of the image
    cv.cvCvtColor(image, grey, cv.CV_BGR2GRAY)

    points = cv.cvGoodFeaturesToTrack(grey, eig, temp, N_best, quality,
                                      min_distance, None, 3, 0, 0.04)

    # refine the corner locations
    better_points = cv.cvFindCornerSubPix(
        grey, points, cv.cvSize(win_size, win_size), cv.cvSize(-1, -1),
        cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))

    eigs = []
    for i in range(len(points)):
        eigs.append(cv.cvGetMat(eig)[int(points[i].y)][int(points[i].x)])

    mypoints = np.matrix(np.zeros((len(points) * 2), dtype=float)).reshape(len(points), 2)
    dists = []
    for i, point in enumerate(points):
        mypoints[i, 0] = point.x
        mypoints[i, 1] = point.y
        dists.append(np.linalg.norm(mypoints[i, :] - this_point))

    dists = np.array(dists)
    sorteddists = dists.argsort()

    cv.cvDrawCircle(image, points[sorteddists[0]], 5, _green, 2, 8, 0)

    return better_points[sorteddists[0]]

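# A minimal usage sketch for get_nearest_feature (added for illustration, not
# part of the original file). It assumes numpy is imported as np and highgui
# is imported from the opencv package, as elsewhere in these snippets; the
# image path and query point are hypothetical. The query point is passed as a
# 1x2 numpy matrix so the np.linalg.norm call inside the function is valid.
if __name__ == '__main__':
    img = highgui.cvLoadImage('test.png')    # hypothetical input image
    query = np.matrix([[img.width / 2, img.height / 2]], dtype=float)
    nearest = get_nearest_feature(img, query)
    print 'nearest refined corner:', nearest
    highgui.cvShowImage('nearest feature', img)
    highgui.cvWaitKey(0)
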
def DrawKeyPoints(self):
    ic = 0
    myfont = cv.cvInitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5)
    for ic, c in enumerate(self.mss.seqs):
        cnt = 0
        for k in c.points:
            cnt += 1
            if (int(cnt / 2) * 2 != cnt):
                continue
            cv.cvDrawCircle(self.drawimg, cv.cvPoint(int(k.x), int(k.y)), 4,
                            cv.cvScalar(255, 255, 255, 0))
            if (self.bDrawNumber and (cnt > self.start) and
                    cnt < self.start + 8 * 4 and len(c.points) > 30):
                #cv.cvPutText(self.drawimg, str(cnt), cv.cvPoint(int(k.x) + 5, int(k.y)), myfont, cv.cvScalar(255, 255, 0,0))
                cv.cvDrawCircle(self.drawimg, cv.cvPoint(int(k.x), int(k.y)), 4,
                                cv.cvScalar(255, 0, 255, 0))

def process_node(tup):
    node, color, size, text = tup
    color_scalar = cv.cvScalar(color[0], color[1], color[2])
    node_val = node.val(t)
    if node_val.__class__ != tuple:
        if node_val != None:
            v_cv = self.disp.to_screen(node.val(t))

            def circ(n):
                pt = cv.cvPoint(int(round(n[0, 0])), int(round(n[1, 0])))
                cv.cvCircle(self.disp.buffer, pt, size, color_scalar,
                            cv.CV_FILLED, cv.CV_AA)
                pt2 = cv.cvPoint(pt.x + 2, pt.y + 2)
                cv.cvPutText(self.disp.buffer, text, pt, self.disp.font,
                             cv.cvScalar(255, 255, 255))
                cv.cvPutText(self.disp.buffer, text, pt2, self.disp.font,
                             cv.cvScalar(50, 50, 50))

            map(circ, fun.points_of_mat(v_cv))
    else:
        start_pts, end_pts = node_val
        for idx in range(start_pts.shape[1]):
            start_pt = cvpoint_of_pt(self.disp.to_screen(start_pts[:, idx]))[0]
            end_pt = cvpoint_of_pt(self.disp.to_screen(end_pts[:, idx]))[0]
            cv.cvLine(self.disp.buffer, start_pt, end_pt, color_scalar,
                      size, cv.CV_AA)

def __GetCrossDist(self, p1, dx, dy, iPointIndex):
    bFound = 0
    fDist = 0
    bestPoint = cv.cvPoint(0, 0)
    bestLength = 1e10
    bigLength = -1
    nPoints = len(self.keypoints)
    for k in range(nPoints):
        if (k == iPointIndex or k == iPointIndex + 1):
            continue
        q1 = self.keypoints[(k - 1 + nPoints) % nPoints]
        q2 = self.keypoints[k]
        du = q2.x - q1.x
        dv = q2.y - q1.y
        dd = (dy * du - dx * dv)
        if (dd == 0):
            continue
        t = (dy * (p1.x - q1.x) - dx * (p1.y - q1.y)) / dd
        if (t >= -0.0001 and t <= 1.0001):
            # found it
            ptt = cv.cvPoint(int(q1.x + t * du), int(q1.y + t * dv))
            l = math.sqrt((ptt.x - p1.x) * (ptt.x - p1.x) +
                          (ptt.y - p1.y) * (ptt.y - p1.y))
            l2 = ((dv * q1.x - du * q1.y) - (dv * p1.x - du * p1.y)) / (dv * dx - du * dy)
            bFound = 1
            if (l <= bestLength and l2 > 0):
                bestPoint = ptt
                bestLength = l
                fDist = bestLength
    if (not bFound):
        fDist = 0
    if (self.img):
        cv.cvLine(self.img, cv.cvPoint(int(p1.x), int(p1.y)), bestPoint,
                  cv.cvScalar(255, 255, 255, 0))
    return fDist

def __link(self):
    myfont = cv.cvInitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5)
    kkk = 0
    self.edges = []
    for curve in self.allcurve:
        showpt = []
        state = 0
        currentPoint = None
        currentPointIdx = -1
        cumulate = 0
        dcurve = curve + curve
        curlen = len(curve)
        ptcount = 0
        pointseq = []
        for c in dcurve:
            if (ptcount > curlen):
                break
            cumulate += 1
            for kk in range(len(self.points)):
                k = self.points[kk]
                if (abs(c.x - k.x) + abs(c.y - k.y) < 15):
                    if (currentPoint != k or cumulate > 40):
                        state += 1
                        currentPoint = k
                        currentPointIdx = kk
                        cumulate = 0
                        pointseq.append(kk)
            if (state > 0):
                showpt.append([c, state, currentPointIdx])
            ptcount += 1
        if (state > 1):
            kkk += 1
            cnt = 0
            pret = -1
            e = None
            for s, t, cp in showpt:
                if (cp != pret):
                    if e != None:
                        e.end = cp
                    e = Edge()
                    self.edges.append(e)
                    e.start = cp
                    pret = cp
                cnt += 1
                if (t < state):
                    e.addPoint(s)
                    #print "%d\t%3.2f\t%3.2f\t%d\t%d\t%d" % (kkk, s.x, s.y, cnt, pointseq[t - 1], pointseq[t])
            e.end = showpt[-1][2]
    print >> OUT, "seq\tptn\tx\ty\t"
    # self.__edgededup()
    self.__evenSample(self.npoints)
    for ie, e in enumerate(self.edges):
        print "P(%d) <-> P(%d) length %d, selected %d" % (e.start, e.end, len(e.points), len(e.selected))
        for d in e.points:
            cv.cvSet2D(self.drawimg, int(d.y), int(d.x), color[3])
        for ip, p in enumerate(e.selected):
            cv.cvDrawCircle(self.drawimg, p, 2, cv.cvScalar(255, 255, 0, 0))
            print >> OUT, "%d\t%d\t%d\t%d" % (ie, ip, p.x, p.y)

def draw_weighted_2D(display, max_weight, particles):
    for p in particles:
        if type(p) is types.TupleType:
            rpos, weight = p
        else:
            rpos = p
        pos = display.to_screen(rpos)
        if type(p) is types.TupleType:
            color = round(255.0 * (weight / max_weight))
            cv.cvCircle(display.buffer,
                        cv.cvPoint(int(pos[0, 0]), int(pos[1, 0])), 3,
                        cv.cvScalar(255, 255 - color, 255), cv.CV_FILLED, cv.CV_AA)
            cv.cvCircle(display.buffer,
                        cv.cvPoint(int(pos[0, 0]), int(pos[1, 0])), 3,
                        cv.cvScalar(200, 200, 200), 1, cv.CV_AA)
        else:
            cv.cvCircle(display.buffer,
                        cv.cvPoint(int(pos[0, 0]), int(pos[1, 0])), 2,
                        cv.cvScalar(150, 150, 150), cv.CV_FILLED, cv.CV_AA)

def DrawKeyPoints(self):
    ic = 0
    myfont = cv.cvInitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5)
    for ic, c in enumerate(self.mss.seqs):
        for k in c.points:
            if self.bDrawNumber:
                cv.cvPutText(self.drawimg, str(ic),
                             cv.cvPoint(int(k.x), int(k.y)), myfont,
                             cv.cvScalar(255, 255, 0, 0))
            cv.cvDrawCircle(self.drawimg, cv.cvPoint(int(k.x), int(k.y)), 4,
                            cv.cvScalar(255, 0, 255, 0))

def np2cv(im):
    print 'WARNING: np2cv is not reliable or well tested (it is a bit flakey...)'
    #raise AssertionError('np2cv does not work :-(')
    if len(im.shape) == 3:
        shp = im.shape
        channels = shp[2]
        height = shp[0]
        width = shp[1]
        #height, width, channels = im.shape
    elif len(im.shape) == 2:
        height, width = im.shape
        channels = 1
    else:
        raise AssertionError("unrecognized shape for the input image. should be 3 or 2, but was %d." % len(im.shape))
    key = str(im.dtype)
    cv_type = np2cv_type_dict[key]
    print 'attempt to create opencv image with (key, width, height, channels) =', (key, width, height, channels)
    cv_im = cv.cvCreateImage(cv.cvSize(width, height), cv_type, channels)
    #cv_im.imageData = im.tostring()
    if True:
        if len(im.shape) == 3:
            for y in xrange(height):
                for x in xrange(width):
                    pix = [float(v) for v in im[y, x]]
                    scalar = cv.cvScalar(*pix)
                    #print scalar
                    cv_im[y, x] = scalar
        else:
            for y in xrange(height):
                for x in xrange(width):
                    pix = float(im[y, x])
                    cv_im[y, x] = cv.cvScalar(pix, pix, pix)
                    #print 'im[y,x], cv_im[y,x] =', im[y,x], cv_im[y,x]
    print 'resulted in an image openCV image with the following properties:'
    numpy_type, nchannels = cv2np_type_dict[cv.cvGetElemType(cv_im)]
    print '(numpy_type, nchannels, cvmat.width, cvmat.height) =', (numpy_type, nchannels, cv_im.width, cv_im.height)
    return cv_im

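# A minimal sketch of calling np2cv (added for illustration; not part of the
# original module). It assumes numpy as np and highgui are imported as in the
# surrounding snippets, and that np2cv_type_dict / cv2np_type_dict are defined
# elsewhere in the same module (they are referenced but not shown here).
if __name__ == '__main__':
    test_im = np.zeros((120, 160, 3), dtype=np.uint8)
    test_im[30:90, 40:120, 1] = 255    # a green rectangle on a black background
    cv_test = np2cv(test_im)
    highgui.cvNamedWindow('np2cv test', 1)
    highgui.cvShowImage('np2cv test', cv_test)
    highgui.cvWaitKey(0)
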
def hsv2rgb(hue):
    # convert the hue value to the corresponding rgb value
    sector_data = [[0, 2, 1], [1, 2, 0], [1, 0, 2],
                   [2, 0, 1], [2, 1, 0], [0, 1, 2]]
    hue *= 0.1 / 3
    sector = cv.cvFloor(hue)
    p = cv.cvRound(255 * (hue - sector))
    if sector & 1:
        p ^= 255
    rgb = {}
    rgb[sector_data[sector][0]] = 255
    rgb[sector_data[sector][1]] = 0
    rgb[sector_data[sector][2]] = p
    return cv.cvScalar(rgb[2], rgb[1], rgb[0], 0)

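# Brief usage note (added; hsv2rgb itself is unchanged): in the camshift-style
# demos each hue-histogram bin i out of hdims bins spanning hue 0..180 is drawn
# in a representative color obtained with hsv2rgb(i * 180. / hdims). A minimal
# sketch, assuming hdims = 16 as in the histogram constants in this collection:
hdims = 16
bin_colors = [hsv2rgb(i * 180. / hdims) for i in range(hdims)]  # one cvScalar per bin
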
def removeErrantPoints(frame):
    size = cv.cvGetSize(frame)
    for x in range(size.width):
        for y in range(size.height):
            if (cv.cvGetReal2D(frame, y, x) > 0):
                count = 0
                count += same2ndValue(frame, x - 1, y)
                count += same2ndValue(frame, x + 1, y)
                count += same2ndValue(frame, x, y - 1)
                count += same2ndValue(frame, x, y + 1)
                count += same2ndValue(frame, x - 1, y - 1)
                count += same2ndValue(frame, x - 1, y + 1)
                count += same2ndValue(frame, x + 1, y - 1)
                count += same2ndValue(frame, x + 1, y + 1)
                if count == 0:
                    cv.cvSet2D(frame, y, x, cv.cvScalar(0, 0, 0, 0))

def draw_point(self, x, y):
    cv.cvCircle(self.__image, [x, y], 3, cv.cvScalar(0, 255, 0, 0), -1, 8, 0)

def paint(self, img):
    for p in self.points:
        cv.cvDrawCircle(img, p.getCvPoint(), 2, cv.cvScalar(0, 0, 255, 0))
    for i in range(len(self.points) - 1):
        cv.cvLine(img, self.points[i].getCvPoint(),
                  self.points[i + 1].getCvPoint(),
                  cv.cvScalar(255, 255, 255, 0), 1)

import sys

from opencv import cv
from opencv import highgui
import opencv

from pygame.locals import *

hmin = 4
hmax = 18

vmin = 140
vmax = 255
smin = 147
smax = 255

hsv_min = cv.cvScalar(0, smin, vmin, 0)
hsv_max = cv.cvScalar(180, 256, vmax, 0)

capture = None


def change_hmin(p):
    global hmin
    hmin = p


def change_hmax(p):
    global hmax
    hmax = p

from opencv import highgui

#############################################################################
# definition of some constants

# how many bins we want for the histogram, and their ranges
hdims = 16
hranges = [[0, 180]]

# ranges for the limitation of the histogram
vmin = 10
vmax = 256
smin = 30

# the range we want to monitor
hsv_min = cv.cvScalar(0, smin, vmin, 0)
hsv_max = cv.cvScalar(180, 256, vmax, 0)


#############################################################################
# some useful functions

def hsv2rgb(hue):
    # convert the hue value to the corresponding rgb value
    sector_data = [[0, 2, 1], [1, 2, 0], [1, 0, 2],
                   [2, 0, 1], [2, 1, 0], [0, 1, 2]]
    hue *= 0.1 / 3
    sector = cv.cvFloor(hue)
    p = cv.cvRound(255 * (hue - sector))
    if sector & 1:

from contourFilters import areaFilter, rectangularAspectFilter, boxAreaFilter, perimeterFilter

MORPH_KERNEL_SIZE = 2
DILATE_ITER = 2
ERODE_ITER = 2

# Polygon contour approximation tolerance
PER_TOLERANCE = 50

# contour constants
CURRENT_CONTOUR = 3
CONTOUR_THICKNESS = 3

# color constants
_red = cv.cvScalar(0, 0, 255, 0)
_green = cv.cvScalar(0, 255, 0, 0)


def getPatchContour():
    frame = cvLoadImage("../images/colilla-patch2.png")

    src_hsvImage = cvCreateImage(cvGetSize(frame), frame.depth, 3)
    src_Himage = cvCreateImage(cvGetSize(frame), frame.depth, 1)
    src_Simage = cvCreateImage(cvGetSize(frame), frame.depth, 1)
    src_Vimage = cvCreateImage(cvGetSize(frame), frame.depth, 1)

    cvCvtColor(frame, src_hsvImage, CV_BGR2HSV)
    cvSplit(src_hsvImage, src_Himage, src_Simage, src_Vimage, None)
    cvShowImage("HSV", src_Simage)

    eqImage = cvClone(src_Simage)
    cvEqualizeHist(src_Simage, eqImage)

def main():
    ct1 = CntPoint()
    ct2 = CntPoint()
    agl = CntAngle()
    das = CntDAS()
    das.bDraw = 0
    try:
        opts, args = getopt.getopt(sys.argv[1:], "ho:dn:e",
                                   ["help", "output=", "draw", "num=", "even"])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    output = None
    bDraw = 0
    npoint = 100
    for o, a in opts:
        if o == "-v":
            ct.verbose = 1
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        if o in ("-o", "--output"):
            output = a
        if o in ("-d", "--draw"):
            bDraw = 1
        if o in ("-n", "--num"):
            npoint = string.atoi(a)
        if o in ("-e", "--even"):
            ct.bEven = 1
    if (len(args)) != 2:
        usage()
        sys.exit(2)

    ct1.GetContour(args[0], npoint)
    agl.ExtractFeature(ct1.GetKeyPoints(), ct1.drawimg)
    das.ExtractFeature(ct1.GetKeyPoints(), ct1.drawimg)
    ct2.GetContour(args[1], npoint)
    agl.ExtractFeature(ct2.GetKeyPoints(), ct2.drawimg)
    das.ExtractFeature(ct2.GetKeyPoints(), ct2.drawimg)

    seq1 = getdata(ct1.GetKeyPoints())
    seq2 = getdata(ct2.GetKeyPoints())
    matcher = SmithWaterman()
    cost, align, X, Y = matcher.Align(seq1, seq2)

    myfont = cv.cvInitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5)
    if (bDraw):
        ct1.DrawKeyPoints()
        kpoints1 = ct1.GetKeyPoints()
        ct2.DrawKeyPoints()
        kpoints2 = ct2.GetKeyPoints()
        ptcount = 0
        for i in range(len(X)):
            xi = X[i]
            yi = Y[i]
            if (xi == -1):
                cv.cvPutText(ct2.drawimg, 'O',
                             cv.cvPoint(int(kpoints2[yi].x), int(kpoints2[yi].y)),
                             myfont, cv.cvScalar(255, 0, 0, 0))
            if (yi == -1):
                cv.cvPutText(ct1.drawimg, 'O',
                             cv.cvPoint(int(kpoints1[xi].x), int(kpoints1[xi].y)),
                             myfont, cv.cvScalar(255, 0, 0, 0))
            if (xi != -1 and yi != -1):
                ptcount += 1
                cv.cvPutText(ct1.drawimg, str(ptcount),
                             cv.cvPoint(int(kpoints1[xi].x), int(kpoints1[xi].y)),
                             myfont, cv.cvScalar(255, 255, 0, 0))
                cv.cvPutText(ct2.drawimg, str(ptcount),
                             cv.cvPoint(int(kpoints2[yi].x), int(kpoints2[yi].y)),
                             myfont, cv.cvScalar(255, 255, 0, 0))
        highgui.cvNamedWindow("contour1", 1)
        highgui.cvNamedWindow("contour2", 1)
        highgui.cvShowImage("contour1", ct1.drawimg)
        highgui.cvShowImage("contour2", ct2.drawimg)
        highgui.cvWaitKey(0)

def point(self, pts, size=1, color=cv.cvScalar(100, 100, 100)):
    def circ(n):
        pt = cv.cvPoint(int(round(n[0, 0])), int(round(n[1, 0])))
        cv.cvCircle(self.buffer, pt, size, color, cv.CV_FILLED, cv.CV_AA)
    map(circ, fun.points_of_mat(pts))

#!/usr/bin/env python
import sys
import math
import string
import optparse
import fileinput

from CntPoint import *
from opencv import cv
from opencv import highgui

_red = cv.cvScalar(0, 0, 255, 0)
_green = cv.cvScalar(0, 255, 0, 0)
_white = cv.cvScalar(255, 255, 255, 0)
_black = cv.cvScalar(0, 0, 0, 0)


class Corner:

    def __FindCorner(self, filename):
        # find the corners of images, and save all corner points in self.vKeyPoints
        self.img = highgui.cvLoadImage(filename)
        greyimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8, 1)
        hsvimg = cv.cvCreateImage(cv.cvGetSize(self.img), 8, 3)
        cv.cvCvtColor(self.img, hsvimg, cv.CV_RGB2HSV)
        cv.cvCvtColor(hsvimg, greyimg, cv.CV_BGR2GRAY)

        eigImage = cv.cvCreateImage(cv.cvGetSize(greyimg), cv.IPL_DEPTH_32F, 1)
        tempImage = cv.cvCreateImage(cv.cvGetSize(greyimg), cv.IPL_DEPTH_32F, 1)
        self.points = cv.cvGoodFeaturesToTrack(greyimg, eigImage, tempImage,
                                               2000, 0.01, 5, None, 3, 0, 0.01)
        self.points2 = cv.cvFindCornerSubPix(
            greyimg, self.points, cv.cvSize(20, 20), cv.cvSize(-1, -1),
            cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
        cv.cvReleaseImage(eigImage)

def harrisResponse(frame):
    """pyvision/point/DetectorHarris.py
    Runs at 10.5 fps...
    """
    #gray = cv.cvCreateImage( cv.cvGetSize(image), 8, 1 )
    #corners = cv.cvCreateImage( cv.cvGetSize(image), 32, 1 )
    #cv.cvCvtColor( image, gray, cv.CV_BGR2GRAY )
    #cv.cvCornerHarris(gray,corners,15)

    # This could be done in a persistant way
    # create the images we need
    image = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3)
    grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    prev_grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    prev_pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    eig = cv.cvCreateImage(cv.cvGetSize(frame), cv.IPL_DEPTH_32F, 1)
    temp = cv.cvCreateImage(cv.cvGetSize(frame), cv.IPL_DEPTH_32F, 1)
    points = [[], []]

    # copy the frame, so we can draw on it
    cv.cvCopy(frame, image)

    # create a grey version of the image
    cv.cvCvtColor(image, grey, cv.CV_BGR2GRAY)

    # search the good points
    points[1] = cv.cvGoodFeaturesToTrack(grey, eig, temp, MAX_COUNT,
                                         quality, min_distance, None, 3, 0, 0.04)

    # refine the corner locations
    cv.cvFindCornerSubPix(
        grey, points[1], cv.cvSize(win_size, win_size), cv.cvSize(-1, -1),
        cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))

    if len(points[0]) > 0:
        # we have points, so display them

        # calculate the optical flow
        [points[1], status], something = cv.cvCalcOpticalFlowPyrLK(
            prev_grey, grey, prev_pyramid, pyramid,
            points[0], len(points[0]), (win_size, win_size), 3,
            len(points[0]), None,
            cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03),
            flags)

        # initializations
        point_counter = -1
        new_points = []

        for the_point in points[1]:
            # go trough all the points

            # increment the counter
            point_counter += 1

            if add_remove_pt:
                # we have a point to add, so see if it is close to
                # another one. If yes, don't use it
                dx = pt.x - the_point.x
                dy = pt.y - the_point.y
                if dx * dx + dy * dy <= 25:
                    # too close
                    add_remove_pt = 0
                    continue

            if not status[point_counter]:
                # we will disable this point
                continue

            # this point is a correct point
            new_points.append(the_point)

            # draw the current point
            cv.cvCircle(image, cv.cvPointFrom32f(the_point), 3,
                        cv.cvScalar(0, 255, 0, 0), -1, 8, 0)

        # set back the points we keep
        points[1] = new_points

    # swapping
    prev_grey, grey = grey, prev_grey
    prev_pyramid, pyramid = pyramid, prev_pyramid
    points[0], points[1] = points[1], points[0]
    return image

                     random.randrange(0, 100) * 0.05 + 0.01,
                     random.randrange(0, 5) * 0.1,
                     random.randrange(0, 10),
                     line_type)

    cv.cvPutText(image, "Testing text rendering!", pt1, font, random_color(random))

    highgui.cvShowImage(window_name, image)
    highgui.cvWaitKey(delay)

# prepare a text, and get its properties
font = cv.cvInitFont(cv.CV_FONT_HERSHEY_COMPLEX, 3, 3, 0.0, 5, line_type)
text_size, ymin = cv.cvGetTextSize("OpenCV forever!", font)
pt1.x = (width - text_size.width) / 2
pt1.y = (height + text_size.height) / 2
image2 = cv.cvCloneImage(image)

# now, draw some OpenCV pub ;-)
for i in range(255):
    cv.cvSubS(image2, cv.cvScalarAll(i), image, None)
    cv.cvPutText(image, "OpenCV forever!", pt1, font, cv.cvScalar(255, i, i))
    highgui.cvShowImage(window_name, image)
    highgui.cvWaitKey(delay)

# wait some key to end
highgui.cvWaitKey(0)

    # generate a random point
    points.append(cv.cvPoint(
        my_random.randrange(0, image.width / 2) + image.width / 4,
        my_random.randrange(0, image.width / 2) + image.width / 4))

# compute the convex hull
hull = cv.cvConvexHull2(points, cv.CV_CLOCKWISE, 0)

# start with an empty image
cv.cvSetZero(image)

for i in range(count):
    # draw all the points
    cv.cvCircle(image, points[i], 2,
                cv.cvScalar(0, 0, 255, 0),
                cv.CV_FILLED, cv.CV_AA, 0)

# start the line from the last point
pt0 = points[hull[-1]]

for point_index in hull:
    # connect the previous point to the current one

    # get the current one
    pt1 = points[point_index]

    # draw
    cv.cvLine(image, pt0, pt1,
              cv.cvScalar(0, 255, 0, 0), 1, cv.CV_AA, 0)

cv.cvCvtColor(image, hsv, cv.CV_BGR2HSV)

cv.cvLine(image, cv.cvPoint(0, image.height / 2),
          cv.cvPoint(image.width, image.height / 2),
          cv.CV_RGB(0, 255, 0), 2, 8, 0)
cv.cvLine(image, cv.cvPoint(image.width / 2, 0),
          cv.cvPoint(image.width / 2, image.height),
          cv.CV_RGB(0, 255, 0), 2, 8, 0)

if track_object:
    _vmin = vmin
    _vmax = vmax

    cv.cvInRangeS(hsv,
                  cv.cvScalar(0, smin, min(_vmin, _vmax), 0),
                  cv.cvScalar(180, 256, max(_vmin, _vmax), 0),
                  mask)
    cv.cvSplit(hsv, hue, None, None, None)

    if track_object < 0:
        max_val = 0.0
        subhue = cv.cvGetSubRect(hue, selection)
        submask = cv.cvGetSubRect(mask, selection)
        cv.cvCalcHist(subhue, hist, 0, submask)

        # extract the min and max value of the histogram
        min_val, max_val, min_idx, max_idx = cv.cvGetMinMaxHistValue(hist)

        if (max_val):

            dy = pt.y - the_point.y
            if dx * dx + dy * dy <= 25:
                # too close
                add_remove_pt = 0
                continue

        if not status[point_counter]:
            # we will disable this point
            continue

        # this point is a correct point
        new_points.append(the_point)

        # draw the current point
        cv.cvCircle(image, [the_point.x, the_point.y], 3,
                    cv.cvScalar(0, 255, 0, 0), -1, 8, 0)

    # set back the points we keep
    points[1] = new_points

    if add_remove_pt:
        # we want to add a point
        points[1].append(cv.cvPointTo32f(pt))

        # refine the corner locations
        points[1][-1] = cv.cvFindCornerSubPix(
            grey, [points[1][-1]], cv.cvSize(win_size, win_size),
            cv.cvSize(-1, -1),
            cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS,
                              20, 0.03))[0]

#ERODE_ITER=2
DILATE_ITER = 1
ERODE_ITER = 1

# Polygon contour approximation tolerance
PER_TOLERANCE = 50

# contour constants
CURRENT_CONTOUR = 2
CONTOUR_THICKNESS = 2

# threshold values
THRESHOLD_VALUE = 240

# color constants
_red = cv.cvScalar(0, 0, 255, 0)
_green = cv.cvScalar(0, 255, 0, 0)


def isValidRect(rect, fullFrame):
    if rect.x < 0 or rect.y < 0:
        return False
    if rect.x > fullFrame.width or rect.y > fullFrame.height:
        return False
    if rect.x + rect.width > fullFrame.width or rect.y + rect.height > fullFrame.height:
        return False
    return True


def biggerRect(rect, percent, fullFrame):

def random_color(random):
    """
    Return a random color
    """
    icolor = random.randint(0, 0xFFFFFF)
    return cv.cvScalar(icolor & 0xff, (icolor >> 8) & 0xff, (icolor >> 16) & 0xff)

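# A short usage sketch (added for illustration, assuming the cv and highgui
# imports used by the surrounding drawing demo): paint a few filled circles
# in random colors on a blank 8-bit, 3-channel image. The demo image size and
# window name are arbitrary choices, not from the original sample.
import random
demo = cv.cvCreateImage(cv.cvSize(320, 240), 8, 3)
cv.cvSetZero(demo)
for _ in range(10):
    center = cv.cvPoint(random.randrange(0, 320), random.randrange(0, 240))
    cv.cvCircle(demo, center, 10, random_color(random), cv.CV_FILLED, cv.CV_AA, 0)
highgui.cvShowImage('random_color demo', demo)
highgui.cvWaitKey(0)
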
red_hmin = 127
red_hmax = 180

green_hmin = 38
green_hmax = 100

vmin = 125
vmax = 255

# global statistics variables
stats = True
frameCount = 0
redFailCount = 0
greenFailCount = 0
startTime = time()

hsv_min = cv.cvScalar(0, 0, vmin, 0)
hsv_max = cv.cvScalar(180, 255, vmax, 0)

capture = None


def change_hmin(p):
    global hmin
    hmin = p


def change_hmax(p):
    global hmax
    hmax = p


def change_red_hmin(p):
    global red_hmin
    red_hmin = p

def segment_center_object(image, display_on=False, nsamp=10000, iter_limit=30,
                          use_texture=True, use_hsv=True, set_v_to_zero=True,
                          use_mask=True, remove_saturation=False,
                          remove_boundary=True, prior_gmm=None,
                          use_flip_heuristic=True):
    """
    segment the input image (OpenCV image) and return an ellipse fit to the
    center object (foreground) and a binary image mask for this foreground object

    nsamp : number of pixels to be used when fitting the Gaussian mixture model
            (impacts speed and accuracy)
    iter_limit : maximum number of iterations when fitting the texture model
    use_texture : use texture features
    use_hsv : use hsv color space for features
    set_v_to_zero : effectively remove the value (brightness) component of the hsv features
    use_mask : mask out areas of the image prior to training the appearance model and segmenting
    remove_saturation : if use_mask, then remove saturated pixels (RGB values = 255 = max value)
    remove_boundary : if use_mask, then remove the borders of the image prior to segmenting it

    returns a segmentation object (SegmentObject)
    """
    #remove_low_freq = True

    if use_hsv:
        hsv_image = cv.cvCreateImage(cv.cvSize(image.width, image.height),
                                     cv.IPL_DEPTH_8U, 3)
        cv.cvCvtColor(image, hsv_image, cv.CV_RGB2HSV)  #cv.CV_BGR2HSV)
        if set_v_to_zero:
            #cvSet(hsv_image, cvScalarAll(0), )
            for y in xrange(hsv_image.height):
                for x in xrange(hsv_image.width):
                    pix = hsv_image[y, x]
                    hsv_image[y, x] = cv.cvScalar(pix[0], pix[1], 0.0)
        image = hsv_image

    if display_on:
        image_list = []
    else:
        image_list = None

    imf = ImageFeatures(image, use_texture=use_texture)
    #imf.texture_features()

    if use_mask:
        #test_mask = np.zeros([image.height, image.width])
        #test_mask[0:200, 0:200] = 1.0
        #test_mask = test_mask > 0.0

        # select saturation mask
        nim = ut.cv2np(image)
        if remove_saturation:
            # remove saturated pixels
            #saturation_mask = ~np.alltrue(nim > 255, axis=2)
            saturation_mask = ~np.any(nim >= 255, axis=2)
            #saturation_mask = np.sum(nim >= 255, axis=2) < 2
        if remove_boundary:
            # remove boundaries beyond the possible object size
            border_y = 50
            border_x = 100
            too_big_mask = np.zeros(nim.shape[:2], dtype=np.bool)
            w = nim.shape[1]
            h = nim.shape[0]
            too_big_mask[border_y:h - border_y, border_x:w - border_x] = True
        if remove_saturation and remove_boundary:
            feature_mask = saturation_mask & too_big_mask
        elif remove_saturation:
            feature_mask = saturation_mask
        else:
            feature_mask = too_big_mask
        disp_mask = feature_mask.copy()
        features = imf.select_subset(nsamp, mask_image=feature_mask)
        cv_mask = ut.np2cv(disp_mask.astype(np.uint8) * 255)
        if image_list is not None:
            image_list.append(cv_mask)
    else:
        features = imf.select_subset(nsamp)

    #sego = SegmentObject(image, features, iter_limit=iter_limit)
    sego = SegmentObject(image, imf, iter_limit=iter_limit, prior_gmm=prior_gmm)
    sego.classify_image(use_flip_heuristic)
    sego.clean_classified_image()
    #sego.find_largest_object()
    sego.find_best_object()
    sego.fit_to_largest_object()

    if image_list is not None:
        image_list.extend(sego.get_images_for_display())
        ut.display_images(image_list)

    return sego

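# A hedged usage sketch (added; not from the original file). It assumes an
# image loaded with highgui.cvLoadImage and a hypothetical file name; only the
# keyword arguments already documented in the docstring above are used, and the
# returned SegmentObject is simply kept for further processing.
if __name__ == '__main__':
    img = highgui.cvLoadImage('object_on_table.png')   # hypothetical input image
    # display_on=True makes the function show its intermediate masks via
    # ut.display_images; smaller nsamp / iter_limit trade accuracy for speed.
    sego = segment_center_object(img, display_on=True, nsamp=5000, iter_limit=10)
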
def main(args):
    global capture
    global hmax, hmin
    global stats, startTime

    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Red Hue', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Green Hue', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Red Laser', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Green Laser', highgui.CV_WINDOW_AUTOSIZE)

    highgui.cvMoveWindow('Camera', 0, 10)
    highgui.cvMoveWindow('Value', 10, 420)
    highgui.cvMoveWindow('Red Laser', 360, 10)
    highgui.cvMoveWindow('Green Laser', 360, 360)
    highgui.cvMoveWindow('Red Hue', 700, 10)
    highgui.cvMoveWindow('Green Hue', 700, 420)

    highgui.cvCreateTrackbar("Brightness Trackbar", "Camera", 0, 255, change_brightness)
    highgui.cvCreateTrackbar("vmin Trackbar", "Value", vmin, 255, change_vmin)
    highgui.cvCreateTrackbar("vmax Trackbar", "Value", vmax, 255, change_vmax)
    highgui.cvCreateTrackbar("red hmin Trackbar", "Red Hue", red_hmin, 180, change_red_hmin)
    highgui.cvCreateTrackbar("red hmax Trackbar", "Red Hue", red_hmax, 180, change_red_hmax)
    highgui.cvCreateTrackbar("green hmin Trackbar", "Green Hue", green_hmin, 180, change_green_hmin)
    highgui.cvCreateTrackbar("green hmax Trackbar", "Green Hue", green_hmax, 180, change_green_hmax)

    print "grabbing camera"
    capture = highgui.cvCreateCameraCapture(0)
    print "found camera"

    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH, iwidth)
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT, iheight)

    frame = highgui.cvQueryFrame(capture)
    frameSize = cv.cvGetSize(frame)

    hsv = cv.cvCreateImage(frameSize, 8, 3)
    mask = cv.cvCreateImage(frameSize, 8, 1)
    red_hue = cv.cvCreateImage(frameSize, 8, 1)
    green_hue = cv.cvCreateImage(frameSize, 8, 1)
    saturation = cv.cvCreateImage(frameSize, 8, 1)
    value = cv.cvCreateImage(frameSize, 8, 1)
    red_laser = cv.cvCreateImage(frameSize, 8, 1)
    green_laser = cv.cvCreateImage(frameSize, 8, 1)
    turret = FuzzyController(frameSize.width, frameSize.height, True)

    while 1:
        frame = highgui.cvQueryFrame(capture)

        cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)
        cv.cvSplit(hsv, red_hue, saturation, value, None)
        cv.cvSplit(hsv, green_hue, saturation, value, None)

        cv.cvInRangeS(red_hue, cv.cvScalar(red_hmin), cv.cvScalar(red_hmax), red_hue)
        cv.cvInRangeS(green_hue, cv.cvScalar(green_hmin), cv.cvScalar(green_hmax), green_hue)
        cv.cvInRangeS(value, cv.cvScalar(vmin), cv.cvScalar(vmax), value)

        cv.cvAnd(red_hue, value, red_laser)
        cv.cvAnd(green_hue, value, green_laser)

        green_cenX, green_cenY = averageWhitePoints(green_laser)
        draw_target(frame, green_cenX, green_cenY, "GREEN")

        red_cenX, red_cenY = averageWhitePoints(red_laser)
        draw_target(frame, red_cenX, red_cenY, "RED")

        if (green_cenX >= 0 and green_cenY >= 0):  # and move_count <= 0):
            turret.update(green_cenX, green_cenY)

        highgui.cvShowImage('Camera', frame)
        highgui.cvShowImage('Red Hue', red_hue)
        highgui.cvShowImage('Green Hue', green_hue)
        highgui.cvShowImage('Value', value)
        highgui.cvShowImage('Red Laser', red_laser)
        highgui.cvShowImage('Green Laser', green_laser)

        if stats:
            printRunningStats((green_cenX, green_cenY), (red_cenX, red_cenY))

        k = highgui.cvWaitKey(10)
        if k == '\x1b' or k == 'q':
            sys.exit()
        if k == 'p':
            if stats:
                printTotalStats()
                stats = False
            else:
                startTime = time()
                stats = True

def main(args):
    global capture
    global hmax, hmin

    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Hue', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Satuation', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Laser', highgui.CV_WINDOW_AUTOSIZE)

    highgui.cvMoveWindow('Camera', 0, 10)
    highgui.cvMoveWindow('Hue', 0, 350)
    highgui.cvMoveWindow('Satuation', 360, 10)
    highgui.cvMoveWindow('Value', 360, 350)
    highgui.cvMoveWindow('Laser', 700, 40)

    highgui.cvCreateTrackbar("Brightness Trackbar", "Camera", 0, 255, change_brightness)
    highgui.cvCreateTrackbar("hmin Trackbar", "Hue", hmin, 180, change_hmin)
    highgui.cvCreateTrackbar("hmax Trackbar", "Hue", hmax, 180, change_hmax)
    highgui.cvCreateTrackbar("smin Trackbar", "Satuation", smin, 255, change_smin)
    highgui.cvCreateTrackbar("smax Trackbar", "Satuation", smax, 255, change_smax)
    highgui.cvCreateTrackbar("vmin Trackbar", "Value", vmin, 255, change_vmin)
    highgui.cvCreateTrackbar("vmax Trackbar", "Value", vmax, 255, change_vmax)

    print "grabbing camera"
    capture = highgui.cvCreateCameraCapture(0)
    print "found camera"

    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH, 320)
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT, 240)

    frame = highgui.cvQueryFrame(capture)
    frameSize = cv.cvGetSize(frame)

    hsv = cv.cvCreateImage(frameSize, 8, 3)
    mask = cv.cvCreateImage(frameSize, 8, 1)
    hue = cv.cvCreateImage(frameSize, 8, 1)
    satuation = cv.cvCreateImage(frameSize, 8, 1)
    value = cv.cvCreateImage(frameSize, 8, 1)
    laser = cv.cvCreateImage(frameSize, 8, 1)
    turret = FuzzyController(frameSize.width, frameSize.height, True)
    move_count = 0

    while 1:
        frame = highgui.cvQueryFrame(capture)

        cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)
        #cv.cvInRangeS(hsv,hsv_min,hsv_max,mask)
        cv.cvSplit(hsv, hue, satuation, value, None)
        cv.cvInRangeS(hue, cv.cvScalar(hmin), cv.cvScalar(hmax), hue)
        cv.cvInRangeS(satuation, cv.cvScalar(smin), cv.cvScalar(smax), satuation)
        cv.cvInRangeS(value, cv.cvScalar(vmin), cv.cvScalar(vmax), value)
        #cv.cvInRangeS(hue,0,180,hue)

        cv.cvAnd(hue, value, laser)
        #cv.cvAnd(laser, value, laser)

        cenX, cenY = averageWhitePoints(laser)
        #print cenX,cenY
        draw_target(frame, cenX, cenY)

        if (cenX != 0 and cenY != 0):  # and move_count <= 0):
            turret.update(cenX, cenY, False)
            """
            turret.reset()
            move_count = 3
            if(cenX < 100):
                turret.left(20)
            elif(cenX > 200):
                turret.right(20)

            if(cenY < 80):
                turret.up(40)
            elif(cenY > 170):
                print "DOWN please.."
                turret.down(40)
                print cenY
            """
        #move_count -= 1
        #draw_target(frame,200,1)

        highgui.cvShowImage('Camera', frame)
        highgui.cvShowImage('Hue', hue)
        highgui.cvShowImage('Satuation', satuation)
        highgui.cvShowImage('Value', value)
        highgui.cvShowImage('Laser', laser)

        k = highgui.cvWaitKey(10)
        if k == 'q':
            sys.exit()

#!/usr/bin/env python
#####################################################
# Longbin Chen
# ------------
# Created by [email protected]
#####################################################
import sys, math, string, optparse, fileinput, cStringIO, random

from opencv import cv
from opencv import highgui

from MSS import *
import NW

_red = cv.cvScalar(0, 0, 255, 0)
_green = cv.cvScalar(0, 255, 0, 0)
_white = cv.cvScalar(255, 255, 255, 0)
_black = cv.cvScalar(0, 0, 0, 0)

color = [_red, _green, _white,
         cv.cvScalar(128, 0, 128),
         cv.cvScalar(128, 128, 128),
         cv.cvScalar(128, 0, 0),
         cv.cvScalar(0, 0, 128)]

OUT = cStringIO.StringIO()


class Edge:
