def __findcurve(self, img):
    """Trace every contour of the binary image *img* into self.allcurve.

    Each contour becomes a list of myPoint objects carrying (x, y) and a
    global running index; self.curvelength is set to the total number of
    points collected across all contours.

    Fixes: removed the unused `cidx` computation (it consumed random-number
    state for nothing) and the unused `PointArray2D32f` matrix allocation.
    """
    storage = cv.cvCreateMemStorage(0)
    nb_contours, cont = cv.cvFindContours(
        img, storage, cv.sizeof_CvContour, cv.CV_RETR_LIST,
        cv.CV_CHAIN_APPROX_NONE, cv.cvPoint(0, 0))
    if self.drawcontour:
        cv.cvDrawContours(self.drawimg, cont, _white, _white,
                          1, 1, cv.CV_AA, cv.cvPoint(0, 0))
    idx = 0
    for c in cont.hrange():
        # Copy the contour sequence into a 1xN matrix of integer points.
        PointArray = cv.cvCreateMat(1, c.total, cv.CV_32SC2)
        cv.cvCvtSeqToArray(c, PointArray,
                           cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
        fpoints = []
        for i in range(c.total):
            kp = myPoint()
            kp.x = cv.cvGet2D(PointArray, 0, i)[0]
            kp.y = cv.cvGet2D(PointArray, 0, i)[1]
            kp.index = idx  # running index across ALL contours
            idx += 1
            fpoints.append(kp)
        self.allcurve.append(fpoints)
    self.curvelength = idx
def __calculate(self):
    """Build a cumulative-sum table over channel 0 of self.origImage.

    For each pixel (i, j):
      row_sums[i][j] = sum of channel-0 values in column j, rows 0..i
      result[i][j]   = result[i][j-1] + row_sums[i][j]
    i.e. an integral-image-style table.  Returns the CV_32FC1 result matrix.
    """
    print "I want to calculate an image"
    size = cv.cvGetSize(self.origImage)
    result = cv.cvCreateMat(size.height,size.width,cv.CV_32FC1)
    row_sums = cv.cvCreateMat(size.height,size.width,cv.CV_32FC1)
    for i in range(size.height):
        for j in range(size.width):
            # Only the first channel of the source pixel is used.
            image_value = cv.cvGet2D(self.origImage,i,j)
            image_value = image_value[0]
            prev_row_sum = 0
            if(i == 0):
                # First row: column sum starts at this pixel value.
                cv.cvmSet(row_sums,i,j,image_value)
            else:
                prev_row_sum = cv.cvmGet(row_sums,i-1,j)
                cv.cvmSet(row_sums,i,j,image_value+prev_row_sum)
            if(j == 0):
                cv.cvmSet(result,i,j,prev_row_sum+image_value)
            else:
                prev_result = cv.cvmGet(result,i,j-1)
                cv.cvmSet(result,i,j,prev_row_sum+image_value+prev_result)
            if(i == 0 and j == 0):
                # Debug trace for the very first pixel only.
                print "image_value:",image_value
                print "prev_row_sum:",prev_row_sum
    return result
def __findcurve(self, img):
    """Collect every contour of *img* as a list of myPoint objects in
    self.allcurve, and record the grand total in self.curvelength."""
    storage = cv.cvCreateMemStorage(0)
    n_found, contours = cv.cvFindContours(
        img, storage, cv.sizeof_CvContour, cv.CV_RETR_LIST,
        cv.CV_CHAIN_APPROX_NONE, cv.cvPoint(0, 0))
    cidx = int(random.random() * len(color))
    if self.drawcontour:
        cv.cvDrawContours(self.drawimg, contours, _white, _white,
                          1, 1, cv.CV_AA, cv.cvPoint(0, 0))
    point_index = 0
    for contour in contours.hrange():
        # Dump the contour sequence into integer / float point matrices.
        int_points = cv.cvCreateMat(1, contour.total, cv.CV_32SC2)
        float_points = cv.cvCreateMat(1, contour.total, cv.CV_32FC2)
        cv.cvCvtSeqToArray(contour, int_points,
                           cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
        curve = []
        for k in range(contour.total):
            pt = myPoint()
            pt.x = cv.cvGet2D(int_points, 0, k)[0]
            pt.y = cv.cvGet2D(int_points, 0, k)[1]
            pt.index = point_index
            point_index += 1
            curve.append(pt)
        self.allcurve.append(curve)
    self.curvelength = point_index
def _calibrate_camera(self):
    """Detect chessboard corners in the current frame and compute the
    homography mapping them onto a regular grid; stores it in self.matrix
    (and the target grid points in self.dest)."""
    frame = CameraInputProvider.get_frame(self)
    success, corners = cv.cvFindChessboardCorners(
        frame, cv.cvSize(self.grid[0], self.grid[1]))
    n_points = self.grid[0] * self.grid[1]
    cell_w = self.capture_dims[0] / self.grid[0]
    cell_h = self.capture_dims[1] / self.grid[1]
    # Regular grid of destination points, row-major over the board.
    dest = [(col * cell_w, row * cell_h)
            for row in range(0, self.grid[0])
            for col in range(0, self.grid[1])]
    self.dest = dest
    src_mat = cv.cvCreateMat(n_points, 2, cv.CV_32F)
    dst_mat = cv.cvCreateMat(n_points, 2, cv.CV_32F)
    homography = cv.cvCreateMat(3, 3, cv.CV_32F)
    for idx in range(n_points):
        src_mat[idx, 0] = corners[idx].x
        src_mat[idx, 1] = corners[idx].y
        dst_mat[idx, 0] = dest[idx][0]
        dst_mat[idx, 1] = dest[idx][1]
    results = cv.cvFindHomography(src_mat, dst_mat, homography)
    self.matrix = homography
def __findContour(self, filename): #find the contour of images, and save all points in self.vKeyPoints
    """Load *filename*, binarise it, trace all contours, and record every
    contour point (as myPoint) in self.allcurve; sets self.curvelength to
    the total point count and draws the contours into self.drawimg."""
    self.img = highgui.cvLoadImage (filename)
    self.grayimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8,1)
    self.drawimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8,3)
    cv.cvCvtColor (self.img, self.grayimg, cv.CV_BGR2GRAY)
    # Three 9x9 box-blur passes to suppress noise before thresholding.
    cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
    cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
    cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
    cv.cvThreshold( self.grayimg, self.grayimg, self.threshold, self.threshold +100, cv.CV_THRESH_BINARY )
    cv.cvZero(self.drawimg)
    storage = cv.cvCreateMemStorage(0)
    nb_contours, cont = cv.cvFindContours (self.grayimg, storage, cv.sizeof_CvContour, cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_NONE, cv.cvPoint (0,0))
    # Draw every contour in white onto the cleared overlay image.
    cv.cvDrawContours (self.drawimg, cont, cv.cvScalar(255,255,255,0), cv.cvScalar(255,255,255,0), 1, 1, cv.CV_AA, cv.cvPoint (0, 0))
    self.allcurve = []
    idx = 0
    for c in cont.hrange():
        # Copy the contour sequence into a 1xN integer-point matrix.
        PointArray = cv.cvCreateMat(1, c.total , cv.CV_32SC2)
        PointArray2D32f= cv.cvCreateMat( 1, c.total , cv.CV_32FC2)  # NOTE(review): allocated but never used here
        cv.cvCvtSeqToArray(c, PointArray, cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
        fpoints = []
        for i in range(c.total):
            kp = myPoint()
            kp.x = cv.cvGet2D(PointArray,0, i)[0]
            kp.y = cv.cvGet2D(PointArray,0, i)[1]
            kp.index = idx  # running index across all contours
            idx += 1
            fpoints.append(kp)
        self.allcurve.append(fpoints)
    self.curvelength = idx
def get_eye(self):
    """Locate eyes inside the largest detected face and register a tracking
    point for them on self.cap.  Always returns False (or None when no usable
    face rectangle could be cut)."""
    eyes = False
    face = self.cap.get_area(commons.haar_cds['Face'])
    if face:
        # Fixed-size working tiles: colour and grayscale 128x128.
        cvtile = cv.cvCreateMat(128,128,cv.CV_8UC3)
        bwtile = cv.cvCreateMat(128,128,cv.CV_8U)
        # Pick the largest face rectangle by area.
        areas = [ (pt[1].x - pt[0].x)*(pt[1].y - pt[0].y) for pt in face ]
        startF = face[areas.index(max(areas))][0]
        endF = face[areas.index(max(areas))][1]
        facerect = self.cap.rect(startF.x, startF.y, endF.x - startF.x, endF.y - startF.y)
        if not facerect:
            return
        cv.cvResize(facerect, cvtile)
        cv.cvCvtColor( cvtile, bwtile, cv.CV_BGR2GRAY )
        leye,reye,lcp,rcp = self.fel.locateEyes(bwtile)
        leye = pv.Point(leye)
        reye = pv.Point(reye)
        # Map eye coordinates from tile space back to full-frame space.
        leye_x = int((float(leye.X())*facerect.width/cvtile.width) + startF.x)
        leye_y = int((float(leye.Y())*facerect.height/cvtile.height) + startF.y)
        reye_x = int((float(reye.X())*facerect.width/cvtile.width) + startF.x)
        reye_y = int((float(reye.Y())*facerect.height/cvtile.height) + startF.y)
        eye_rect = { "startX" : leye_x - 5,
                     "startY" : leye_y - 5,
                     "endX" : leye_x + 5,
                     "endY" : leye_y + 5}  # NOTE(review): computed but unused
        #self.cap.image(self.cap.rect(leye_x - 5, leye_y - 5, 20, 20))
        # NOTE(review): only ONE point is added per call — "leye" the first
        # time, "reye" on every later call.  Confirm this is intentional.
        if not hasattr(self.cap, "leye"):
            self.cap.add( Point("point", "leye", [int(leye_x), int(leye_y)], parent=self.cap, follow=True) )
        else:
            self.cap.add( Point("point", "reye", [int(reye_x), int(reye_y)], parent=self.cap, follow=True) )
        # Shows the face rectangle
        #self.cap.add( Graphic("rect", "Face", ( startF.x, startF.y ), (endF.x, endF.y), parent=self.cap) )
    self.foreheadOrig = None
    return False
def __init__(self,cam_num,capture_dims) : #Call CameraInputProvider constructor CameraInputProvider.__init__(self,cam_num,capture_dims) print self.scale self.image_dims=tuple((int(dim) for dim in self.capture_dims)) self.storage = cvCreateMemStorage(0); self.color_thresh = 230 self.grid = (7,7) self.n_points = self.grid[0]*self.grid[1] self.object_points = cv.cvCreateMat(self.n_points,3,cv.CV_32F) self.image_points = cv.cvCreateMat(self.n_points,2,cv.CV_32F)
def numpymat2cvmat(nmat):
    """Copy a 2-D numpy matrix element-by-element into a new CV_32FC1
    OpenCV matrix of the same dimensions."""
    rows = nmat.shape[0]
    cols = nmat.shape[1]
    cvmat = cv.cvCreateMat(rows, cols, cv.CV_32FC1)
    for r in range(rows):
        for c in range(cols):
            cvmat[r, c] = nmat[r, c]
    return cvmat
def preproc(image):
    """Binarise *image* with an Otsu threshold; returns a new matrix of the
    same size and type."""
    from opencv.cv import cvCreateMat, cvThreshold, CV_THRESH_OTSU
    binary_image = cvCreateMat(image.rows, image.cols, image.type)
    cvThreshold(image, binary_image, 128, 255, CV_THRESH_OTSU)
    return binary_image
def numpymat2cvmat(nmat):
    """Convert a 2-D numpy matrix to an OpenCV CV_32FC1 matrix by copying
    each element."""
    n_rows = nmat.shape[0]
    n_cols = nmat.shape[1]
    out = cv.cvCreateMat(n_rows, n_cols, cv.CV_32FC1)
    for i in range(n_rows):
        for j in range(n_cols):
            out[i, j] = nmat[i, j]
    return out
def preproc(image):
    """Return an Otsu-thresholded binary copy of *image* (same size/type)."""
    from opencv.cv import cvCreateMat, cvThreshold, CV_THRESH_OTSU
    rows, cols, typ = image.rows, image.cols, image.type
    result = cvCreateMat(rows, cols, typ)
    # The 128 threshold is only a seed; CV_THRESH_OTSU picks the real one.
    cvThreshold(image, result, 128, 255, CV_THRESH_OTSU)
    return result
def returnEllipses(contours):
    """Fit an ellipse to every contour that has at least 6 points.

    Returns a list of dicts, one per fitted contour:
      'center' -- CvPoint, rounded ellipse centre
      'size'   -- CvSize, rounded HALF axes (cvFitEllipse2 returns full size)
      'angle'  -- float, sign-flipped fit angle

    Fixes: removed a dead `box = cv.CvBox2D()` that was immediately
    overwritten by the cvFitEllipse2 result; dropped stray semicolons.
    """
    ellipses = []
    for c in contours.hrange():
        count = c.total
        # cvFitEllipse2 needs a minimum of six points.
        if count < 6:
            continue
        PointArray = cv.cvCreateMat(1, count, cv.CV_32SC2)
        PointArray2D32f = cv.cvCreateMat(1, count, cv.CV_32FC2)
        cv.cvCvtSeqToArray(c, PointArray,
                           cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
        cv.cvConvert(PointArray, PointArray2D32f)
        box = cv.cvFitEllipse2(PointArray2D32f)
        #cv.cvDrawContours(frame, c, cv.CV_RGB(255,255,255), cv.CV_RGB(255,255,255),0,1,8,cv.cvPoint(0,0));
        center = cv.CvPoint()
        size = cv.CvSize()
        center.x = cv.cvRound(box.center.x)
        center.y = cv.cvRound(box.center.y)
        size.width = cv.cvRound(box.size.width * 0.5)
        size.height = cv.cvRound(box.size.height * 0.5)
        box.angle = -box.angle
        ellipses.append({'center': center, 'size': size, 'angle': box.angle})
    return ellipses
def matchTemplate(self, template, image):
    """Normalized cross-correlation template matching.

    Returns a dict with the correlation map ('image'), the best score in
    [0, 1] ('max_val') and the top-left corner of the best match
    ('max_loc', a cvPoint).
    """
    result_h = image.height - template.height + 1
    result_w = image.width - template.width + 1
    corr_map = cv.cvCreateMat(result_h, result_w, cv.CV_32FC1)
    cv.cvMatchTemplate(image, template, corr_map, cv.CV_TM_CCORR_NORMED)
    worst_loc = cv.cvPoint(0, 0)
    best_loc = cv.cvPoint(0, 0)
    min_val, max_val = cv.cvMinMaxLoc(corr_map, worst_loc, best_loc)
    return {'image': corr_map, 'max_val': max_val, 'max_loc': best_loc}
def matchTemplate(self, template, image):
    """Slide *template* over *image* with CV_TM_CCORR_NORMED and report the
    best match: {'image': correlation map, 'max_val': peak score (0..1),
    'max_loc': cvPoint of the peak's top-left corner}."""
    h = image.height - template.height + 1
    w = image.width - template.width + 1
    scores = cv.cvCreateMat(h, w, cv.CV_32FC1)
    cv.cvMatchTemplate(image, template, scores, cv.CV_TM_CCORR_NORMED)
    lo_pt, hi_pt = cv.cvPoint(0, 0), cv.cvPoint(0, 0)
    lo_val, hi_val = cv.cvMinMaxLoc(scores, lo_pt, hi_pt)
    return {'image': scores, 'max_val': hi_val, 'max_loc': hi_pt}
def on_trackbar1(position):
    """Trackbar callback: run Canny + Hough circle detection on the global
    image, draw detected circles, and (for circles with radius > 200) show a
    centre crop.  Trackbar positions pos2..pos7 tune the detection."""
    global pos1
    global pos2
    global pos3
    global pos4
    global pos5
    global pos6
    global pos7
    global img
    global gray
    global edges
    print
    print position, pos2, pos3, pos4, pos5, pos6, pos7
    temp = cv.cvCloneImage(img)
    gray = cv.cvCreateImage(cv.cvGetSize(temp), 8, 1)
    edges = cv.cvCreateImage(cv.cvGetSize(temp), 8, 1)
    dst = cv.cvCreateImage( cv.cvSize(256,256), 8, 3 )
    src = cv.cvCloneImage(img)
    src2 = cv.cvCreateImage( cv.cvGetSize(src), 8, 3 );
    cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
    # Edge image is for display; Hough runs on the grayscale image below.
    cv.cvCanny(gray, edges, position, pos2, 3)
    cv.cvSmooth(edges, edges, cv.CV_GAUSSIAN, 9, 9)
    # Hough output buffer: up to 50 circles as (x, y, radius) triplets.
    storage = cv.cvCreateMat(50, 1, cv.CV_32FC3)
    cv.cvSetZero(storage)
    try:
        circles = cv.cvHoughCircles(gray, storage, cv.CV_HOUGH_GRADIENT, 1, float(pos3), float(pos2), float(pos4), long(pos5),long(pos6) )
        #print storage
        for i in storage:
            print "Center: ", i[0], i[1], " Radius: ", i[2]
            center = cv.cvRound(i[0]), cv.cvRound(i[1])
            radius = cv.cvRound(i[2])
            cv.cvCircle(temp, (center), radius, cv.CV_RGB(255, 0, 0), 1, cv.CV_AA, 0 )
            cv.cvCircle(edges, (center), radius, cv.CV_RGB(255, 255, 255), 1, cv.CV_AA, 0 )
            if radius > 200:
                print "Circle found over 200 Radius"
                # Square crop window inset by pos7 from the circle edge.
                center_crop_topleft = (center[0]-(radius - pos7)), (center[1]-(radius - pos7))
                center_crop_bottomright = (center[0]+(radius - pos7)), (center[1]+(radius - pos7))
                print "crop top left:     ", center_crop_topleft
                print "crop bottom right: ", center_crop_bottomright
                center_crop = cv.cvGetSubRect(src, (center_crop_topleft[0], center_crop_topleft[1] , (center_crop_bottomright[0] - center_crop_topleft[0]), (center_crop_bottomright[1] - center_crop_topleft[1]) ))
                #center_crop = cv.cvGetSubRect(src, (50, 50, radius/2, radius/2))
                # NOTE(review): unqualified cvShowImage relies on a star import.
                cvShowImage( "center_crop", center_crop )
                print "center_crop created"
                #mark found circle's center with blue point and blue circle of pos 7 radius
                cv.cvCircle(temp ,(center), 2, cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 )
                cv.cvCircle(temp ,(center), (radius - pos7), cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 )
                #cvLogPolar(src, dst, (center), 48, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS )
                #this will draw a smaller cirle outlining the center circle
                #pos7 = int(pos7 /2.5)
                #cv.cvCircle(dst ,(img_size.width-pos7, 0), 2, cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 )
                #cv.cvLine(dst, (img_size.width-pos7-1, 0), (img_size.width-pos7-1, img_size.height), cv.CV_RGB(0, 0, 255),1,8,0)
                #cvShowImage( "log-polar", dst )
                #print radius, (radius-pos7)
                #cropped = cv.cvCreateImage( (pos7, img_size.height), 8, 3)
                #cropped2 = cv.cvCreateImage( (pos7, img_size.height), 8, 3)
                #coin_edge_img = cv.cvGetSubRect(dst, (img_size.width-pos7, 0, pos7 ,img_size.height ))
                #to create the center cropped part of coin
                #img_size = cvGetSize(scr)
                #cvCopy(coin_edge_img, cropped)
                #cvSaveImage("temp.png", cropped)
                #im = Image.open("temp.png").rotate(90)
                #print "pil image size = ", im.size[0], im.size[1]
                #im = im.resize((im.size[0]*2, im.size[1]*2))
                #print "pil image size = ", im.size
                #im.show()
                #im.save("temp2.png")
                cropped2 = highgui.cvLoadImage("temp2.png")
                #cvShowImage( "cropped", cropped2)
    except:
        # NOTE(review): bare except deliberately keeps the GUI alive on any
        # Hough/crop failure; only logs the exception type.
        print "Exception:", sys.exc_info()[0]
        print position, pos2, pos3, pos4, pos5, pos6, pos7
        pass
    highgui.cvShowImage("edges", edges)
    #cvShowImage( "log-polar", dst )
    cvShowImage(wname, temp)
#create image arrays grayimage = cv.cvCreateImage(cv.cvGetSize(image), 8, 1) cannyedges = cv.cvCreateImage(cv.cvGetSize(image), 8, 1) #convert to grayscale cv.cvCvtColor(image, grayimage, cv.CV_BGR2GRAY) #Canny #Canny(image, edges, threshold1, threshold2, aperture_size=3) = None #Implements the Canny algorithm for edge detection. cv.cvCanny(grayimage, cannyedges, 150, 450 , 3) #This is the line that throws the error storage = cv.cvCreateMat(50, 1, cv.CV_32FC3) cv.cvSetZero(storage) #circles = cv.cvHoughCircles(grayimage, storage, cv.CV_HOUGH_GRADIENT, 2, grayimage.height/4, 150, 40, long(sys.argv[2]), long(sys.argv[3])) #circles = cv.cvHoughCircles(grayimage, storage, cv.CV_HOUGH_GRADIENT, 1, grayimage.height, 200, 40, long(sys.argv[2]), long(sys.argv[3])) circles = cv.cvHoughCircles(grayimage, storage, cv.CV_HOUGH_GRADIENT, 1, grayimage.width, 150, 40, long(sys.argv[2]), grayimage.width) print storage for i in storage: print i[0], i[1], i[2] center = cv.cvRound(i[0]), cv.cvRound(i[1]) radius = cv.cvRound(i[2]) cv.cvCircle(image, (center), radius, cv.CV_RGB(255, 0, 0), 1, cv.CV_AA, 0 ) cv.cvCircle(image, (center), 10, cv.CV_RGB(255, 0, 0), -1, cv.CV_AA, 0 ) cv.cvCircle(cannyedges, (center), radius, cv.CV_RGB(255, 255, 255), 1, cv.CV_AA, 0 )
def read(self):
    """Read a frame; on the first successful chessboard detection compute a
    perspective-correction homography, then warp subsequent frames with it.
    Returns the warped frame once calibrated, otherwise the raw frame."""
    source = self.input.read()
    if not self.calibrated:
        success, corners = cv.cvFindChessboardCorners(source, cv.cvSize(self.grid[0],self.grid[1]))
        if success == 1 :
            cv.cvDrawChessboardCorners(source,cv.cvSize(self.grid[0],self.grid[1]),corners,len(corners))
            #self.debug_print('CVPerspective: success, corners = (%d,%s(%d))'%(success,corners,len(corners)))
            n_points = self.grid[0]*self.grid[1]
            # Destination grid cells, leaving a one-cell margin on each side.
            grid_x = self.dims[0]/(self.grid[0]+1)
            grid_y = self.dims[1]/(self.grid[1]+1)
            self.dest = []
            for i in range(1,self.grid[0]+1) :
                for j in range(1,self.grid[1]+1) :
                    self.dest.append((j*grid_x,i*grid_y))
            # loop through corners (clockwise wrapped), figure out which is closest to origin
            four_corners = [corners[0],corners[self.grid[0]-1],corners[-1],corners[self.grid[0]*(self.grid[1]-1)]]
            distances = []
            for i,corner in enumerate(four_corners) :
                distances.append((sqrt(corner.x**2+corner.y**2),i))
            min_corner = min(distances)
            #self.debug_print(distances)
            #self.debug_print(min_corner)
            if min_corner[1] != 0 :
                # Board was detected rotated: rotate the corner grid so the
                # origin-nearest corner comes first.
                rotator = array([corners])
                rotator = rotator.reshape((self.grid))
                rotator = rot90(rotator,min_corner[1])
                corners = rotator.flatten().tolist()
            s = cv.cvCreateMat(n_points,2,cv.CV_32F)
            d = cv.cvCreateMat(n_points,2,cv.CV_32F)
            p = cv.cvCreateMat(3,3,cv.CV_32F)
            for i in range(n_points):
                s[i,0] = corners[i].x
                s[i,1] = corners[i].y
                d[i,0] = self.dest[i][0]
                d[i,1] = self.dest[i][1]
            results = cv.cvFindHomography(s,d,p)
            self.matrix = p
            self.settings.perspective.calibrated = True
            #self.debug_print('projection matrix:%s'%p)
    if self.calibrated :
        try : # workaround for now
            dst = cv.cvCreateImage(cv.cvSize(self.dims[0],self.dims[1]),source.depth,source.nChannels)
            cv.cvWarpPerspective( source, dst, self.matrix)
        except AttributeError :
            # Source frame not yet a full image object; fall through to raw.
            pass
        else:
            return dst
    return source
def find_homography(points1, points2):
    """Compute the 3x3 homography mapping *points1* onto *points2*.

    Both inputs are numpy matrices; the result is returned as a numpy matrix.
    """
    homography = cv.cvCreateMat(3, 3, cv.CV_32FC1)
    src_pts = numpymat2cvmat(points1)
    dst_pts = numpymat2cvmat(points2)
    cv.cvFindHomography(src_pts, dst_pts, homography)
    return cvmat2numpymat(homography)
def process_image( slider_pos ):
    """
    Define trackbar callback functon. This function find contours,
    draw it and approximate it by ellipses.
    """
    stor = cv.cvCreateMemStorage(0);
    # Threshold the source image. This needful for cv.cvFindContours().
    cv.cvThreshold( image03, image02, slider_pos, 255, cv.CV_THRESH_BINARY );
    # Find all contours.
    nb_contours, cont = cv.cvFindContours (image02, stor, cv.sizeof_CvContour, cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_NONE, cv.cvPoint (0,0))
    # Clear images. IPL use.
    cv.cvZero(image02);
    cv.cvZero(image04);
    # This cycle draw all contours and approximate it by ellipses.
    for c in cont.hrange():
        count = c.total; # This is number point in contour
        # Number point must be more than or equal to 6 (for cv.cvFitEllipse_32f).
        if( count < 6 ):
            continue;
        # Alloc memory for contour point set.
        PointArray = cv.cvCreateMat(1, count, cv.CV_32SC2)
        PointArray2D32f= cv.cvCreateMat( 1, count, cv.CV_32FC2)
        # Get contour point set.
        cv.cvCvtSeqToArray(c, PointArray, cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX));
        # Convert CvPoint set to CvBox2D32f set.
        cv.cvConvert( PointArray, PointArray2D32f )
        box = cv.CvBox2D()
        # Fits ellipse to current contour.
        box = cv.cvFitEllipse2(PointArray2D32f);
        # Draw current contour.
        cv.cvDrawContours(image04, c, cv.CV_RGB(255,255,255), cv.CV_RGB(255,255,255),0,1,8,cv.cvPoint(0,0));
        # Convert ellipse data from float to integer representation.
        center = cv.CvPoint()
        size = cv.CvSize()
        center.x = cv.cvRound(box.center.x);
        center.y = cv.cvRound(box.center.y);
        # Half axes: cvFitEllipse2 reports full width/height.
        size.width = cv.cvRound(box.size.width*0.5);
        size.height = cv.cvRound(box.size.height*0.5);
        box.angle = -box.angle;
        # Draw ellipse.
        cv.cvEllipse(image04, center, size, box.angle, 0, 360, cv.CV_RGB(0,0,255), 1, cv.CV_AA, 0);
    # Show image. HighGUI use.
    highgui.cvShowImage( "Result", image04 );
def process_image(slider_pos):
    """Trackbar callback: threshold the source image, find all contours, and
    draw each contour (with >= 6 points) together with its fitted ellipse
    into image04, which is then shown in the "Result" window."""
    stor = cv.cvCreateMemStorage(0)
    # Binarise first — cvFindContours needs a binary input.
    cv.cvThreshold(image03, image02, slider_pos, 255, cv.CV_THRESH_BINARY)
    nb_contours, cont = cv.cvFindContours(image02, stor, cv.sizeof_CvContour,
                                          cv.CV_RETR_LIST,
                                          cv.CV_CHAIN_APPROX_NONE,
                                          cv.cvPoint(0, 0))
    # Reset both working images before drawing.
    cv.cvZero(image02)
    cv.cvZero(image04)
    for contour in cont.hrange():
        n_pts = contour.total
        # cvFitEllipse2 needs at least six points.
        if n_pts < 6:
            continue
        int_pts = cv.cvCreateMat(1, n_pts, cv.CV_32SC2)
        flt_pts = cv.cvCreateMat(1, n_pts, cv.CV_32FC2)
        cv.cvCvtSeqToArray(contour, int_pts,
                           cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
        cv.cvConvert(int_pts, flt_pts)
        box = cv.CvBox2D()
        box = cv.cvFitEllipse2(flt_pts)
        cv.cvDrawContours(image04, contour, cv.CV_RGB(255, 255, 255),
                          cv.CV_RGB(255, 255, 255), 0, 1, 8, cv.cvPoint(0, 0))
        # Round the ellipse parameters; sizes are half-axes for cvEllipse.
        center = cv.CvPoint()
        size = cv.CvSize()
        center.x = cv.cvRound(box.center.x)
        center.y = cv.cvRound(box.center.y)
        size.width = cv.cvRound(box.size.width * 0.5)
        size.height = cv.cvRound(box.size.height * 0.5)
        box.angle = -box.angle
        cv.cvEllipse(image04, center, size, box.angle, 0, 360,
                     cv.CV_RGB(0, 0, 255), 1, cv.CV_AA, 0)
    highgui.cvShowImage("Result", image04)
def find_homography(points1, points2):
    """Return (as a numpy matrix) the 3x3 homography that maps the numpy
    point set *points1* onto *points2*."""
    H = cv.cvCreateMat(3, 3, cv.CV_32FC1)
    cv.cvFindHomography(numpymat2cvmat(points1),
                        numpymat2cvmat(points2),
                        H)
    return cvmat2numpymat(H)
def __call__(self, origExamples = None, resultType = orange.GetValue, returnDFV = False):
    """Predict with the wrapped CvANN classifier for one example.

    resultType selects the return shape (GetValue / GetProbabilities /
    GetBoth); returnDFV additionally returns the decision-function value.
    Returns None when the example cannot be fixed/imputed.
    """
    res = None
    """
    orange.GetBoth -          <type 'tuple'>                     ->    (<orange.Value 'Act'='3.44158792'>, <3.442: 1.000>)
    orange.GetValue -         <type 'orange.Value'>              ->    <orange.Value 'Act'='3.44158792'>
    orange.GetProbabilities - <type 'orange.DiscDistribution'>   ->    <0.000, 0.000>
    """
    #dataUtilities.rmAllMeta(examples)
    if len(origExamples.domain.getmetas()) == 0:
        examples = origExamples
    else:
        examples = dataUtilities.getCopyWithoutMeta(origExamples)
    #Check if the examples are compatible with the classifier (attributes order and varType compatibility)
    if self.verbose > 1: dataUtilities.verbose = self.verbose
    if not self.ExFix.ready:
        self.ExFix.set_domain(self.imputer.defaults.domain)
        self.ExFix.set_examplesFixedLog(self.examplesFixedLog)
    inExamples = self.ExFix.fixExample(examples)
    if not inExamples:
        return None
    #Imput the examples if there are missing values
    examplesImp = self.imputer(inExamples)
    # There is a problem with using the imputer when examples contain meta attributes.
    # Unable to remove meta attributes from the examples. OK to rm meta from ExampleTables, but not from Example objects.
    if not examplesImp:
        if self.verbose > 0: print "Unable to predict with the ANN model."
        if self.verbose > 0: print "Perhaps you need to remove meta attributes from your examples."
        return None
    res = None
    # One output for regression, one per class value otherwise.
    if self.classVar.varType == orange.VarTypes.Continuous:
        Nout = 1
    else:
        Nout = len(self.classVar.values)
    out = cv.cvCreateMat(1,Nout,cv.CV_32FC1)
    self.classifier.predict(dataUtilities.Example2CvMat(examplesImp,self.varNames),out)
    #print "OUT = ",out
    #print out,"->",dataUtilities.CvMat2orangeResponse(out,self.classVar,True),":",origExamples[self.classVar.name].value
    res = dataUtilities.CvMat2orangeResponse(out,self.classVar,True)
    #print "RES=",res
    DFV = None
    if out.cols > 1:
        # Classification: convert the raw network outputs to probabilities.
        fannOutVector = dataUtilities.CvMat2List(out)[0]
        probabilities = self.__getProbabilities(fannOutVector)
        #Compute the DFV
        if self.classVar.varType == orange.VarTypes.Discrete and len(self.classVar.values) == 2:
            DFV = probabilities[0]
            # Subtract 0.5 so that the threshold is 0 as all learners DFV
            DFV -= 0.5
            self._updateDFVExtremes(DFV)
        # Return the desired quantity
        if resultType == orange.GetProbabilities:
            res = probabilities
        else:
            if resultType == orange.GetBoth:
                res = (res, probabilities)
    else:
        #On Regression models, assume the DFV as the value predicted
        DFV = res.value
        self._updateDFVExtremes(DFV)
        if resultType == orange.GetProbabilities:
            # No true distribution for regression: placeholder probabilities.
            res = [0.0]
        else:
            if resultType==orange.GetBoth:
                res = (res,[0.0])
    self.nPredictions += 1
    if returnDFV:
        return (res,DFV)
    else:
        return res
def compute_saliency(image):
    """Spectral-residual saliency map of *image*.

    Downscales to a 2**scale square grayscale image, takes its DFT, removes
    the locally-averaged log-magnitude (the "spectral residual"), inverts the
    DFT, smooths, and thresholds at (thresh/100 * mean).  Returns an 8-bit
    single-channel binary saliency image at the original resolution.
    """
    global thresh
    global scale
    saliency_scale = int(math.pow(2,scale));
    bw_im1 = cv.cvCreateImage(cv.cvGetSize(image), cv.IPL_DEPTH_8U,1)
    cv.cvCvtColor(image, bw_im1, cv.CV_BGR2GRAY)
    # Work on a square, power-of-two resized copy for the DFT.
    bw_im = cv.cvCreateImage(cv.cvSize(saliency_scale,saliency_scale), cv.IPL_DEPTH_8U,1)
    cv.cvResize(bw_im1, bw_im)
    highgui.cvShowImage("BW", bw_im)
    realInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 1);
    imaginaryInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 1);
    complexInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 2);
    cv.cvScale(bw_im, realInput, 1.0, 0.0);
    cv.cvZero(imaginaryInput);
    cv.cvMerge(realInput, imaginaryInput, None, None, complexInput);
    dft_M = saliency_scale #cv.cvGetOptimalDFTSize( bw_im.height - 1 );
    dft_N = saliency_scale #cv.cvGetOptimalDFTSize( bw_im.width - 1 );
    dft_A = cv.cvCreateMat( dft_M, dft_N, cv.CV_32FC2 );
    image_Re = cv.cvCreateImage( cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    image_Im = cv.cvCreateImage( cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    # copy A to dft_A and pad dft_A with zeros
    tmp = cv.cvGetSubRect( dft_A, cv.cvRect(0,0, bw_im.width, bw_im.height));
    cv.cvCopy( complexInput, tmp, None );
    if(dft_A.width > bw_im.width):
        tmp = cv.cvGetSubRect( dft_A, cv.cvRect(bw_im.width,0, dft_N - bw_im.width, bw_im.height));
        cv.cvZero( tmp );
    cv.cvDFT( dft_A, dft_A, cv.CV_DXT_FORWARD, complexInput.height );
    cv.cvSplit( dft_A, image_Re, image_Im, None, None );
    # Compute the phase angle
    image_Mag = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    image_Phase = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    #compute the phase of the spectrum
    cv.cvCartToPolar(image_Re, image_Im, image_Mag, image_Phase, 0)
    log_mag = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    cv.cvLog(image_Mag, log_mag)
    #Box filter the magnitude, then take the difference
    image_Mag_Filt = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    # 3x3 kernel of -1/9: filtering ADDS the negated local mean, so the
    # cvAdd below yields log_mag minus its box average (spectral residual).
    filt = cv.cvCreateMat(3,3, cv.CV_32FC1);
    cv.cvSet(filt,cv.cvScalarAll(-1.0/9.0))
    cv.cvFilter2D(log_mag, image_Mag_Filt, filt, cv.cvPoint(-1,-1))
    cv.cvAdd(log_mag, image_Mag_Filt, log_mag, None)
    cv.cvExp(log_mag, log_mag)
    # Rebuild the complex spectrum from residual magnitude + original phase.
    cv.cvPolarToCart(log_mag, image_Phase, image_Re, image_Im,0);
    cv.cvMerge(image_Re, image_Im, None, None, dft_A)
    cv.cvDFT( dft_A, dft_A, cv.CV_DXT_INVERSE, complexInput.height)
    tmp = cv.cvGetSubRect( dft_A, cv.cvRect(0,0, bw_im.width, bw_im.height));
    cv.cvCopy( tmp, complexInput, None );
    cv.cvSplit(complexInput, realInput, imaginaryInput, None, None)
    # NOTE(review): these names shadow the built-in min/max.
    min, max = cv.cvMinMaxLoc(realInput);
    #cv.cvScale(realInput, realInput, 1.0/(max-min), 1.0*(-min)/(max-min));
    cv.cvSmooth(realInput, realInput);
    threshold = thresh/100.0*cv.cvAvg(realInput)[0]
    cv.cvThreshold(realInput, realInput, threshold, 1.0, cv.CV_THRESH_BINARY)
    # Upscale back to the source resolution and return as 8-bit.
    tmp_img = cv.cvCreateImage(cv.cvGetSize(bw_im1),cv.IPL_DEPTH_32F, 1)
    cv.cvResize(realInput,tmp_img)
    cv.cvScale(tmp_img, bw_im1, 255,0)
    return bw_im1
def get_eye(self):
    """Find eyes inside the largest detected face and add a tracking point
    to self.cap.  Always returns False (or None if the face rectangle could
    not be extracted)."""
    eyes = False
    face = self.cap.get_area(commons.haar_cds['Face'])
    if face:
        # Fixed-size working tiles (colour + grayscale).
        cvtile = cv.cvCreateMat(128, 128, cv.CV_8UC3)
        bwtile = cv.cvCreateMat(128, 128, cv.CV_8U)
        # Keep only the largest face rectangle by area.
        areas = [(pt[1].x - pt[0].x) * (pt[1].y - pt[0].y) for pt in face]
        startF = face[areas.index(max(areas))][0]
        endF = face[areas.index(max(areas))][1]
        facerect = self.cap.rect(startF.x, startF.y, endF.x - startF.x,
                                 endF.y - startF.y)
        if not facerect:
            return
        cv.cvResize(facerect, cvtile)
        cv.cvCvtColor(cvtile, bwtile, cv.CV_BGR2GRAY)
        leye, reye, lcp, rcp = self.fel.locateEyes(bwtile)
        leye = pv.Point(leye)
        reye = pv.Point(reye)
        # Map tile-space eye coordinates back into full-frame coordinates.
        leye_x = int((float(leye.X()) * facerect.width / cvtile.width) + startF.x)
        leye_y = int((float(leye.Y()) * facerect.height / cvtile.height) + startF.y)
        reye_x = int((float(reye.X()) * facerect.width / cvtile.width) + startF.x)
        reye_y = int((float(reye.Y()) * facerect.height / cvtile.height) + startF.y)
        eye_rect = {  # NOTE(review): computed but never used
            "startX": leye_x - 5,
            "startY": leye_y - 5,
            "endX": leye_x + 5,
            "endY": leye_y + 5
        }
        #self.cap.image(self.cap.rect(leye_x - 5, leye_y - 5, 20, 20))
        # NOTE(review): only one point is added per call ("leye" the first
        # time, "reye" afterwards) — confirm this is intentional.
        if not hasattr(self.cap, "leye"):
            self.cap.add(
                Point("point", "leye", [int(leye_x), int(leye_y)],
                      parent=self.cap, follow=True))
        else:
            self.cap.add(
                Point("point", "reye", [int(reye_x), int(reye_y)],
                      parent=self.cap, follow=True))
        # Shows the face rectangle
        #self.cap.add( Graphic("rect", "Face", ( startF.x, startF.y ), (endF.x, endF.y), parent=self.cap) )
    self.foreheadOrig = None
    return False
def _singlePredict(self, origExamples=None, resultType=orange.GetValue, returnDFV=False):
    """Predict with the wrapped CvANN classifier for a single example.

    resultType selects the return shape (GetValue / GetProbabilities /
    GetBoth); returnDFV additionally returns the decision-function value.
    Returns None when the example cannot be fixed/imputed.
    """
    res = None
    """
    orange.GetBoth -          <type 'tuple'>                     ->    (<orange.Value 'Act'='3.44158792'>, <3.442: 1.000>)
    orange.GetValue -         <type 'orange.Value'>              ->    <orange.Value 'Act'='3.44158792'>
    orange.GetProbabilities - <type 'orange.DiscDistribution'>   ->    <0.000, 0.000>
    """
    #dataUtilities.rmAllMeta(examples)
    if len(origExamples.domain.getmetas()) == 0:
        examples = origExamples
    else:
        examples = dataUtilities.getCopyWithoutMeta(origExamples)
    #Check if the examples are compatible with the classifier (attributes order and varType compatibility)
    if self.verbose > 1: dataUtilities.verbose = self.verbose
    if not self.ExFix.ready:
        self.ExFix.set_domain(self.imputer.defaults.domain)
        self.ExFix.set_examplesFixedLog(self.examplesFixedLog)
    inExamples = self.ExFix.fixExample(examples)
    if not inExamples:
        return None
    #Imput the examples if there are missing values
    examplesImp = self.imputer(inExamples)
    # There is a problem with using the imputer when examples contain meta attributes.
    # Unable to remove meta attributes from the examples. OK to rm meta from ExampleTables, but not from Example objects.
    if not examplesImp:
        if self.verbose > 0: print "Unable to predict with the ANN model."
        if self.verbose > 0: print "Perhaps you need to remove meta attributes from your examples."
        return None
    res = None
    # One output for regression, one per class value for classification.
    if self.classVar.varType == orange.VarTypes.Continuous:
        Nout = 1
    else:
        Nout = len(self.classVar.values)
    out = cv.cvCreateMat(1, Nout, cv.CV_32FC1)
    self.classifier.predict(
        dataUtilities.Example2CvMat(examplesImp, self.varNames), out)
    #print "OUT = ",out
    #print out,"->",dataUtilities.CvMat2orangeResponse(out,self.classVar,True),":",origExamples[self.classVar.name].value
    res = dataUtilities.CvMat2orangeResponse(out, self.classVar, True)
    #print "RES=",res
    DFV = None
    if out.cols > 1:
        # Classification: convert raw network outputs to probabilities.
        fannOutVector = dataUtilities.CvMat2List(out)[0]
        probabilities = self.__getProbabilities(fannOutVector)
        #Compute the DFV
        if self.classVar.varType == orange.VarTypes.Discrete and len(
                self.classVar.values) == 2:
            DFV = probabilities[0]
            # Subtract 0.5 so that the threshold is 0 as all learners DFV
            DFV -= 0.5
            self._updateDFVExtremes(DFV)
        # Return the desired quantity
        if resultType == orange.GetProbabilities:
            res = probabilities
        else:
            if resultType == orange.GetBoth:
                res = (res, probabilities)
    else:
        #On Regression models, assume the DFV as the value predicted
        DFV = res.value
        self._updateDFVExtremes(DFV)
        # Degenerate distribution: all mass on the predicted value.
        y_hat = self.classVar(res.value)
        dist = Orange.statistics.distribution.Continuous(self.classVar)
        dist[y_hat] = 1.0
        if resultType == orange.GetProbabilities:
            res = dist
        else:
            if resultType == orange.GetBoth:
                res = (res, dist)
    self.nPredictions += 1
    if returnDFV:
        return (res, DFV)
    else:
        return res
def blob_identification(binary_image):
    """Segment *binary_image* into blobs, save each normalised blob as
    $HOME/alpr/latest/blobs/pic<N>.png, classify which blobs to discard by
    their row extents, and write the remaining blobs to blob.jpg."""
    from opencv.highgui import cvSaveImage, cvLoadImageM
    from opencv.cv import cvCreateImage, cvGetSize, cvCreateMat, cvSet, CV_RGB, cvResize
    from Blob import CBlob
    from BlobResult import CBlobResult
    from classification import classification
    from os import chdir, environ
    path = environ.get("HOME")
    frame_size = cvGetSize(binary_image)
    blo = cvCreateImage(frame_size, 8, 1)
    # Fixed 240x320 canvas onto which each blob image is resized.
    resblo = cvCreateMat(240, 320, binary_image.type)
    mask = cvCreateImage(frame_size, 8, 1)
    cvSet(mask, 255)
    myblobs = CBlobResult(binary_image, mask, 0, True)
    # Keep only blobs with area in [325, 2000].
    myblobs.filter_blobs(325, 2000)
    blob_count = myblobs.GetNumBlobs()
    count = 0
    pixr = []   # first (top) row hit per blob
    pixrm = []  # last (bottom) row hit per blob
    for i in range(blob_count):
        value = []
        rowval = []
        colval = []
        cvSet(blo, 0)
        my_enum_blob = myblobs.GetBlob(i)
        my_enum_blob.FillBlob(blo, CV_RGB(255, 0, 255), 0, 0)
        cvSet(resblo, 0)
        cvResize(blo, resblo, 1)
        # Collect coordinates and values of all non-zero pixels.
        for rowitem in range(resblo.rows):
            for colitem in range(resblo.cols):
                if resblo[rowitem, colitem] != 0:
                    rowval.append(rowitem)
                    colval.append(colitem)
                    value.append(resblo[rowitem, colitem])
        pixr.append(rowval[0])
        pixrm.append(rowval[-1])
        # Shift the blob to the top-left corner (plus a 5-pixel margin).
        rowmin = min(rowval)
        rowedit = []
        for item in rowval:
            rowedit.append(item - rowmin)
        coledit = []
        colmin = min(colval)
        for item in colval:
            coledit.append(int(item) - colmin)
        rowmax = max(rowedit)
        colmax = max(colval) - colmin
        moved = cvCreateMat(rowmax + 10, colmax + 10, blo.type)
        cvSet(moved, 0)
        # NOTE(review): this loop reuses `i` from the outer loop; harmless
        # for the outer `for`, but confusing — consider a different name.
        for i in range(len(rowval)):
            moved[int(rowedit[i]) + 5, int(coledit[i]) + 5] = int(value[i])
        chdir(path + "/alpr/latest/blobs")
        cvSaveImage("pic" + str(count) + ".png", moved)
        count += 1
    # classification() returns the blob indices to exclude.
    avoid = classification(pixr, pixrm)
    blob_image = cvCreateImage(frame_size, 8, 1)
    cvSet(blob_image, 0)
    for i in range(blob_count):
        if i not in avoid:
            my_enum_blob = myblobs.GetBlob(i)
            my_enum_blob.FillBlob(blob_image, CV_RGB(255, 0, 255), 0, 0)
    cvSaveImage("blob.jpg", blob_image)
    return
def blob_identification(binary_image):
    """Extract blobs from *binary_image*, save each normalised blob to
    $HOME/alpr/latest/blobs/pic<N>.png, drop the blobs flagged by
    classification(), and write the survivors to blob.jpg."""
    from opencv.highgui import cvSaveImage, cvLoadImageM
    from opencv.cv import cvCreateImage, cvGetSize, cvCreateMat, cvSet, CV_RGB, cvResize
    from Blob import CBlob
    from BlobResult import CBlobResult
    from classification import classification
    from os import chdir, environ
    home = environ.get("HOME")
    frame_size = cvGetSize(binary_image)
    canvas = cvCreateImage(frame_size, 8, 1)
    scaled = cvCreateMat(240, 320, binary_image.type)
    mask = cvCreateImage(frame_size, 8, 1)
    cvSet(mask, 255)
    myblobs = CBlobResult(binary_image, mask, 0, True)
    myblobs.filter_blobs(325, 2000)
    blob_count = myblobs.GetNumBlobs()
    count = 0
    top_rows = []
    bottom_rows = []
    for i in range(blob_count):
        pixel_values = []
        hit_rows = []
        hit_cols = []
        cvSet(canvas, 0)
        blob = myblobs.GetBlob(i)
        blob.FillBlob(canvas, CV_RGB(255, 0, 255), 0, 0)
        cvSet(scaled, 0)
        cvResize(canvas, scaled, 1)
        # Record coordinates and values of every lit pixel in the blob.
        for r in range(scaled.rows):
            for c in range(scaled.cols):
                if scaled[r, c] != 0:
                    hit_rows.append(r)
                    hit_cols.append(c)
                    pixel_values.append(scaled[r, c])
        top_rows.append(hit_rows[0])
        bottom_rows.append(hit_rows[-1])
        # Re-anchor the blob at the origin with a 5-pixel margin.
        row_base = min(hit_rows)
        shifted_rows = []
        for r in hit_rows:
            shifted_rows.append(r - row_base)
        shifted_cols = []
        col_base = min(hit_cols)
        for c in hit_cols:
            shifted_cols.append(int(c) - col_base)
        height = max(shifted_rows)
        width = max(hit_cols) - col_base
        cropped = cvCreateMat(height + 10, width + 10, canvas.type)
        cvSet(cropped, 0)
        for k in range(len(hit_rows)):
            cropped[int(shifted_rows[k]) + 5, int(shifted_cols[k]) + 5] = int(pixel_values[k])
        chdir(home + "/alpr/latest/blobs")
        cvSaveImage("pic" + str(count) + ".png", cropped)
        count += 1
    rejected = classification(top_rows, bottom_rows)
    result_image = cvCreateImage(frame_size, 8, 1)
    cvSet(result_image, 0)
    for i in range(blob_count):
        if i not in rejected:
            blob = myblobs.GetBlob(i)
            blob.FillBlob(result_image, CV_RGB(255, 0, 255), 0, 0)
    cvSaveImage("blob.jpg", result_image)
    return