def detect_faces(self, img_grey):
    """Run the Haar cascade over a grey frame and return the raw detections.

    Uses the fast-video preset (scale_factor=1.2, min_neighbors=2, flags=0)
    on a frame downscaled by ``self.image_scale``; the accurate-but-slow
    default would be scale_factor=1.1, min_neighbors=3,
    min_size=<minimum possible face size>.
    """
    min_size = cv.cvSize(20, 20)   # smallest face, in shrunken-frame pixels
    self.image_scale = 1.3         # stored on self so callers can rescale boxes
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0

    # Detect on a shrunken, histogram-equalized copy for speed.
    shrunk_w = cv.cvRound(img_grey.width / self.image_scale)
    shrunk_h = cv.cvRound(img_grey.height / self.image_scale)
    shrunk = cv.cvCreateImage(cv.cvSize(shrunk_w, shrunk_h), 8, 1)
    cv.cvResize(img_grey, shrunk, cv.CV_INTER_LINEAR)
    cv.cvEqualizeHist(shrunk, shrunk)
    cv.cvClearMemStorage(self.faces_storage)

    if self.cascade:
        elapsed = cv.cvGetTickCount()
        faces = cv.cvHaarDetectObjects(shrunk, self.cascade,
                                       self.faces_storage, haar_scale,
                                       min_neighbors, haar_flags, min_size)
        elapsed = cv.cvGetTickCount() - elapsed
    cv.cvReleaseImage(shrunk)
    #print "detection time = %gms" % (elapsed/(cvGetTickFrequency()*1000.));
    return faces
def detect_faces(self, img_grey):
    """Detect faces within an image, then draw around them.

    The default parameters (scale_factor=1.1, min_neighbors=3, flags=0)
    are tuned for accurate yet slow object detection. For a faster
    operation on real video images the settings are: scale_factor=1.2,
    min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
    min_size=<minimum possible face size>.
    """
    # Smallest face (in pixels of the downscaled frame) the cascade reports.
    min_size = cv.cvSize(20,20)
    # NOTE(review): stored on self — presumably callers use it to map
    # detection boxes back to full resolution; confirm against face_points().
    self.image_scale = 1.3
    haar_scale = 1.2      # cascade window growth factor per pass
    min_neighbors = 2     # overlapping detections required to accept a face
    haar_flags = 0
    # Create a small image for better performance
    small_size = cv.cvSize(cv.cvRound(img_grey.width/self.image_scale),cv.cvRound(img_grey.height/self.image_scale))
    small_img = cv.cvCreateImage(small_size, 8, 1)
    cv.cvResize(img_grey, small_img, cv.CV_INTER_LINEAR)
    # Equalization boosts contrast, which helps the Haar features.
    cv.cvEqualizeHist(small_img, small_img)
    cv.cvClearMemStorage(self.faces_storage)
    if(self.cascade):
        t = cv.cvGetTickCount();
        faces = cv.cvHaarDetectObjects(small_img, self.cascade,
                                       self.faces_storage, haar_scale,
                                       min_neighbors, haar_flags, min_size)
        t = cv.cvGetTickCount() - t
    cv.cvReleaseImage(small_img)
    #print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.));
    # NOTE(review): if self.cascade is unset, `faces` was never bound and
    # this raises NameError — presumably the cascade is always loaded.
    return faces
def _detect(image): """ Detects faces on `image` Parameters: @image: image file path Returns: [((x1, y1), (x2, y2)), ...] List of coordenates for top-left and bottom-right corner """ # the OpenCV API says this function is obsolete, but we can't # cast the output of cvLoad to a HaarClassifierCascade, so use # this anyways the size parameter is ignored capture = cvCreateFileCapture(image) if not capture: return [] frame = cvQueryFrame(capture) if not frame: return [] img = cvCreateImage(cvSize(frame.width, frame.height), IPL_DEPTH_8U, frame.nChannels) cvCopy(frame, img) # allocate temporary images gray = cvCreateImage((img.width, img.height), COPY_DEPTH, COPY_CHANNELS) width, height = (cvRound(img.width / IMAGE_SCALE), cvRound(img.height / IMAGE_SCALE)) small_img = cvCreateImage((width, height), COPY_DEPTH, COPY_CHANNELS) # convert color input image to grayscale cvCvtColor(img, gray, CV_BGR2GRAY) # scale input image for faster processing cvResize(gray, small_img, CV_INTER_LINEAR) cvEqualizeHist(small_img, small_img) cvClearMemStorage(STORAGE) coords = [] for haar_file in CASCADES: cascade = cvLoadHaarClassifierCascade(haar_file, cvSize(1, 1)) if cascade: faces = cvHaarDetectObjects(small_img, cascade, STORAGE, HAAR_SCALE, MIN_NEIGHBORS, HAAR_FLAGS, MIN_SIZE) or [] for face_rect in faces: # the input to cvHaarDetectObjects was resized, so scale the # bounding box of each face and convert it to two CvPoints x, y = face_rect.x, face_rect.y pt1 = (int(x * IMAGE_SCALE), int(y * IMAGE_SCALE)) pt2 = (int((x + face_rect.width) * IMAGE_SCALE), int((y + face_rect.height) * IMAGE_SCALE)) coords.append((pt1, pt2)) return coords
def resizeImage(self, image_location, ouput_location, size):
    """Load the image at `image_location`, resize it to a `size` x `size`
    square, and save the result to `ouput_location`."""
    side = cv.cvRound(size)
    # flag > 0: force the loaded image to a 3-channel colour image
    source = highgui.cvLoadImage(image_location, 1)
    resized = cv.cvCreateImage(cv.cvSize(side, side), 8, 3)
    cv.cvResize(source, resized, cv.CV_INTER_LINEAR)
    # write the resized image back out to disk
    highgui.cvSaveImage(ouput_location, resized)
def __normImage(self, img, length):
    """Return a `length` x `length`, grayscale, histogram-equalized
    "normalized" copy of `img`."""
    side = cv.cvRound(length)
    grayscale = cv.cvCreateImage(cv.cvSize(img.width, img.height), 8, 1)
    norm_image = cv.cvCreateImage(cv.cvSize(side, side), 8, 1)
    # grayscale first, then shrink for faster downstream processing
    cv.cvCvtColor(img, grayscale, cv.CV_BGR2GRAY)
    cv.cvResize(grayscale, norm_image, cv.CV_INTER_LINEAR)
    # spread the intensity histogram to improve contrast
    cv.cvEqualizeHist(norm_image, norm_image)
    return norm_image
def detect_face(self, img):
    """Detect faces in `img`, draw a red box around each, and return `img`.

    Uses the fast-video Haar preset (scale_factor=1.2, min_neighbors=2,
    flags=0); the accurate-but-slow default would be 1.1 / 3.
    """
    min_size = cv.cvSize(20, 20)   # smallest face, in shrunken-frame pixels
    image_scale = 1.3              # downscale factor used for detection
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0

    # Detect on a shrunken, equalized grey copy for speed.
    grey = cv.cvCreateImage(cv.cvSize(img.width, img.height), 8, 1)
    shrunk = cv.cvCreateImage(cv.cvSize(cv.cvRound(img.width / image_scale),
                                        cv.cvRound(img.height / image_scale)),
                              8, 1)
    cv.cvCvtColor(img, grey, cv.CV_BGR2GRAY)
    cv.cvResize(grey, shrunk, cv.CV_INTER_LINEAR)
    cv.cvEqualizeHist(shrunk, shrunk)
    cv.cvClearMemStorage(self.storage)

    if self.cascade:
        elapsed = cv.cvGetTickCount()
        faces = cv.cvHaarDetectObjects(shrunk, self.cascade, self.storage,
                                       haar_scale, min_neighbors, haar_flags,
                                       min_size)
        elapsed = cv.cvGetTickCount() - elapsed
        #print "detection time = %gms" % (elapsed/(cvGetTickFrequency()*1000.));
    if faces:
        for face in faces:
            # map the box from the shrunken frame back to full resolution
            top_left = cv.cvPoint(int(face.x * image_scale),
                                  int(face.y * image_scale))
            bottom_right = cv.cvPoint(int((face.x + face.width) * image_scale),
                                      int((face.y + face.height) * image_scale))
            cv.cvRectangle(img, top_left, bottom_right,
                           cv.CV_RGB(255, 0, 0), 3, 8, 0)
    return img
def detect_face(self, img):
    """Detect faces within an image, then draw around them.

    The default parameters (scale_factor=1.1, min_neighbors=3, flags=0)
    are tuned for accurate yet slow object detection. For a faster
    operation on real video images the settings are: scale_factor=1.2,
    min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
    min_size=<minimum possible face size>.
    """
    min_size = cv.cvSize(20, 20)   # smallest face, in shrunken-frame pixels
    image_scale = 1.3              # downscale factor used for detection
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0
    # Detect on a shrunken, equalized grey copy for speed.
    gray = cv.cvCreateImage(cv.cvSize(img.width, img.height), 8, 1)
    small_img = cv.cvCreateImage(
        cv.cvSize(cv.cvRound(img.width / image_scale),
                  cv.cvRound(img.height / image_scale)), 8, 1)
    cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
    cv.cvResize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.cvEqualizeHist(small_img, small_img)
    cv.cvClearMemStorage(self.storage)
    if (self.cascade):
        t = cv.cvGetTickCount()
        faces = cv.cvHaarDetectObjects(small_img, self.cascade, self.storage,
                                       haar_scale, min_neighbors, haar_flags,
                                       min_size)
        t = cv.cvGetTickCount() - t
        #print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.));
    # NOTE(review): if self.cascade is unset, `faces` is unbound here and
    # this raises NameError — presumably the cascade is always loaded.
    if faces:
        for r in faces:
            # scale coordinates back up to the full-resolution image
            pt1 = cv.cvPoint(int(r.x * image_scale), int(r.y * image_scale))
            pt2 = cv.cvPoint(int((r.x + r.width) * image_scale),
                             int((r.y + r.height) * image_scale))
            cv.cvRectangle(img, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8, 0)
    return img
def returnEllipses(contours):
    """Fit an ellipse to every contour with at least 6 points.

    Parameters:
        contours: a CvSeq of contours (as returned by cvFindContours).

    Returns:
        A list of dicts, one per fitted contour, with keys:
        'center' — CvPoint, rounded ellipse centre;
        'size'   — CvSize, half the fitted box's width/height (cvEllipse
                   takes half-axes);
        'angle'  — float, the fitted angle with its sign flipped to match
                   the drawing convention.
    """
    ellipses = []
    for c in contours.hrange():
        count = c.total
        # cvFitEllipse2 requires at least 6 contour points.
        if count < 6:
            continue
        # Copy the contour sequence into a matrix, then convert to the
        # float format cvFitEllipse2 expects.
        PointArray = cv.cvCreateMat(1, count, cv.CV_32SC2)
        PointArray2D32f = cv.cvCreateMat(1, count, cv.CV_32FC2)
        cv.cvCvtSeqToArray(c, PointArray,
                           cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
        cv.cvConvert(PointArray, PointArray2D32f)
        # cvFitEllipse2 returns a new CvBox2D; the original code allocated
        # a throwaway cv.CvBox2D() first — that dead store is removed.
        box = cv.cvFitEllipse2(PointArray2D32f)
        # Round centre and half-size to integers for callers that draw.
        center = cv.CvPoint()
        size = cv.CvSize()
        center.x = cv.cvRound(box.center.x)
        center.y = cv.cvRound(box.center.y)
        size.width = cv.cvRound(box.size.width * 0.5)
        size.height = cv.cvRound(box.size.height * 0.5)
        box.angle = -box.angle
        ellipses.append({'center': center, 'size': size, 'angle': box.angle})
    return ellipses
def hsv2rgb(hue):
    """Map a hue (OpenCV's 0..180 range) at full saturation/value to the
    corresponding colour as a cvScalar (BGR order)."""
    # per sector: which channel index is high (255), low (0) and ramping (p)
    sector_data = [[0, 2, 1], [1, 2, 0], [1, 0, 2],
                   [2, 0, 1], [2, 1, 0], [0, 1, 2]]
    hue *= 0.1 / 3          # 1/30: map 0..180 onto the 6 colour-wheel sectors
    sector = cv.cvFloor(hue)
    p = cv.cvRound(255 * (hue - sector))
    if sector & 1:
        # odd sectors ramp downwards
        p ^= 255
    channels = {sector_data[sector][0]: 255,
                sector_data[sector][1]: 0,
                sector_data[sector][2]: p}
    # cvScalar channel order is B, G, R(, A)
    return cv.cvScalar(channels[2], channels[1], channels[0], 0)
def timerEvent(self, ev):
    """Qt timer callback: grab a camera frame, run all detectors, draw the
    results on the frame, and blit it into the widget."""
    # Fetch a frame from the video camera
    frame = highgui.cvQueryFrame(self.cap)
    img_orig = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),cv.IPL_DEPTH_8U, frame.nChannels)
    if (frame.origin == cv.IPL_ORIGIN_TL):
        cv.cvCopy(frame, img_orig)
    else:
        # bottom-left origin: flip vertically so drawing coords line up
        cv.cvFlip(frame, img_orig, 0)
    # Create a grey frame to clarify data
    img_grey = cv.cvCreateImage(cv.cvSize(img_orig.width,img_orig.height), 8, 1)
    cv.cvCvtColor(img_orig, img_grey, cv.CV_BGR2GRAY)
    # Detect objects within the frame.
    # NOTE(review): a fresh CvMemStorage is allocated per detector on every
    # frame; presumably the previous ones are left to the GC — confirm this
    # does not leak with these bindings.
    self.faces_storage = cv.cvCreateMemStorage(0)
    faces = self.detect_faces(img_grey)
    self.circles_storage = cv.cvCreateMemStorage(0)
    circles = self.detect_circles(img_grey)
    self.squares_storage = cv.cvCreateMemStorage(0)
    squares = self.detect_squares(img_grey, img_orig)
    self.lines_storage = cv.cvCreateMemStorage(0)
    lines = self.detect_lines(img_grey, img_orig)
    # Draw faces
    if faces:
        for face in faces:
            pt1, pt2 = self.face_points(face)
            cv.cvRectangle(img_orig, pt1, pt2, cv.CV_RGB(255,0,0), 3, 8, 0)
    # Draw lines
    if lines:
        for line in lines:
            cv.cvLine(img_orig, line[0], line[1], cv.CV_RGB(255,255,0), 3, 8)
    # Draw circles
    if circles:
        for circle in circles:
            # circle is (x, y, radius) per the drawing below
            cv.cvCircle(img_orig, cv.cvPoint(cv.cvRound(circle[0]),cv.cvRound(circle[1])),cv.cvRound(circle[2]),cv.CV_RGB(0,0,255),3,8,0)
    # Draw squares
    if squares:
        i = 0
        while i<squares.total:
            pt = []
            # read 4 vertices
            pt.append(squares[i])
            pt.append(squares[i+1])
            pt.append(squares[i+2])
            pt.append(squares[i+3])
            ## draw the square as a closed polyline
            cv.cvPolyLine(img_orig, [pt], 1, cv.CV_RGB(0,255,0), 3, cv.CV_AA, 0)
            i += 4
    # Resize the image to display properly within the window
    #   CV_INTER_NN - nearest-neigbor interpolation,
    #   CV_INTER_LINEAR - bilinear interpolation (used by default)
    #   CV_INTER_AREA - resampling using pixel area relation. (preferred for image decimation)
    #   CV_INTER_CUBIC - bicubic interpolation.
    img_display = cv.cvCreateImage(cv.cvSize(self.width(),self.height()), 8, 3)
    cv.cvResize(img_orig, img_display, cv.CV_INTER_NN)
    img_pil = adaptors.Ipl2PIL(img_display)
    # Round-trip through an in-memory PNG so Qt can load the pixel data.
    s = StringIO()
    img_pil.save(s, "PNG")
    s.seek(0)
    q_img = QImage()
    q_img.loadFromData(s.read())
    bitBlt(self, 0, 0, q_img)
scale = 0. # scale the histograms cv.cvConvertScale(hist.bins, hist.bins, scale, 0) # clear the histogram image cv.cvSetZero(histimg) # compute the width for each bin do display bin_w = histimg.width / hdims for i in range(hdims): # for all the bins # get the value, and scale to the size of the hist image val = cv.cvRound( cv.cvGetReal1D(hist.bins, i) * histimg.height / 255) # compute the color color = hsv2rgb(i * 180. / hdims) # draw the rectangle in the wanted color cv.cvRectangle(histimg, cv.cvPoint(i * bin_w, histimg.height), cv.cvPoint((i + 1) * bin_w, histimg.height - val), color, -1, 8, 0) # Make the sweet negative selection box if mouse_select_object and mouse_selection.width > 0 and mouse_selection.height > 0: a = cv.cvGetSubRect(frame, mouse_selection) cv.cvXorS(a, cv.cvScalarAll(255), a) # Take the negative of the image.. del a
cv.cvCanny(grayimage, cannyedges, 150, 450 , 3) #This is the line that throws the error storage = cv.cvCreateMat(50, 1, cv.CV_32FC3) cv.cvSetZero(storage) #circles = cv.cvHoughCircles(grayimage, storage, cv.CV_HOUGH_GRADIENT, 2, grayimage.height/4, 150, 40, long(sys.argv[2]), long(sys.argv[3])) #circles = cv.cvHoughCircles(grayimage, storage, cv.CV_HOUGH_GRADIENT, 1, grayimage.height, 200, 40, long(sys.argv[2]), long(sys.argv[3])) circles = cv.cvHoughCircles(grayimage, storage, cv.CV_HOUGH_GRADIENT, 1, grayimage.width, 150, 40, long(sys.argv[2]), grayimage.width) print storage for i in storage: print i[0], i[1], i[2] center = cv.cvRound(i[0]), cv.cvRound(i[1]) radius = cv.cvRound(i[2]) cv.cvCircle(image, (center), radius, cv.CV_RGB(255, 0, 0), 1, cv.CV_AA, 0 ) cv.cvCircle(image, (center), 10, cv.CV_RGB(255, 0, 0), -1, cv.CV_AA, 0 ) cv.cvCircle(cannyedges, (center), radius, cv.CV_RGB(255, 255, 255), 1, cv.CV_AA, 0 ) # for c in range(0,3): #v = cv.cvRound( cv.cvmGet(storage, i, c) ) #if v != 0.0: #print v #cv.cvSmooth(grayimage, grayimage,cv.CV_MEDIAN, 3, 3) #Display image highgui.cvShowImage ('GrayScale', grayimage)
def timerEvent(self, ev):
    """Qt timer callback: grab a camera frame, run all detectors, draw the
    results on the frame, and blit it into the widget."""
    # Fetch a frame from the video camera
    frame = highgui.cvQueryFrame(self.cap)
    img_orig = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                cv.IPL_DEPTH_8U, frame.nChannels)
    if (frame.origin == cv.IPL_ORIGIN_TL):
        cv.cvCopy(frame, img_orig)
    else:
        # bottom-left origin: flip vertically so drawing coords line up
        cv.cvFlip(frame, img_orig, 0)
    # Create a grey frame to clarify data
    img_grey = cv.cvCreateImage(cv.cvSize(img_orig.width, img_orig.height), 8, 1)
    cv.cvCvtColor(img_orig, img_grey, cv.CV_BGR2GRAY)
    # Detect objects within the frame.
    # NOTE(review): a fresh CvMemStorage is allocated per detector on every
    # frame; presumably the previous ones are left to the GC — confirm this
    # does not leak with these bindings.
    self.faces_storage = cv.cvCreateMemStorage(0)
    faces = self.detect_faces(img_grey)
    self.circles_storage = cv.cvCreateMemStorage(0)
    circles = self.detect_circles(img_grey)
    self.squares_storage = cv.cvCreateMemStorage(0)
    squares = self.detect_squares(img_grey, img_orig)
    self.lines_storage = cv.cvCreateMemStorage(0)
    lines = self.detect_lines(img_grey, img_orig)
    # Draw faces
    if faces:
        for face in faces:
            pt1, pt2 = self.face_points(face)
            cv.cvRectangle(img_orig, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8, 0)
    # Draw lines
    if lines:
        for line in lines:
            cv.cvLine(img_orig, line[0], line[1], cv.CV_RGB(255, 255, 0), 3, 8)
    # Draw circles
    if circles:
        for circle in circles:
            # circle is (x, y, radius) per the drawing below
            cv.cvCircle(
                img_orig,
                cv.cvPoint(cv.cvRound(circle[0]), cv.cvRound(circle[1])),
                cv.cvRound(circle[2]), cv.CV_RGB(0, 0, 255), 3, 8, 0)
    # Draw squares
    if squares:
        i = 0
        while i < squares.total:
            pt = []
            # read 4 vertices
            pt.append(squares[i])
            pt.append(squares[i + 1])
            pt.append(squares[i + 2])
            pt.append(squares[i + 3])
            ## draw the square as a closed polyline
            cv.cvPolyLine(img_orig, [pt], 1, cv.CV_RGB(0, 255, 0), 3, cv.CV_AA, 0)
            i += 4
    # Resize the image to display properly within the window
    #   CV_INTER_NN - nearest-neigbor interpolation,
    #   CV_INTER_LINEAR - bilinear interpolation (used by default)
    #   CV_INTER_AREA - resampling using pixel area relation. (preferred for image decimation)
    #   CV_INTER_CUBIC - bicubic interpolation.
    img_display = cv.cvCreateImage(cv.cvSize(self.width(), self.height()), 8, 3)
    cv.cvResize(img_orig, img_display, cv.CV_INTER_NN)
    img_pil = adaptors.Ipl2PIL(img_display)
    # Round-trip through an in-memory PNG so Qt can load the pixel data.
    s = StringIO()
    img_pil.save(s, "PNG")
    s.seek(0)
    q_img = QImage()
    q_img.loadFromData(s.read())
    bitBlt(self, 0, 0, q_img)
def on_trackbar1(position):
    """Trackbar callback: re-run Canny + Hough circle detection with the
    current slider values, crop/mark any circle larger than 200 px radius,
    and refresh the preview windows."""
    global pos1
    global pos2
    global pos3
    global pos4
    global pos5
    global pos6
    global pos7
    global img
    global gray
    global edges
    print
    print position, pos2, pos3, pos4, pos5, pos6, pos7
    temp = cv.cvCloneImage(img)
    gray = cv.cvCreateImage(cv.cvGetSize(temp), 8, 1)
    edges = cv.cvCreateImage(cv.cvGetSize(temp), 8, 1)
    dst = cv.cvCreateImage( cv.cvSize(256,256), 8, 3 )
    src = cv.cvCloneImage(img)
    src2 = cv.cvCreateImage( cv.cvGetSize(src), 8, 3 );
    cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
    # position / pos2 are the Canny low / high thresholds
    cv.cvCanny(gray, edges, position, pos2, 3)
    cv.cvSmooth(edges, edges, cv.CV_GAUSSIAN, 9, 9)
    # result matrix: up to 50 circles, each (x, y, radius)
    storage = cv.cvCreateMat(50, 1, cv.CV_32FC3)
    cv.cvSetZero(storage)
    try:
        circles = cv.cvHoughCircles(gray, storage, cv.CV_HOUGH_GRADIENT, 1,
                                    float(pos3), float(pos2), float(pos4),
                                    long(pos5),long(pos6) )
        #print storage
        for i in storage:
            print "Center: ", i[0], i[1], " Radius: ", i[2]
            center = cv.cvRound(i[0]), cv.cvRound(i[1])
            radius = cv.cvRound(i[2])
            cv.cvCircle(temp, (center), radius, cv.CV_RGB(255, 0, 0), 1, cv.CV_AA, 0 )
            cv.cvCircle(edges, (center), radius, cv.CV_RGB(255, 255, 255), 1, cv.CV_AA, 0 )
            if radius > 200:
                print "Circle found over 200 Radius"
                # square crop centred on the circle, inset by pos7 pixels
                center_crop_topleft = (center[0]-(radius - pos7)), (center[1]-(radius - pos7))
                center_crop_bottomright = (center[0]+(radius - pos7)), (center[1]+(radius - pos7))
                print "crop top left: ", center_crop_topleft
                print "crop bottom right: ", center_crop_bottomright
                center_crop = cv.cvGetSubRect(src, (center_crop_topleft[0], center_crop_topleft[1] ,
                                                    (center_crop_bottomright[0] - center_crop_topleft[0]),
                                                    (center_crop_bottomright[1] - center_crop_topleft[1]) ))
                #center_crop = cv.cvGetSubRect(src, (50, 50, radius/2, radius/2))
                cvShowImage( "center_crop", center_crop )
                print "center_crop created"
                #mark found circle's center with blue point and blue circle of pos 7 radius
                cv.cvCircle(temp ,(center), 2, cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 )
                cv.cvCircle(temp ,(center), (radius - pos7), cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 )
                # NOTE(review): the source appears to have been a multi-line
                # commented-out cvLogPolar experiment whose continuation line
                # lost its '#'; the continuation is kept commented here so
                # the function stays syntactically valid — confirm intent.
                #cvLogPolar(src,
                #           dst, (center), 48, CV_INTER_LINEAR +CV_WARP_FILL_OUTLIERS )
                #this will draw a smaller cirle outlining the center circle
                #pos7 = int(pos7 /2.5)
                #cv.cvCircle(dst ,(img_size.width-pos7, 0), 2, cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 )
                #cv.cvLine(dst, (img_size.width-pos7-1, 0), (img_size.width-pos7-1, img_size.height), cv.CV_RGB(0, 0, 255),1,8,0)
                #cvShowImage( "log-polar", dst )
                #print radius, (radius-pos7)
                #cropped = cv.cvCreateImage( (pos7, img_size.height), 8, 3)
                #cropped2 = cv.cvCreateImage( (pos7, img_size.height), 8, 3)
                #coin_edge_img = cv.cvGetSubRect(dst, (img_size.width-pos7, 0, pos7 ,img_size.height ))
                #to create the center cropped part of coin
                #img_size = cvGetSize(scr)
                #cvCopy(coin_edge_img, cropped)
                #cvSaveImage("temp.png", cropped)
                #im = Image.open("temp.png").rotate(90)
                #print "pil image size = ", im.size[0], im.size[1]
                #im = im.resize((im.size[0]*2, im.size[1]*2))
                #print "pil image size = ", im.size
                #im.show()
                #im.save("temp2.png")
                cropped2 = highgui.cvLoadImage("temp2.png")
                #cvShowImage( "cropped", cropped2)
    except:
        # NOTE(review): bare except keeps the UI responsive but hides real
        # errors; it only prints the exception type and carries on.
        print "Exception:", sys.exc_info()[0]
        print position, pos2, pos3, pos4, pos5, pos6, pos7
        pass
    highgui.cvShowImage("edges", edges)
    #cvShowImage( "log-polar", dst )
    cvShowImage(wname, temp)
def process_image( slider_pos ):
    """Trackbar callback: threshold image03 at `slider_pos`, find the
    contours of the result, fit an ellipse to each contour with at least
    6 points, and draw contour plus ellipse into image04."""
    stor = cv.cvCreateMemStorage(0)
    # cvFindContours needs a binary image, hence the threshold first.
    cv.cvThreshold(image03, image02, slider_pos, 255, cv.CV_THRESH_BINARY)
    nb_contours, cont = cv.cvFindContours(image02, stor, cv.sizeof_CvContour,
                                          cv.CV_RETR_LIST,
                                          cv.CV_CHAIN_APPROX_NONE,
                                          cv.cvPoint(0, 0))
    # Wipe the work images before redrawing.
    cv.cvZero(image02)
    cv.cvZero(image04)
    for contour in cont.hrange():
        n_points = contour.total
        if n_points < 6:
            # cvFitEllipse2 requires at least 6 contour points
            continue
        # Copy the contour into a matrix and convert to float for fitting.
        pts = cv.cvCreateMat(1, n_points, cv.CV_32SC2)
        pts32f = cv.cvCreateMat(1, n_points, cv.CV_32FC2)
        cv.cvCvtSeqToArray(contour, pts,
                           cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
        cv.cvConvert(pts, pts32f)
        box = cv.CvBox2D()
        box = cv.cvFitEllipse2(pts32f)
        # Draw the raw contour in white.
        cv.cvDrawContours(image04, contour, cv.CV_RGB(255, 255, 255),
                          cv.CV_RGB(255, 255, 255), 0, 1, 8, cv.cvPoint(0, 0))
        # Integer centre / half-size, as cvEllipse expects half-axes.
        center = cv.CvPoint()
        size = cv.CvSize()
        center.x = cv.cvRound(box.center.x)
        center.y = cv.cvRound(box.center.y)
        size.width = cv.cvRound(box.size.width * 0.5)
        size.height = cv.cvRound(box.size.height * 0.5)
        box.angle = -box.angle
        # Overlay the fitted ellipse in blue.
        cv.cvEllipse(image04, center, size, box.angle, 0, 360,
                     cv.CV_RGB(0, 0, 255), 1, cv.CV_AA, 0)
    # Refresh the HighGUI result window.
    highgui.cvShowImage("Result", image04)
scale = 0. # scale the histograms cv.cvConvertScale (hist.bins, hist.bins, scale, 0) # clear the histogram image cv.cvSetZero (histimg) # compute the width for each bin do display bin_w = histimg.width / hdims for i in range (hdims): # for all the bins # get the value, and scale to the size of the hist image val = cv.cvRound (cv.cvGetReal1D (hist.bins, i) * histimg.height / 255) # compute the color color = hsv2rgb (i * 180. / hdims) # draw the rectangle in the wanted color cv.cvRectangle (histimg, cv.cvPoint (i * bin_w, histimg.height), cv.cvPoint ((i + 1) * bin_w, histimg.height - val), color, -1, 8, 0) # we can now display the images highgui.cvShowImage ('Camera', frame) highgui.cvShowImage ('Histogram', histimg) # handle events
def process_image(slider_pos):
    """Trackbar callback: threshold image03 at `slider_pos`, find contours,
    fit an ellipse to each contour with at least 6 points, and draw both
    the contour and the ellipse into image04."""
    stor = cv.cvCreateMemStorage(0)
    # Threshold the source image. This needful for cv.cvFindContours().
    cv.cvThreshold(image03, image02, slider_pos, 255, cv.CV_THRESH_BINARY)
    # Find all contours.
    nb_contours, cont = cv.cvFindContours(image02, stor, cv.sizeof_CvContour,
                                          cv.CV_RETR_LIST,
                                          cv.CV_CHAIN_APPROX_NONE,
                                          cv.cvPoint(0, 0))
    # Clear images. IPL use.
    cv.cvZero(image02)
    cv.cvZero(image04)
    # This cycle draw all contours and approximate it by ellipses.
    for c in cont.hrange():
        count = c.total  # This is number point in contour
        # Number point must be more than or equal to 6 (for cv.cvFitEllipse_32f).
        if (count < 6):
            continue
        # Alloc memory for contour point set.
        PointArray = cv.cvCreateMat(1, count, cv.CV_32SC2)
        PointArray2D32f = cv.cvCreateMat(1, count, cv.CV_32FC2)
        # Get contour point set.
        cv.cvCvtSeqToArray(c, PointArray,
                           cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
        # Convert CvPoint set to CvBox2D32f set.
        cv.cvConvert(PointArray, PointArray2D32f)
        box = cv.CvBox2D()
        # Fits ellipse to current contour.
        box = cv.cvFitEllipse2(PointArray2D32f)
        # Draw current contour.
        cv.cvDrawContours(image04, c, cv.CV_RGB(255, 255, 255),
                          cv.CV_RGB(255, 255, 255), 0, 1, 8, cv.cvPoint(0, 0))
        # Convert ellipse data from float to integer representation.
        center = cv.CvPoint()
        size = cv.CvSize()
        center.x = cv.cvRound(box.center.x)
        center.y = cv.cvRound(box.center.y)
        # cvEllipse takes half-axes, hence the * 0.5.
        size.width = cv.cvRound(box.size.width * 0.5)
        size.height = cv.cvRound(box.size.height * 0.5)
        box.angle = -box.angle
        # Draw ellipse.
        cv.cvEllipse(image04, center, size, box.angle, 0, 360,
                     cv.CV_RGB(0, 0, 255), 1, cv.CV_AA, 0)
    # Show image. HighGUI use.
    highgui.cvShowImage("Result", image04)