def findEdges(original, out, threshold1 = 100, threshold2 = None):
    """Run Canny edge detection on ``original`` and copy the *colored*
    edge pixels into ``out`` (everything else stays black).

    Deprecated -- use findBWEdges unless colored edges are really needed.

    original   -- 8-bit BGR source image (left untouched).
    out        -- destination image, same size/depth as ``original``.
    threshold1 -- lower Canny hysteresis threshold.
    threshold2 -- upper Canny threshold; defaults to ``threshold1 * 3``
                  (the commonly recommended 1:3 ratio).
    """
    warnings.warn("Use findBWEdges instead unless you really need colored edges.", DeprecationWarning)
    # Derive the upper threshold when the caller did not supply one.
    # (Fixed: compare to None with `is`, not `==`.)
    if threshold2 is None:
        threshold2 = threshold1 * 3
    # Two one-channel scratch images: a b/w copy of the original and a
    # buffer for the edges found in that b/w picture.
    gray = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)
    edge = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)
    # Create the b/w copy of the original.
    cv.cvCvtColor(original, gray, cv.CV_BGR2GRAY)
    # Blur the b/w copy, putting the result into the edge buffer, then
    # negate it so edges stand out.
    # NOTE(review): cvCanny below overwrites `edge` completely, so these
    # two calls look like dead work inherited from an older version --
    # kept for identical behavior; confirm before removing.
    cv.cvSmooth(gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.cvNot(gray, edge)
    # Run the 'Canny' edge finder on the grayscale copy; the resulting
    # b/w edge map lands in `edge`.
    cv.cvCanny(gray, edge, threshold1, threshold2)
    # Initialize the out-image to black ...
    cv.cvSetZero(out)
    # ... then use the b/w edge map as a mask to copy the colored edge
    # pixels from the original into the out-image.
    cv.cvCopy(original, out, edge)
def read(self) :
    """Grab a frame from self.input, reduce it to a binary-ish gray image,
    and return a list of detected polygons (each a list of (x, y) tuples)
    whose area is at least self.min_area.  Returns [] when disabled or
    when nothing is found.
    """
    frame=self.input.read()
    # Work on a single-channel grayscale copy of the captured frame.
    cv_rs = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
    cv.cvCvtColor(frame,cv_rs,cv.CV_RGB2GRAY)
    frame = cv_rs
    if self.enabled :
        # I think these functions are too specialized for transforms
        # Denoise, then morphologically open (erode + dilate) to drop specks.
        cv.cvSmooth(frame,frame,cv.CV_GAUSSIAN,3, 0, 0, 0 )
        cv.cvErode(frame, frame, None, 1)
        cv.cvDilate(frame, frame, None, 1)
        # Trace contours; cvFindContours consumes `frame` destructively.
        num_contours,contours=cv.cvFindContours(frame,self.storage,cv.sizeof_CvContour,cv.CV_RETR_LIST,cv.CV_CHAIN_APPROX_NONE,cv.cvPoint(0,0))
        if contours is None :
            return []
        else :
            # Simplify each contour to a polygon (Douglas-Peucker, eps=3).
            contours = cv.cvApproxPoly( contours, cv.sizeof_CvContour, self.storage, cv.CV_POLY_APPROX_DP, 3, 1 );
            if contours is None :
                return []
            else :
                final_contours = []
                # hrange() iterates the whole contour chain.
                for c in contours.hrange() :
                    area = abs(cv.cvContourArea(c))
                    #self.debug_print('Polygon Area: %f'%area)
                    if area >= self.min_area :
                        lst = []
                        for pt in c :
                            lst.append((pt.x,pt.y))
                        final_contours.append(lst)
                    # NOTE(review): likely redundant -- hrange() already walks
                    # h_next; rebinding `contours` here has no effect on the
                    # iteration.  Confirm before removing.
                    contours = contours.h_next
                return final_contours
    return []
def medianBlur(image, filterSize=43):
    """Median-filter ``image`` in place and return it.

    filterSize is the aperture of the median filter (default 43, a fairly
    strong blur).  Note: the filtering happens *in place* -- the original
    image is modified, and the very same pointer is returned so the call
    can be chained or used in expressions.
    """
    # Source and destination are the same image, so cvSmooth overwrites
    # the input; the alias just makes the in-place intent explicit.
    blurred = image
    cv.cvSmooth(blurred, blurred, cv.CV_MEDIAN, filterSize)
    return blurred
def analyzeCut(scaleImage, edgeImage, cut): """Extract the interesting features respecting the cut""" # Set up constraints constraints = regionSelector.Constraints(cv.cvGetSize(scaleImage), cut, margin, superMargin, 0.002, 0.25) # Create temporary images blurImage = cv.cvCreateImage(cv.cvGetSize(scaleImage), 8, 3) workImage = cv.cvCreateImage(cv.cvGetSize(scaleImage), 8, 3) # Create a blurred copy of the original cv.cvSmooth(scaleImage, blurImage, cv.CV_BLUR, 3, 3, 0) # Superimpose the edges onto the blured image cv.cvNot(edgeImage, edgeImage) cv.cvCopy(blurImage, workImage, edgeImage) # Get the edges back to white cv.cvNot(edgeImage, edgeImage) # We're done with the blurred image now cv.cvReleaseImage(blurImage) # Retrive the regions touching the cut component_dictionary = featureDetector.ribbonFloodFill(scaleImage, edgeImage, workImage, cut, margin, lo, up) # Clean up cv.cvReleaseImage(workImage) # Prune components newComponents = regionSelector.pruneRegions(component_dictionary, constraints) # Return the dictionary of accepted components #transformer.translateBoundingBoxes(newComponents, 1) return newComponents
def gaussianBlur(image, filterSize=43, sigma=None):
    """Gaussian-blur ``image`` in place and return it.

    filterSize -- filter aperture; default 43.  139 gives a very strong
                  blur, but takes a while.
    sigma      -- Gaussian standard deviation.  Defaults to
                  opencvFilt2sigma(filterSize) so the spread always matches
                  the requested aperture.  (The previous default was frozen
                  at opencvFilt2sigma(43) at definition time, which gave a
                  mismatched sigma for any non-default filterSize.)
    """
    if sigma is None:
        # Computed at call time so the default tracks filterSize.
        sigma = opencvFilt2sigma(filterSize)
    # Carry out the filter operation (in place).
    cv.cvSmooth(image, image, cv.CV_GAUSSIAN, filterSize, 0, sigma)
    return image
def read(self) :
    """Grab a frame, binarize it by inverted-gray thresholding, and return
    a list of detected polygons (lists of (x, y) tuples) with area at
    least self.min_area.  Returns [] when disabled or nothing is found.

    When self.debug is set, pygame surfaces of the raw and thresholded
    frames are stashed on self for display.
    """
    frame=self.input.read()
    if self.debug :
        # Keep an untouched copy of the raw frame for the debug display.
        raw_frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,frame.nChannels)
        cv.cvCopy(frame,raw_frame,None)
        self.raw_frame_surface=pygame.image.frombuffer(frame.imageData,(frame.width,frame.height),'RGB')
    if self.enabled :
        cv_rs = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
        # convert color
        cv.cvCvtColor(frame,cv_rs,cv.CV_BGR2GRAY)
        # invert the image (dark features become bright)
        cv.cvSubRS(cv_rs, 255, cv_rs, None);
        # threshold the image
        frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
        cv.cvThreshold(cv_rs, frame, self.threshold, 255, cv.CV_THRESH_BINARY)
        if self.debug :
            thresh_frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,3)
            cv.cvCvtColor(frame,thresh_frame,cv.CV_GRAY2RGB)
            self.thresh_frame_surface=pygame.image.frombuffer(thresh_frame.imageData,(frame.width,frame.height),'RGB')
        # I think these functions are too specialized for transforms
        # Denoise, then morphologically open (erode + dilate) to drop specks.
        cv.cvSmooth(frame,frame,cv.CV_GAUSSIAN,3, 0, 0, 0 )
        cv.cvErode(frame, frame, None, 1)
        cv.cvDilate(frame, frame, None, 1)
        # Trace contours; cvFindContours consumes `frame` destructively.
        num_contours,contours=cv.cvFindContours(frame,self.storage,cv.sizeof_CvContour,cv.CV_RETR_LIST,cv.CV_CHAIN_APPROX_NONE,cv.cvPoint(0,0))
        if contours is None :
            return []
        else :
            # Simplify each contour to a polygon (Douglas-Peucker, eps=3).
            contours = cv.cvApproxPoly( contours, cv.sizeof_CvContour, self.storage, cv.CV_POLY_APPROX_DP, 3, 1 );
            if contours is None :
                return []
            else :
                final_contours = []
                # hrange() iterates the whole contour chain.
                for c in contours.hrange() :
                    area = abs(cv.cvContourArea(c))
                    #self.debug_print('Polygon Area: %f'%area)
                    if area >= self.min_area :
                        lst = []
                        for pt in c :
                            lst.append((pt.x,pt.y))
                        final_contours.append(lst)
                    # NOTE(review): likely redundant -- hrange() already walks
                    # h_next.  Confirm before removing.
                    contours = contours.h_next
                return final_contours
    return []
def analyzeCut(original, edgeImage, cut, settings, showBlobs=False): """Extract the interesting features in the vicinity of a given cut""" # Get all data from the settings lo = settings.lo up = settings.up # Set up the margin with respect to the cut margin = marginCalculator.getPixels(original, cut, settings.marginPercentage) superMargin = 0 # ^^ We don't use superMargin # Set up constraints constraints = regionSelector.Constraints(cv.cvGetSize(original), cut, margin, superMargin, 0.002, 0.25) # Create temporary images blurImage = cv.cvCreateImage(cv.cvGetSize(original), 8, 3) workImage = cv.cvCreateImage(cv.cvGetSize(original), 8, 3) # Create a blurred copy of the original cv.cvSmooth(original, blurImage, cv.CV_BLUR, 3, 3, 0) # Superimpose the edges onto the blured image cv.cvNot(edgeImage, edgeImage) cv.cvCopy(blurImage, workImage, edgeImage) # We're done with the blurred image now cv.cvReleaseImage(blurImage) # Get the edges back to white cv.cvNot(edgeImage, edgeImage) # Retrive the regions touching the cut component_dictionary = featureDetector.ribbonFloodFill(original, edgeImage, workImage, cut, margin, lo, up) #start expanded # Prune components BEFORE we delete the workImage tmpnewComponents = regionSelector.pruneExpandedRegions(component_dictionary, constraints) newComponents = regionSelector.pruneExpandedRagionsto(tmpnewComponents, constraints, cut, workImage) # Clean up only if we do not return the image if not showBlobs: cv.cvReleaseImage(workImage) # Return the dictionary of accepted components or both if not showBlobs: return newComponents else: return (workImage, newComponents)
def on_trackbar (position):
    """Trackbar callback: re-run Canny with the slider value as the lower
    threshold (upper = 3x) and display the colored edges.

    Reads/writes the module-level images `gray`, `edge`, `image`,
    `col_edge` and the window name `win_name`.
    """
    # Pre-smooth and invert into the edge buffer (both overwritten by
    # cvCanny below; kept as-is from the original demo).
    cv.cvSmooth (gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.cvNot (gray, edge)
    # run the edge detector on the gray scale image
    cv.cvCanny (gray, edge, position, position * 3, 3)
    # reset the output to black
    cv.cvSetZero (col_edge)
    # copy the colored pixels through the edge mask
    cv.cvCopy (image, col_edge, edge)
    # show the image
    highgui.cvShowImage (win_name, col_edge)
def on_trackbar(position):
    """Trackbar callback: re-run Canny with the slider value as the lower
    threshold (upper = 3x) and display the colored edges.

    Reads/writes the module-level images `gray`, `edge`, `image`,
    `col_edge` and the window name `win_name`.
    """
    # Pre-smooth and invert into the edge buffer (both overwritten by
    # cvCanny below; kept as-is from the original demo).
    cv.cvSmooth(gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.cvNot(gray, edge)
    # run the edge detector on the gray scale image
    cv.cvCanny(gray, edge, position, position * 3, 3)
    # reset the output to black
    cv.cvSetZero(col_edge)
    # copy the colored pixels through the edge mask
    cv.cvCopy(image, col_edge, edge)
    # show the image
    highgui.cvShowImage(win_name, col_edge)
def on_trackbar (position):
    """Trackbar callback: re-run Canny with the slider value as the lower
    threshold (upper = 3x) and display the colored edges.

    Reads/writes the module-level images `gray`, `edge`, `image`,
    `col_edge` and the window name `win_name`.
    """
    # The following two calls are probably useless (their results are
    # overwritten by cvCanny below).
    cv.cvSmooth (gray, edge, cv.CV_BLUR, 3, 3, 0)   # image smoothing
    cv.cvNot (gray, edge)                           # bitwise inversion of array elements
    # run the edge detector on the gray scale image (Canny algorithm)
    cv.cvCanny (gray, edge, position, position * 3, 3)
    # reset: clear the output array
    cv.cvSetZero (col_edge)
    # copy edge points; the `edge` argument acts as a copy mask
    cv.cvCopy (image, col_edge, edge)
    # show the image
    highgui.cvShowImage (win_name, col_edge)
def detect_faces_on(path):
    """Run Haar-cascade object detection on the image file at ``path``.

    The image is converted to grayscale, lightly smoothed and
    histogram-equalized before detection.  Each enabled cascade in
    ``cascade_files`` is applied in turn and all hits are accumulated.

    Returns a list of dicts with keys 'x', 'y', 'w', 'h' (one per hit).
    NOTE(review): HAAR_SCALE and HAAR_NEIGHBORS are module-level
    constants -- confirm they are defined in the importing module.
    """
    faces = []
    image = cvLoadImage(path)
    # convert to grayscale for faster results
    grayscale = cvCreateImage(cvSize(image.width, image.height), 8, 1)
    cvCvtColor(image, grayscale, CV_BGR2GRAY)
    # smooth picture for better results
    cvSmooth(grayscale, grayscale, CV_GAUSSIAN, 3, 3)
    storage = cvCreateMemStorage(0)
    cvClearMemStorage(storage)
    cvEqualizeHist(grayscale, grayscale)
    # (cascade file, minimum detection window size); entries are commented
    # in/out to choose which detectors run.
    cascade_files = [
        # ('/usr/share/opencv/haarcascades/haarcascade_eye_tree_eyeglasses.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_lowerbody.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_mouth.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_profileface.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_eye.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_eyepair_big.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_nose.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_righteye_2splits.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt2.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_fullbody.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_eyepair_small.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_righteye.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_upperbody.xml', (50, 50)),
        ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt_tree.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_lefteye_2splits.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_lefteye.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_upperbody.xml', (50, 50)),
        # ('parojos_22_5.1.xml', (22, 5)),
        # ('Mouth.xml', (22, 15)),
    ]
    for cascade_file, cascade_sizes in cascade_files:
        # NOTE(review): os.path.join with a single argument is a no-op --
        # presumably a directory prefix was once joined here.
        cascade = cvLoadHaarClassifierCascade(os.path.join(cascade_file), cvSize(1, 1))
        faces += cvHaarDetectObjects(grayscale, cascade, storage, HAAR_SCALE, HAAR_NEIGHBORS, CV_HAAR_DO_CANNY_PRUNING, cvSize(*cascade_sizes))
    return [{'x': f.x, 'y': f.y, 'w': f.width, 'h': f.height} for f in faces]
def findBWEdges(original, out, threshold1, threshold2=None):
    """Identical to findEdges except that this returns white edges on a
    black background.  We really don't need colored edges any longer; this
    also makes it easy to do a manual merge of edge and blur pictures.

    original   -- 8-bit BGR source image (left untouched).
    out        -- single-channel destination; receives the Canny edge map.
    threshold1 -- lower Canny hysteresis threshold.
    threshold2 -- upper Canny threshold; defaults to ``threshold1 * 3``
                  (the body always handled None -- the parameter now
                  declares that default explicitly).

    Returns ``out``.
    """
    # Fixed: compare to None with `is`, not `==`.
    if threshold2 is None:
        threshold2 = threshold1 * 3
    gray = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)
    cv.cvCvtColor(original, gray, cv.CV_BGR2GRAY)
    # NOTE(review): the smooth/invert results in `out` are immediately
    # overwritten by cvCanny -- kept for identical behavior.
    cv.cvSmooth(gray, out, cv.CV_BLUR, 3, 3, 0)
    cv.cvNot(gray, out)
    cv.cvCanny(gray, out, threshold1, threshold2)
    return out
def __findedge(self, filename):
    """Load ``filename``, enlarge it by self.enlarge, and trace curves in
    a series of binary thresholdings (levels 80..180 step 20) of the
    grayscale copy, delegating to self.__findcurve for each level.

    Side effects: sets self.img, self.drawimg and resets self.allcurve.
    """
    tmpimg = highgui.cvLoadImage (filename)
    # Upscale the input by the configured enlargement factor.
    self.img = cv.cvCreateImage(cv.cvSize(int(tmpimg.width * self.enlarge), int(tmpimg.height * self.enlarge)), 8, 3)
    cv.cvResize(tmpimg, self.img, cv.CV_INTER_LINEAR)
    if (self.drawimage):
        self.drawimg = cv.cvCloneImage(self.img)
    else:
        self.drawimg = cv.cvCreateImage(cv.cvGetSize(self.img), 8, 3)
    greyimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8,1)
    cv.cvCvtColor(self.img, greyimg, cv.CV_BGR2GRAY)
    self.allcurve = []
    for i in range(80, 200, 20):
        bimg = cv.cvCloneImage(greyimg)
        cv.cvSmooth(bimg, bimg, cv.CV_MEDIAN, 9)
        # cv.cvSmooth(bimg, bimg, cv.CV_BILATERAL, 9)
        # cv.cvSmooth(bimg, bimg, cv.CV_BLUR, 9)
        # cv.cvSmooth(bimg, bimg, cv.CV_BLUR, 9)
        # NOTE(review): the median-smoothed `bimg` is overwritten here --
        # cvThreshold reads from the *unsmoothed* greyimg, so the smoothing
        # above has no effect.  Confirm whether that is intended.
        cv.cvThreshold(greyimg, bimg, i, 255, cv.CV_THRESH_BINARY)
        self.__findcurve(bimg)
def threshold_image(image, n=[]):
    """Record the first 5 images to get a background, then diff the
    current frame with the last saved frame.

    NOTE: the mutable default ``n=[]`` is *deliberate* -- it persists
    across calls and acts as the background store.  Do not "fix" it.
    Returns the input unchanged during the first 5 warm-up calls; after
    that, returns the input masked to the changed (foreground) regions.
    """
    if len(n) < 5:
        # n[4] will be our background
        # First capture a few images
        n.append(cv.cvCloneMat(image))
        if len(n) == 5:
            # last time here
            # could do averaging here.
            pass
        return image
    original = n[4]
    # Pixel-wise absolute difference against the stored background.
    differenceImage  = cv.cvCloneMat( image )
    cv.cvAbsDiff( image, original, differenceImage )
    """The threshold value determines the amount of "Change" required
    before something will show up"""
    thresholdValue = 50 # 32
    cv.cvThreshold( differenceImage, differenceImage, thresholdValue, 255, cv.CV_THRESH_BINARY )
    # Convert to one channel
    gray = cv.cvCreateImage( cv.cvGetSize(differenceImage), 8, 1 )
    cv.cvCvtColor( differenceImage, gray, cv.CV_BGR2GRAY )
    # Use median filter to remove salt and pepper noise.
    cv.cvSmooth(gray, gray, cv.CV_MEDIAN, 15)
    # Dilate and the threshold image
    # It adds a border to the object.
    #cv.cvDilate(gray,gray, None, 9)
    # Add a bit of Blur to the threshold mask
    cv.cvSmooth(gray, gray, cv.CV_GAUSSIAN, 5)
    # Keep only the original pixels where the mask is non-zero.
    result = cv.cvCloneMat( image)
    cv.cvSetZero(result)
    cv.cvAnd(image,image, result, gray)
    return result
def threshold_image(image, n=[]):
    """Record the first 5 images to get a background, then diff the
    current frame with the last saved frame.

    NOTE: the mutable default ``n=[]`` is *deliberate* -- it persists
    across calls and acts as the background store.  Do not "fix" it.
    Returns the input unchanged during the first 5 warm-up calls; after
    that, returns the input masked to the changed (foreground) regions.
    """
    if len(n) < 5:
        # n[4] will be our background
        # First capture a few images
        n.append(cv.cvCloneMat(image))
        if len(n) == 5:
            # last time here
            # could do averaging here.
            pass
        return image
    original = n[4]
    # Pixel-wise absolute difference against the stored background.
    differenceImage = cv.cvCloneMat(image)
    cv.cvAbsDiff(image, original, differenceImage)
    """The threshold value determines the amount of "Change" required
    before something will show up"""
    thresholdValue = 50 # 32
    cv.cvThreshold(differenceImage, differenceImage, thresholdValue, 255, cv.CV_THRESH_BINARY)
    # Convert to one channel
    gray = cv.cvCreateImage(cv.cvGetSize(differenceImage), 8, 1)
    cv.cvCvtColor(differenceImage, gray, cv.CV_BGR2GRAY)
    # Use median filter to remove salt and pepper noise.
    cv.cvSmooth(gray, gray, cv.CV_MEDIAN, 15)
    # Dilate and the threshold image
    # It adds a border to the object.
    #cv.cvDilate(gray,gray, None, 9)
    # Add a bit of Blur to the threshold mask
    cv.cvSmooth(gray, gray, cv.CV_GAUSSIAN, 5)
    # Keep only the original pixels where the mask is non-zero.
    result = cv.cvCloneMat(image)
    cv.cvSetZero(result)
    cv.cvAnd(image, image, result, gray)
    return result
def __findContour(self, filename):
    """Find the contours of the image in ``filename`` and save every
    contour point (with a running index) into self.allcurve.

    Side effects: sets self.img, self.grayimg, self.drawimg (contours
    drawn in white), self.allcurve and self.curvelength.
    """
    #find the contour of images, and save all points in self.vKeyPoints
    self.img = highgui.cvLoadImage (filename)
    self.grayimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8,1)
    self.drawimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8,3)
    cv.cvCvtColor (self.img, self.grayimg, cv.CV_BGR2GRAY)
    # Three blur passes to suppress noise before thresholding.
    cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
    cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
    cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
    cv.cvThreshold( self.grayimg, self.grayimg, self.threshold, self.threshold +100, cv.CV_THRESH_BINARY )
    cv.cvZero(self.drawimg)
    storage = cv.cvCreateMemStorage(0)
    # cvFindContours consumes self.grayimg destructively.
    nb_contours, cont = cv.cvFindContours (self.grayimg, storage, cv.sizeof_CvContour, cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_NONE, cv.cvPoint (0,0))
    cv.cvDrawContours (self.drawimg, cont, cv.cvScalar(255,255,255,0), cv.cvScalar(255,255,255,0), 1, 1, cv.CV_AA, cv.cvPoint (0, 0))
    self.allcurve = []
    idx = 0
    for c in cont.hrange():
        # Convert the contour sequence into point matrices.
        PointArray = cv.cvCreateMat(1, c.total , cv.CV_32SC2)
        PointArray2D32f= cv.cvCreateMat( 1, c.total , cv.CV_32FC2)
        cv.cvCvtSeqToArray(c, PointArray, cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
        fpoints = []
        for i in range(c.total):
            kp = myPoint()
            kp.x = cv.cvGet2D(PointArray,0, i)[0]
            kp.y = cv.cvGet2D(PointArray,0, i)[1]
            kp.index = idx
            idx += 1
            fpoints.append(kp)
        self.allcurve.append(fpoints)
    self.curvelength = idx
def detect(image): image_size = cv.cvGetSize(image) # create grayscale version grayscale = cv.cvCreateImage(image_size, 8, 1) cv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY) # create storage storage = cv.cvCreateMemStorage(0) cv.cvClearMemStorage(storage) # equalize histogram cv.cvEqualizeHist(grayscale, grayscale) # detect objects cascade = cv.cvLoadHaarClassifierCascade('haarcascade_frontalface_alt.xml', cv.cvSize(1,1)) faces = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, cv.cvSize(100, 100)) if faces: for i in faces: r = image[int(i.y):int(i.y+i.height),int(i.x):int(i.x+i.width)] cv.cvSmooth(r,r,cv.CV_BLUR,51,51)
def __findedge(self, filename):
    """Load ``filename``, enlarge it by self.enlarge, and trace curves in
    a series of binary thresholdings (levels 80..180 step 20) of the
    grayscale copy, delegating to self.__findcurve for each level.

    Side effects: sets self.img, self.drawimg and resets self.allcurve.
    """
    tmpimg = highgui.cvLoadImage(filename)
    # Upscale the input by the configured enlargement factor.
    self.img = cv.cvCreateImage(
        cv.cvSize(int(tmpimg.width * self.enlarge),
                  int(tmpimg.height * self.enlarge)), 8, 3)
    cv.cvResize(tmpimg, self.img, cv.CV_INTER_LINEAR)
    if (self.drawimage):
        self.drawimg = cv.cvCloneImage(self.img)
    else:
        self.drawimg = cv.cvCreateImage(cv.cvGetSize(self.img), 8, 3)
    greyimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8, 1)
    cv.cvCvtColor(self.img, greyimg, cv.CV_BGR2GRAY)
    self.allcurve = []
    for i in range(80, 200, 20):
        bimg = cv.cvCloneImage(greyimg)
        cv.cvSmooth(bimg, bimg, cv.CV_MEDIAN, 9)
        # cv.cvSmooth(bimg, bimg, cv.CV_BILATERAL, 9)
        # cv.cvSmooth(bimg, bimg, cv.CV_BLUR, 9)
        # cv.cvSmooth(bimg, bimg, cv.CV_BLUR, 9)
        # NOTE(review): the median-smoothed `bimg` is overwritten here --
        # cvThreshold reads from the *unsmoothed* greyimg, so the smoothing
        # above has no effect.  Confirm whether that is intended.
        cv.cvThreshold(greyimg, bimg, i, 255, cv.CV_THRESH_BINARY)
        self.__findcurve(bimg)
def read(self):
    """Grab a frame and locate the brightest point of its red channel.

    Returns a single-element list [(x, y)] with the location of the red
    channel's maximum, or [] when disabled or when the maximum is at the
    origin (treated as "nothing found").  When self.debug is set, pygame
    surfaces of the raw/red/threshold stages are stashed on self.
    """
    frame = self.input.read()
    if self.debug :
        # Keep an untouched copy of the raw frame for the debug display.
        raw_frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,frame.nChannels)
        cv.cvCopy(frame,raw_frame,None)
        self.raw_frame_surface=pygame.image.frombuffer(frame.imageData,(frame.width,frame.height),'RGB')
    if self.enabled :
        # Extract the red channel into a single-channel image.
        cvt_red = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
        cv.cvSplit(frame,None,None,cvt_red,None)
        if self.debug :
            red_frame = cv.cvCreateImage(cv.cvSize(cvt_red.width,cvt_red.height),cvt_red.depth,3)
            cv.cvMerge(cvt_red,None,None,None,red_frame)
            self.red_frame_surface = pygame.image.frombuffer(red_frame.imageData,(cvt_red.width,cvt_red.height),'RGB')
        # I think these functions are too specialized for transforms
        # Denoise, then morphologically open (erode + dilate) to drop specks.
        cv.cvSmooth(cvt_red,cvt_red,cv.CV_GAUSSIAN,3, 0, 0, 0 )
        cv.cvErode(cvt_red, cvt_red, None, 1)
        cv.cvDilate(cvt_red, cvt_red, None, 1)
        if self.debug :
            thresh_frame = cv.cvCreateImage(cv.cvSize(cvt_red.width,cvt_red.height),cvt_red.depth,3)
            cv.cvMerge(cvt_red,None,None,None,thresh_frame)
            self.thresh_frame_surface = pygame.image.frombuffer(cvt_red.imageData,(cvt_red.width,cvt_red.height),'RGB')
        # cvMinMaxLoc fills the point structs with the min/max locations.
        cvpt_min = cv.cvPoint(0,0)
        cvpt_max = cv.cvPoint(0,0)
        t = cv.cvMinMaxLoc(cvt_red,cvpt_min,cvpt_max)
        print t
        # A maximum at the origin is treated as "no bright spot found".
        if cvpt_max.x == 0 and cvpt_max.y == 0 :
            return []
        return [(cvpt_max.x,cvpt_max.y)]
# Scratch images for the HSV split, the threshold mask, the two histogram
# display canvases and the output mask.  (`img_h`, `size`, `cap`, the
# histograms h_hue/h_sat/h_val and hsv_min/hsv_max/mask_bw are created
# earlier in this script.)
img_s = cv.cvCreateImage(size, 8, 1)
img_v = cv.cvCreateImage(size, 8, 1)
thresh_mask = cv.cvCreateImage(size, 8, 1)
hist_hue_img = cv.cvCreateImage((int(h_bins * scalewidth), scaleheight), 8, 3)
hist_val_img = cv.cvCreateImage((int(v_bins * scalewidth), scaleheight), 8, 3)
output_mask = cv.cvCreateImage(size, 8, 1)
# Main capture loop: grab a frame, mask out-of-range HSV values, and
# accumulate per-channel histograms.
# NOTE(review): the loop body appears to continue beyond this excerpt.
while True:
    img = highgui.cvQueryFrame(cap)
    cv.cvZero(img_h)
    cv.cvZero(img_s)
    cv.cvZero(img_v)
    cv.cvZero(thresh_mask)
    highgui.cvShowImage("Input", img)
    # 5x5 Gaussian Blur
    cv.cvSmooth(img, img, cv.CV_GAUSSIAN, 5, 5)
    # convert to HSV
    cv.cvCvtColor(img, img, cv.CV_BGR2HSV)
    # threshold bad values
    cv.cvInRangeS(img, hsv_min, hsv_max, thresh_mask)
    cv.cvAnd(thresh_mask, mask_bw, thresh_mask)
    # Hue(0,180), Saturation(0,255), Value(0,255)
    cv.cvSplit(img, img_h, img_s, img_v, 0)
    # calculate histogram of each channel, restricted to the mask
    cv.cvCalcHist(img_h, h_hue, 0, thresh_mask)
    cv.cvCalcHist(img_s, h_sat, 0, thresh_mask)
    cv.cvCalcHist(img_v, h_val, 0, thresh_mask)
# capture the current image frame = highgui.cvQueryFrame(capture) highgui.cvShowImage('Originale', frame) if frame is None: # no image captured... end the processing break ################ traitement de l'image ################ cv.cvCvtColor(frame, frameGray, cv.CV_BGR2GRAY) # niveau de gris cv.cvSub(frameGray, frameGrayBg, framewithoutbg) # soustraction du background cv.cvMul(framewithoutbg, framewithoutbg, framemul, get_gain()) # amplification cv.cvSmooth(framemul, framelisser1, cv.CV_BLUR, param_liss[0], param2_liss[0]) # lissage if first == 0: # "moyenne" sur deux image cv.cvAnd(framelisser1, framelisser2, framelisser) if first == 1: framelisser = cv.cvCloneImage(framelisser1) framelisser2 = cv.cvCloneImage(framelisser1) cv.cvThreshold(framelisser, frameBin, get_seuil(), float(255), cv.CV_THRESH_BINARY) # binaristaion de l image ################ run detection ################ zone_active = zoneActivePremier(zone_active, frameBin) if len(zone_active) == 0: centrePointeur = []
edgeDetector.findBWEdges(image, edges, threshold1, threshold2) #out = cv.cvCreateImage(cv.cvGetSize(image), 8, 1) #cv.cvCvtColor(image, out, cv.CV_BGR2GRAY) print "Finding the golden means in the picture" lines = lib.findGoldenMeans(cv.cvGetSize(image)) #lines = lib.findMeans(cv.cvGetSize(image), lib.PHI) print "Test plot and line scanner methods" points = lineScanner.naiveBWLineScanner(edges, lines[0]) #cv.cvSmooth(out, out, cv.CV_MEDIAN, 7, 7, 0) cv.cvSmooth(image, blurImage, cv.CV_BLUR, 3, 3, 0) #cv.cvSmooth(out, out, cv.CV_GAUSSIAN, 7, 7, 0) #out = blurImage # Superimpose the edges onto the blured image cv.cvNot(edges, edges) cv.cvCopy(blurImage, out, edges) # We're done with the blurred image now #cv.cvReleaseImage(blurImage) #print points[:0] cut = lines[1] margin = marginCalculator.getPixels(image, cut, 0.024) component_dictionary = featureDetector.ribbonFloodFill(image, edges, out, cut, margin, lo, up) #featureDetector.floodFillLine(image, out, points, cut, lo, up, {})
def compute_saliency(image):
    """Compute a saliency map of ``image`` via the spectral-residual
    method: DFT the downscaled grayscale image, subtract a box-filtered
    log-magnitude from the log-magnitude (keeping the phase), inverse-DFT,
    smooth, and threshold at ``thresh`` percent of the mean.

    Reads the module-level trackbar values `thresh` and `scale`.
    Returns an 8-bit single-channel image at the original resolution.
    """
    global thresh
    global scale
    # Work at a power-of-two resolution so the DFT needs no padding.
    saliency_scale = int(math.pow(2,scale));
    bw_im1 = cv.cvCreateImage(cv.cvGetSize(image), cv.IPL_DEPTH_8U,1)
    cv.cvCvtColor(image, bw_im1, cv.CV_BGR2GRAY)
    bw_im = cv.cvCreateImage(cv.cvSize(saliency_scale,saliency_scale), cv.IPL_DEPTH_8U,1)
    cv.cvResize(bw_im1, bw_im)
    highgui.cvShowImage("BW", bw_im)
    # Build a 2-channel (real, imaginary) float image for the DFT.
    realInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 1);
    imaginaryInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 1);
    complexInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 2);
    cv.cvScale(bw_im, realInput, 1.0, 0.0);
    cv.cvZero(imaginaryInput);
    cv.cvMerge(realInput, imaginaryInput, None, None, complexInput);
    dft_M = saliency_scale #cv.cvGetOptimalDFTSize( bw_im.height - 1 );
    dft_N = saliency_scale #cv.cvGetOptimalDFTSize( bw_im.width - 1 );
    dft_A = cv.cvCreateMat( dft_M, dft_N, cv.CV_32FC2 );
    image_Re = cv.cvCreateImage( cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    image_Im = cv.cvCreateImage( cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    # copy A to dft_A and pad dft_A with zeros
    tmp = cv.cvGetSubRect( dft_A, cv.cvRect(0,0, bw_im.width, bw_im.height));
    cv.cvCopy( complexInput, tmp, None );
    if(dft_A.width > bw_im.width):
        tmp = cv.cvGetSubRect( dft_A, cv.cvRect(bw_im.width,0, dft_N - bw_im.width, bw_im.height));
        cv.cvZero( tmp );
    # Forward DFT, then split into real/imaginary planes.
    cv.cvDFT( dft_A, dft_A, cv.CV_DXT_FORWARD, complexInput.height );
    cv.cvSplit( dft_A, image_Re, image_Im, None, None );
    # Compute the magnitude and phase angle of the spectrum.
    image_Mag = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    image_Phase = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    #compute the phase of the spectrum
    cv.cvCartToPolar(image_Re, image_Im, image_Mag, image_Phase, 0)
    log_mag = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    cv.cvLog(image_Mag, log_mag)
    # Box filter the magnitude, then take the difference: adding the
    # negative 3x3 box average yields the spectral residual.
    image_Mag_Filt = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    filt = cv.cvCreateMat(3,3, cv.CV_32FC1);
    cv.cvSet(filt,cv.cvScalarAll(-1.0/9.0))
    cv.cvFilter2D(log_mag, image_Mag_Filt, filt, cv.cvPoint(-1,-1))
    cv.cvAdd(log_mag, image_Mag_Filt, log_mag, None)
    cv.cvExp(log_mag, log_mag)
    # Recombine residual magnitude with the original phase and invert.
    cv.cvPolarToCart(log_mag, image_Phase, image_Re, image_Im,0);
    cv.cvMerge(image_Re, image_Im, None, None, dft_A)
    cv.cvDFT( dft_A, dft_A, cv.CV_DXT_INVERSE, complexInput.height)
    tmp = cv.cvGetSubRect( dft_A, cv.cvRect(0,0, bw_im.width, bw_im.height));
    cv.cvCopy( tmp, complexInput, None );
    cv.cvSplit(complexInput, realInput, imaginaryInput, None, None)
    # NOTE(review): `min` and `max` shadow the builtins here (left as-is).
    min, max = cv.cvMinMaxLoc(realInput);
    #cv.cvScale(realInput, realInput, 1.0/(max-min), 1.0*(-min)/(max-min));
    cv.cvSmooth(realInput, realInput);
    # Threshold at `thresh` percent of the mean saliency response.
    threshold = thresh/100.0*cv.cvAvg(realInput)[0]
    cv.cvThreshold(realInput, realInput, threshold, 1.0, cv.CV_THRESH_BINARY)
    # Scale the binary map back up to the input resolution as 8-bit.
    tmp_img = cv.cvCreateImage(cv.cvGetSize(bw_im1),cv.IPL_DEPTH_32F, 1)
    cv.cvResize(realInput,tmp_img)
    cv.cvScale(tmp_img, bw_im1, 255,0)
    return bw_im1
def on_trackbar1(position):
    """Trackbar callback: run a Hough circle transform on the grayscale
    image using the current slider values (pos1..pos7), draw the detected
    circles, and crop a centered square out of circles larger than 200 px
    radius.  Displays the annotated image and edge map.

    Reads/writes many module-level names: pos1..pos7, img, gray, edges,
    wname, and the highgui windows created by the main script.
    NOTE(review): the bare `except:` below swallows *all* errors from the
    Hough/crop section, including programming errors -- consider
    narrowing it to `except Exception`.
    """
    global pos1
    global pos2
    global pos3
    global pos4
    global pos5
    global pos6
    global pos7
    global img
    global gray
    global edges
    print
    print position, pos2, pos3, pos4, pos5, pos6, pos7
    temp = cv.cvCloneImage(img)
    gray = cv.cvCreateImage(cv.cvGetSize(temp), 8, 1)
    edges = cv.cvCreateImage(cv.cvGetSize(temp), 8, 1)
    dst = cv.cvCreateImage( cv.cvSize(256,256), 8, 3 )
    src = cv.cvCloneImage(img)
    src2 = cv.cvCreateImage( cv.cvGetSize(src), 8, 3 );
    cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
    # Edge map shown for feedback; the Hough transform reads `gray`.
    cv.cvCanny(gray, edges, position, pos2, 3)
    cv.cvSmooth(edges, edges, cv.CV_GAUSSIAN, 9, 9)
    storage = cv.cvCreateMat(50, 1, cv.CV_32FC3)
    cv.cvSetZero(storage)
    try:
        circles = cv.cvHoughCircles(gray, storage, cv.CV_HOUGH_GRADIENT, 1, float(pos3), float(pos2), float(pos4), long(pos5),long(pos6) )
        #print storage
        # Each storage row is (center_x, center_y, radius).
        for i in storage:
            print "Center: ", i[0], i[1], " Radius: ", i[2]
            center = cv.cvRound(i[0]), cv.cvRound(i[1])
            radius = cv.cvRound(i[2])
            cv.cvCircle(temp, (center), radius, cv.CV_RGB(255, 0, 0), 1, cv.CV_AA, 0 )
            cv.cvCircle(edges, (center), radius, cv.CV_RGB(255, 255, 255), 1, cv.CV_AA, 0 )
            if radius > 200:
                print "Circle found over 200 Radius"
                # Inset the crop square by pos7 pixels from the circle edge.
                center_crop_topleft = (center[0]-(radius - pos7)), (center[1]-(radius - pos7))
                center_crop_bottomright = (center[0]+(radius - pos7)), (center[1]+(radius - pos7))
                print "crop top left: ", center_crop_topleft
                print "crop bottom right: ", center_crop_bottomright
                center_crop = cv.cvGetSubRect(src, (center_crop_topleft[0], center_crop_topleft[1] , (center_crop_bottomright[0] - center_crop_topleft[0]), (center_crop_bottomright[1] - center_crop_topleft[1]) ))
                #center_crop = cv.cvGetSubRect(src, (50, 50, radius/2, radius/2))
                cvShowImage( "center_crop", center_crop )
                print "center_crop created"
                #mark found circle's center with blue point and blue circle of pos 7 radius
                cv.cvCircle(temp ,(center), 2, cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 )
                cv.cvCircle(temp ,(center), (radius - pos7), cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 )
                #cvLogPolar(src, dst, (center), 48, CV_INTER_LINEAR +CV_WARP_FILL_OUTLIERS )
                #this will draw a smaller cirle outlining the center circle
                #pos7 = int(pos7 /2.5)
                #cv.cvCircle(dst ,(img_size.width-pos7, 0), 2, cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 )
                #cv.cvLine(dst, (img_size.width-pos7-1, 0), (img_size.width-pos7-1, img_size.height), cv.CV_RGB(0, 0, 255),1,8,0)
                #cvShowImage( "log-polar", dst )
                #print radius, (radius-pos7)
                #cropped = cv.cvCreateImage( (pos7, img_size.height), 8, 3)
                #cropped2 = cv.cvCreateImage( (pos7, img_size.height), 8, 3)
                #coin_edge_img = cv.cvGetSubRect(dst, (img_size.width-pos7, 0, pos7 ,img_size.height ))
                #to create the center cropped part of coin
                #img_size = cvGetSize(scr)
                #cvCopy(coin_edge_img, cropped)
                #cvSaveImage("temp.png", cropped)
                #im = Image.open("temp.png").rotate(90)
                #print "pil image size = ", im.size[0], im.size[1]
                #im = im.resize((im.size[0]*2, im.size[1]*2))
                #print "pil image size = ", im.size
                #im.show()
                #im.save("temp2.png")
                cropped2 = highgui.cvLoadImage("temp2.png")
                #cvShowImage( "cropped", cropped2)
    except:
        print "Exception:", sys.exc_info()[0]
        print position, pos2, pos3, pos4, pos5, pos6, pos7
        pass
    highgui.cvShowImage("edges", edges)
    #cvShowImage( "log-polar", dst )
    cvShowImage(wname, temp)
def get_smoothed(self,image):
    """Denoise ``image`` in place and return it.

    Applies a 3x3 Gaussian smooth followed by a morphological opening
    (one erode pass, then one dilate pass) to remove small specks while
    preserving the overall shape of larger features.  All three
    operations write back into the input image.
    """
    # Every step below uses the same image as source and destination,
    # so the caller's image is modified in place.
    result = image
    cv.cvSmooth(result, result, cv.CV_GAUSSIAN, 3, 0, 0, 0)
    cv.cvErode(result, result, None, 1)
    cv.cvDilate(result, result, None, 1)
    return result