def findEdges(original, out, threshold1=100, threshold2=None):
    """Return a new edge detected image with a specified threshold"""
    warnings.warn("Use findBWEdges instead unless you really need colored edges.", DeprecationWarning)
    # Define threshold2
    if threshold2 is None:
        threshold2 = threshold1 * 3
    # Create two pictures with only one channel: one for a b/w copy
    # and one for storing the edges found in the b/w picture
    gray = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)
    edge = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)
    # Create the b/w copy of the original
    cv.cvCvtColor(original, gray, cv.CV_BGR2GRAY)
    # Blur the b/w copy, but put the result into edge pic
    cv.cvSmooth(gray, edge, cv.CV_BLUR, 3, 3, 0)
    # Negate the b/w copy of the original with the newly blurred
    # b/w copy. This will make edges stand out
    cv.cvNot(gray, edge)
    # Run the 'Canny' edge-finding algorithm: it analyses the first
    # argument and stores the resulting picture in the second
    cv.cvCanny(gray, edge, threshold1, threshold2)
    # We initialize our out-image to black
    cv.cvSetZero(out)
    # Finally, we use the found edges, which are b/w, as
    # a mask for copying the colored edges from the original
    # to the out-image
    cv.cvCopy(original, out, edge)
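# A minimal usage sketch for findEdges, assuming the legacy SWIG `opencv`
# bindings; 'input.jpg' and the output name are placeholders, not from the
# original code:
def _demo_findEdges():
    from opencv import cv, highgui
    original = highgui.cvLoadImage('input.jpg')  # loads as 3-channel BGR
    out = cv.cvCreateImage(cv.cvGetSize(original), 8, 3)
    findEdges(original, out, threshold1=100)  # threshold2 defaults to 3 * threshold1
    highgui.cvSaveImage('colored_edges.jpg', out)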
def analyzeCut(scaleImage, edgeImage, cut):
    """Extract the interesting features respecting the cut"""
    # Set up constraints
    constraints = regionSelector.Constraints(cv.cvGetSize(scaleImage), cut, margin, superMargin, 0.002, 0.25)
    # Create temporary images
    blurImage = cv.cvCreateImage(cv.cvGetSize(scaleImage), 8, 3)
    workImage = cv.cvCreateImage(cv.cvGetSize(scaleImage), 8, 3)
    # Create a blurred copy of the original
    cv.cvSmooth(scaleImage, blurImage, cv.CV_BLUR, 3, 3, 0)
    # Superimpose the edges onto the blurred image
    cv.cvNot(edgeImage, edgeImage)
    cv.cvCopy(blurImage, workImage, edgeImage)
    # Get the edges back to white
    cv.cvNot(edgeImage, edgeImage)
    # We're done with the blurred image now
    cv.cvReleaseImage(blurImage)
    # Retrieve the regions touching the cut
    component_dictionary = featureDetector.ribbonFloodFill(scaleImage, edgeImage, workImage, cut, margin, lo, up)
    # Clean up
    cv.cvReleaseImage(workImage)
    # Prune components
    newComponents = regionSelector.pruneRegions(component_dictionary, constraints)
    # Return the dictionary of accepted components
    #transformer.translateBoundingBoxes(newComponents, 1)
    return newComponents
def analyzeImage(original):
    scaleImage = cv.cvCreateImage(cv.cvSize(int(original.width * scale), int(original.height * scale)), 8, 3)
    cv.cvResize(original, scaleImage)
    # Create 1-channel image for the edges
    edgeImage = cv.cvCreateImage(cv.cvGetSize(scaleImage), 8, 1)
    # Retrieve edges
    edgeDetector.findBWEdges(scaleImage, edgeImage, edgeThreshold1, edgeThreshold2)
    # Get cuts
    cuts = lib.findGoldenMeans(cv.cvGetSize(scaleImage))
    # Run along
    allComponents = []
    for cut in cuts:
        cutComponents = analyzeCut(scaleImage, edgeImage, cut)
        allComponents.append(cutComponents)
    # Get the collected component_dictionaries
    for dict in allComponents:
        lib.drawBoundingBoxes(original, dict, scale)
    # Draw the margins
    for cut in cuts:
        lib.drawMargin(original, cut, margin, scale)
        # Include if the super margin needs to be drawn
        #lib.drawMargin(original, cut, superMargin, scale)
    return (original, allComponents)
def __FindHarris(self, filename):
    # Find the corners of images, and save all corner points in self.vKeyPoints
    self.img = highgui.cvLoadImage(filename)
    greyimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8, 1)
    w = cv.cvGetSize(self.img).width
    h = cv.cvGetSize(self.img).height
    image = cv.cvCreateImage(cv.cvGetSize(self.img), cv.IPL_DEPTH_32F, 1)
    # Convert to grayscale first, then to the 32-bit float image Harris needs
    # (the original called cvConvert(image, greyimg) with an empty source)
    cv.cvCvtColor(self.img, greyimg, cv.CV_BGR2GRAY)
    cv.cvConvert(greyimg, image)
    self.cornerimg = cv.cvCreateImage(cv.cvGetSize(self.img), cv.IPL_DEPTH_32F, 1)
    cv.cvCornerHarris(image, self.cornerimg, 11, 5, 0.1)
def run(self):
    if self.capture:
        webcam_frame = highgui.cvQueryFrame(self.capture)
    else:
        print "Capture failed!"
        return
    if self.inverted_video.get_active():
        highgui.cvConvertImage(webcam_frame, webcam_frame, highgui.CV_CVTIMG_FLIP)
    highgui.cvConvertImage(webcam_frame, self.display_frame, highgui.CV_CVTIMG_SWAP_RB)
    if False:
        # PROCESS WEBCAM FRAME HERE...
        inputImage = cv.cvCreateImage(cv.cvGetSize(webcam_frame), cv.IPL_DEPTH_8U, 1)
        cv.cvCvtColor(webcam_frame, inputImage, cv.CV_RGB2GRAY)
        cv.cvThreshold(inputImage, inputImage, 128, 255, cv.CV_THRESH_BINARY)
        mysize = cv.cvGetSize(webcam_frame)
        height = mysize.height
        width = mysize.width
        # Find horizontal first-moment:
        if False:
            mysum = 0
            for i in range(height):
                mysum += sum(inputImage[i, :])
            print "Sum:", mysum
        cv.cvMerge(inputImage, inputImage, inputImage, None, self.display_frame)
    incoming_pixbuf = gtk.gdk.pixbuf_new_from_data(
        self.display_frame.imageData,
        gtk.gdk.COLORSPACE_RGB,
        False,
        8,
        self.display_frame.width,
        self.display_frame.height,
        self.display_frame.widthStep)
    incoming_pixbuf.copy_area(0, 0, self.display_frame.width, self.display_frame.height, self.webcam_pixbuf, 0, 0)
    self.video_image.queue_draw()
    return self.video_enabled_button.get_active()
def get_nearest_feature(image, this_point, n=2000):
    """
    Get the feature nearest to a specified image coordinate, considering
    up to n candidate features. Features are determined using
    cvGoodFeaturesToTrack.
    """
    _red = cv.cvScalar(0, 0, 255, 0)
    _green = cv.cvScalar(0, 255, 0, 0)
    _blue = cv.cvScalar(255, 0, 0, 0)
    _white = cv.cvRealScalar(255)
    _black = cv.cvRealScalar(0)
    quality = 0.01
    min_distance = 4
    N_best = n
    win_size = 11
    grey = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)
    eig = cv.cvCreateImage(cv.cvGetSize(image), 32, 1)
    temp = cv.cvCreateImage(cv.cvGetSize(image), 32, 1)
    # create a grey version of the image
    cv.cvCvtColor(image, grey, cv.CV_BGR2GRAY)
    points = cv.cvGoodFeaturesToTrack(grey, eig, temp, N_best, quality, min_distance, None, 3, 0, 0.04)
    # refine the corner locations
    better_points = cv.cvFindCornerSubPix(grey, points, cv.cvSize(win_size, win_size), cv.cvSize(-1, -1), cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
    eigs = []
    for i in range(len(points)):
        eigs.append(cv.cvGetMat(eig)[int(points[i].y)][int(points[i].x)])
    mypoints = np.matrix(np.zeros((len(points) * 2), dtype=float)).reshape(len(points), 2)
    dists = []
    for i, point in enumerate(points):
        mypoints[i, 0] = point.x
        mypoints[i, 1] = point.y
        dists.append(np.linalg.norm(mypoints[i, :] - this_point))
    dists = np.array(dists)
    sorteddists = dists.argsort()
    # mark the nearest raw corner, but return its sub-pixel refinement
    cv.cvDrawCircle(image, points[sorteddists[0]], 5, _green, 2, 8, 0)
    return better_points[sorteddists[0]]
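# Hedged usage sketch: find the feature nearest the image centre. Assumes the
# legacy SWIG bindings plus numpy (the `np` the function already relies on);
# 'scene.jpg' is a placeholder path:
def _demo_get_nearest_feature():
    from opencv import cv, highgui
    import numpy as np
    image = highgui.cvLoadImage('scene.jpg')
    size = cv.cvGetSize(image)
    centre = np.matrix([size.width / 2.0, size.height / 2.0])
    nearest = get_nearest_feature(image, centre)
    print "nearest feature:", nearest.x, nearest.y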
def harrisResponse(image):
    """pyvision/point/DetectorHarris.py Runs at 10.5 fps..."""
    gray = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)
    corners = cv.cvCreateImage(cv.cvGetSize(image), 32, 1)
    cv.cvCvtColor(image, gray, cv.CV_BGR2GRAY)
    cv.cvCornerHarris(gray, corners, 3)
    image = filter_and_render_cv(image, corners)
    #IPShellEmbed()()
    return image
def __FindCorner(self, filename):
    # Find the corners of images, and save all corner points in self.vKeyPoints
    self.img = highgui.cvLoadImage(filename)
    greyimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8, 1)
    hsvimg = cv.cvCreateImage(cv.cvGetSize(self.img), 8, 3)
    cv.cvCvtColor(self.img, hsvimg, cv.CV_RGB2HSV)
    cv.cvCvtColor(hsvimg, greyimg, cv.CV_BGR2GRAY)
    eigImage = cv.cvCreateImage(cv.cvGetSize(greyimg), cv.IPL_DEPTH_32F, 1)
    tempImage = cv.cvCreateImage(cv.cvGetSize(greyimg), cv.IPL_DEPTH_32F, 1)
    self.points = cv.cvGoodFeaturesToTrack(greyimg, eigImage, tempImage, 2000, 0.01, 5, None, 3, 0, 0.01)
    self.points2 = cv.cvFindCornerSubPix(greyimg, self.points, cv.cvSize(20, 20), cv.cvSize(-1, -1), cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
    cv.cvReleaseImage(eigImage)
    cv.cvReleaseImage(tempImage)
def analyzeCut(original, edgeImage, cut, settings, showBlobs=False):
    """Extract the interesting features in the vicinity of a given cut"""
    # Get all data from the settings
    lo = settings.lo
    up = settings.up
    # Set up the margin with respect to the cut
    margin = marginCalculator.getPixels(original, cut, settings.marginPercentage)
    superMargin = 0  # we don't use superMargin
    # Set up constraints
    constraints = regionSelector.Constraints(cv.cvGetSize(original), cut, margin, superMargin, 0.002, 0.25)
    # Create temporary images
    blurImage = cv.cvCreateImage(cv.cvGetSize(original), 8, 3)
    workImage = cv.cvCreateImage(cv.cvGetSize(original), 8, 3)
    # Create a blurred copy of the original
    cv.cvSmooth(original, blurImage, cv.CV_BLUR, 3, 3, 0)
    # Superimpose the edges onto the blurred image
    cv.cvNot(edgeImage, edgeImage)
    cv.cvCopy(blurImage, workImage, edgeImage)
    # We're done with the blurred image now
    cv.cvReleaseImage(blurImage)
    # Get the edges back to white
    cv.cvNot(edgeImage, edgeImage)
    # Retrieve the regions touching the cut
    component_dictionary = featureDetector.ribbonFloodFill(original, edgeImage, workImage, cut, margin, lo, up)
    #start expanded
    # Prune components BEFORE we delete the workImage
    tmpnewComponents = regionSelector.pruneExpandedRegions(component_dictionary, constraints)
    newComponents = regionSelector.pruneExpandedRagionsto(tmpnewComponents, constraints, cut, workImage)
    # Clean up, but only if we do not return the image
    if not showBlobs:
        cv.cvReleaseImage(workImage)
    # Return the dictionary of accepted components, or both
    if not showBlobs:
        return newComponents
    else:
        return (workImage, newComponents)
def getBoundingBoxImage(original, settings, cutNo, thickness=1, color=None):
    """Same as above but will paint the bounding boxes
    original should be the image data
    settings should be of class Settings
    cutNo as int
    color as CV_RGB"""
    # Get the cut defined by cutNo from the cuts from the first cut ratio in settings
    cut = lib.findMeans(cv.cvGetSize(original), settings.cutRatios[0])[cutNo]
    # Get the BW edge image
    edgeImage = getEdgeImage(original, settings)
    # Find the margin
    margin = marginCalculator.getPixels(original, cut, settings.marginPercentage)
    tmp = []
    tmp.append(cut)
    components = analyzeCut(original, edgeImage, cut, settings)
    lib.drawMargin(original, cut, margin)
    # Draw the components
    lib.drawBoundingBoxes(original, components, thickness, color)
    return original
def getBlobImage(original, settings, cutNo):
    """Show the colored blobs in an image at the cut specified by cutNo as int.
    The cut ratios are stored in the settings, and only the first ratio in
    this list will be analyzed.
    original should be the image data
    settings should be of class Settings
    cutNo as int"""
    # Get the cut defined by cutNo from the cuts from the first cut ratio in settings
    cut = lib.findMeans(cv.cvGetSize(original), settings.cutRatios[0])[cutNo]
    # Get the BW edge image
    edgeImage = getEdgeImage(original, settings)
    # Find the margin
    margin = marginCalculator.getPixels(original, cut, settings.marginPercentage)
    # Clever hack for putting the cut in an array
    tmp = []
    tmp.append(cut)
    # Get results
    (blobImage, components) = analyzeCut(original, edgeImage, cut, settings, True)
    lib.drawLines(blobImage, blobImage, tmp)
    lib.drawMargin(blobImage, cut, margin)
    # Return result, what a surprise
    return blobImage
def analyzeImage(original, settings):
    """Runs the analysis on all cuts on an image"""
    # Get the BW edge image
    edgeImage = getEdgeImage(original, settings)
    # Get cuts and place them in a dictionary by cut ratio
    # XXX: Notice the ugly string conversion because python has an issue when
    # converting the ratio to a dictionary index
    cuts = {}
    for ratio in settings.cutRatios:
        cuts[str(ratio)] = lib.findMeans(cv.cvGetSize(original), ratio)
    # New dictionary for holding the resulting components.
    # Hold on, now we're putting the result (which is a dictionary)
    # inside a new dict (cutDict). This holds the result for the four cuts
    # for a given ratio. We then put this dict inside the comps dictionary,
    # which can be used for lookup by the cut ratio
    comps = {}
    for ratio in cuts:
        cutDict = {}
        for cutNo in range(len(cuts[ratio])):
            cutComponents = analyzeCut(original, edgeImage, cuts[ratio][cutNo], settings)
            cutDict[cutNo] = cutComponents
        comps[ratio] = cutDict
    # Clean up
    cv.cvReleaseImage(edgeImage)
    # This is a dictionary in a dictionary in a dictionary
    return comps
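# A sketch of walking the nested result structure returned above
# (ratio string -> cut number -> component dictionary); `image` and
# `settings` are assumed to already exist:
def _demo_walk_components(image, settings):
    comps = analyzeImage(image, settings)
    for ratio in comps:
        for cutNo in comps[ratio]:
            print "ratio", ratio, "cut", cutNo, "components:", len(comps[ratio][cutNo])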
def main():
    print "FaceIn! an OpenCV Python Face Recognition Program"
    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow('Camera', 10, 10)
    device = 0  # use first device found
    capture = highgui.cvCreateCameraCapture(device)
    frame = highgui.cvQueryFrame(capture)
    frame_size = cv.cvGetSize(frame)
    fps = 30
    while 1:
        frame = highgui.cvQueryFrame(capture)
        detectFace(frame)
        # display the frames to have a visual output
        highgui.cvShowImage('Camera', frame)
        # handle events
        k = highgui.cvWaitKey(5)
        if k % 0x100 == 27:
            # user has pressed the ESC key, so exit
            quit()
def detect(image, cascade_file='haarcascade_data/haarcascade_frontalface_alt.xml'):
    image_size = cv.cvGetSize(image)
    # create grayscale version
    grayscale = cv.cvCreateImage(image_size, 8, 1)
    cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)
    # create storage
    storage = cv.cvCreateMemStorage(0)
    cv.cvClearMemStorage(storage)
    # equalize histogram
    cv.cvEqualizeHist(grayscale, grayscale)
    # detect objects
    cascade = cv.cvLoadHaarClassifierCascade(cascade_file, cv.cvSize(1, 1))
    faces = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, cv.cvSize(50, 50))
    positions = []
    if faces:
        for i in faces:
            positions.append({'x': i.x, 'y': i.y, 'width': i.width, 'height': i.height})
            cv.cvRectangle(image, cv.cvPoint(int(i.x), int(i.y)),
                           cv.cvPoint(int(i.x + i.width), int(i.y + i.height)),
                           cv.CV_RGB(0, 255, 0), 3, 8, 0)
    return positions
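# Hedged usage sketch for detect(): the returned list of dicts is easy to
# report on, and the boxes are drawn onto the input in place ('people.jpg'
# is a placeholder):
def _demo_detect():
    from opencv import highgui
    image = highgui.cvLoadImage('people.jpg')
    for face in detect(image):
        print "face at (%(x)d, %(y)d), size %(width)dx%(height)d" % face
    highgui.cvSaveImage('people_boxed.jpg', image)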
def __calculate(self):
    print "I want to calculate an image"
    size = cv.cvGetSize(self.origImage)
    result = cv.cvCreateMat(size.height, size.width, cv.CV_32FC1)
    row_sums = cv.cvCreateMat(size.height, size.width, cv.CV_32FC1)
    # Build a summed-area (integral) table: row_sums accumulates each column
    # downwards, result accumulates those column sums to the left
    for i in range(size.height):
        for j in range(size.width):
            image_value = cv.cvGet2D(self.origImage, i, j)
            image_value = image_value[0]
            prev_row_sum = 0
            if i == 0:
                cv.cvmSet(row_sums, i, j, image_value)
            else:
                prev_row_sum = cv.cvmGet(row_sums, i - 1, j)
                cv.cvmSet(row_sums, i, j, image_value + prev_row_sum)
            if j == 0:
                cv.cvmSet(result, i, j, prev_row_sum + image_value)
            else:
                prev_result = cv.cvmGet(result, i, j - 1)
                cv.cvmSet(result, i, j, prev_row_sum + image_value + prev_result)
            if i == 0 and j == 0:
                print "image_value:", image_value
                print "prev_row_sum:", prev_row_sum
    return result
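# The method above fills `result` with the integral image
#   I(i, j) = sum of img over all rows <= i and columns <= j.
# A tiny pure-Python reference of the same quantity, using the equivalent
# per-row decomposition (no OpenCV needed, handy as a sanity check):
def _integral_reference(img):
    h, w = len(img), len(img[0])
    out = [[0] * w for _ in range(h)]
    for i in range(h):
        row_sum = 0
        for j in range(w):
            row_sum += img[i][j]  # prefix sum along the current row
            out[i][j] = row_sum + (out[i - 1][j] if i else 0)
    return out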
def process(self, videofile, progress):
    progress(0, _("Extracting histogram"))
    video = hg.cvCreateFileCapture(str(videofile).encode(sys.getfilesystemencoding()))
    if not video:
        raise Exception("Could not open video file")
    histo = cv.cvCreateHist([256], cv.CV_HIST_ARRAY, [[0, 256]], 1)
    frame = hg.cvQueryFrame(video)
    frame_gray = cv.cvCreateImage(cv.cvGetSize(frame), frame.depth, 1)
    hists = []
    nbframes = 0
    fps = hg.cvGetCaptureProperty(video, hg.CV_CAP_PROP_FPS)
    while frame:
        if not progress(hg.cvGetCaptureProperty(video, hg.CV_CAP_PROP_POS_AVI_RATIO)):
            break
        hg.cvConvertImage(frame, frame_gray)
        cv.cvCalcHist(frame_gray, histo, 0, None)
        # read back all 256 bins (the original range(255) dropped the last one)
        h = [cv.cvGetReal1D(histo.bins, i) for i in range(256)]
        h = numpy.array(h, dtype='int32')
        hists.append(h)
        frame = hg.cvQueryFrame(video)
        nbframes += 1
    hists = numpy.array(hists)
    return hists.reshape(nbframes, -1), fps
def draw_target(img, x, y, color_name):
    width = 10
    if color_name == "GREEN":
        color = cv.CV_RGB(0, 255, 0)
    else:
        color = cv.CV_RGB(255, 0, 0)
    size = cv.cvGetSize(img)
    if x >= size.width or x < 0 or y >= size.height or y < 0:
        return
    for i in range(width):
        for j in range(width):
            if i == 0 or j == 0 or j == 9 or i == 9:
                px = x + j - width / 2
                py = y + i - width / 2
                if px < 0:
                    px = 0
                if py < 0:
                    py = 0
                if px >= size.width:
                    px = size.width - 1
                if py >= size.height:
                    py = size.height - 1
                cv.cvSet2D(img, py, px, color)
def detect_lines(self, img_grey, img_orig):
    """
    Detect lines within the image. To switch between standard and
    probabilistic Hough transform, use cv.CV_HOUGH_STANDARD, or
    cv.CV_HOUGH_PROBABILISTIC.
    """
    # Set transform method ('standard', 'probabilistic')
    transform_method = 'probabilistic'
    # Clear out our storage
    cv.cvClearMemStorage(self.lines_storage)
    sz = cv.cvSize(img_grey.width & -2, img_grey.height & -2)
    img_dst_color = cv.cvCreateImage(cv.cvGetSize(img_orig), 8, 3)
    # tgrey must match img_grey's size for cvCanny; sz and img_dst_color
    # appear to be leftovers and are unused below
    tgrey = cv.cvCreateImage(cv.cvGetSize(img_grey), 8, 1)
    # Edge-detect the grey input into tgrey and run Hough on the edge map
    # (the original called cvCanny with the empty tgrey as its source)
    cv.cvCanny(img_grey, tgrey, 50, 200, 3)
    if transform_method == 'standard':
        lines = cv.cvHoughLines2(tgrey, self.lines_storage, cv.CV_HOUGH_STANDARD, 1, cv.CV_PI / 180, 100, 0, 0)
    else:
        lines = cv.cvHoughLines2(tgrey, self.lines_storage, cv.CV_HOUGH_PROBABILISTIC, 1, cv.CV_PI / 180, 50, 50, 10)
    return lines
def depthmatrix(leftimage, rightimage, precision=4, mask=0):
    """Build a depth matrix by stereo-matching every downsampled pixel.
    Precision is the number of times to downsample: each output cell covers
    a square of 2**precision input pixels. If mask is set, only pixels in
    the mask are computed. Note: despite filling x and y matrices as well,
    only the 8-bit z matrix is returned."""
    info = cv.cvGetSize(leftimage)
    width = info.width
    height = info.height
    precision_pixels = 2 ** precision
    downsampled_size = cv.cvSize(width / precision_pixels, height / precision_pixels)
    print "Precision of", downsampled_size.width, downsampled_size.height, "px"
    if mask:
        downsampled_mask = cv.cvCreateImage(downsampled_size, 8, 1)
        cv.cvResize(mask, downsampled_mask)
    matx = cv.cvCreateImage(downsampled_size, 8, 1)
    maty = cv.cvCreateImage(downsampled_size, 8, 1)
    matz = cv.cvCreateImage(downsampled_size, 8, 1)
    for i in xrange(width / precision_pixels):
        for j in xrange(height / precision_pixels):
            if mask:
                if not cv.cvGetReal2D(downsampled_mask, j, i):
                    continue
            # map the downsampled cell back to full-resolution coordinates
            # (the original multiplied by precision instead of precision_pixels)
            x = i * precision_pixels
            y = j * precision_pixels
            depth = depthmatch(x + precision_pixels / 2, y + precision_pixels / 2, leftimage, rightimage, roi=precision_pixels, buf=precision_pixels * 2)
            #print i, j
            # fill in the result matrices if mask wasn't 0 at this point (X, Y, Z)
            cv.cvSetReal2D(matx, j, i, int(depth[0][0]))
            cv.cvSetReal2D(maty, j, i, int(depth[0][1]))
            cv.cvSetReal2D(matz, j, i, int(depth[0][2]))
    return matz
def HarrisPoints(self, imgfile):
    self.points = []
    self.drawimg = highgui.cvLoadImage(imgfile)
    c = 1
    try:
        gray = cv.cvCreateImage(cv.cvGetSize(self.drawimg), 8, 1)
        cv.cvCvtColor(self.drawimg, gray, cv.CV_BGR2GRAY)
        eig = cv.cvCreateImage(cv.cvGetSize(self.drawimg), 32, 1)
        tmpimg = cv.cvCreateImage(cv.cvGetSize(self.drawimg), 32, 1)
        p = cv.cvGoodFeaturesToTrack(gray, eig, tmpimg, 100, 0.1, 20, None, 7, 1, 0.04)
        for x in p:
            cv.cvCircle(self.drawimg, x, 3, cv.CV_RGB(0, 255, 0), 8, 0)
            self.points.append(x)
    except Exception, e:
        print e
        print 'ERROR: problem handling ' + imgfile
def same2ndValue(frame, x, y):
    size = cv.cvGetSize(frame)
    if x >= 0 and x < size.width and y >= 0 and y < size.height:
        if cv.cvGetReal2D(frame, y, x) == 0:
            return 0
        else:
            # only return 1 if this pixel is also white
            return 1
    else:
        return 0
def get_frame(self):
    frame = self.input.read()
    converted = cv.cvCreateImage(cv.cvGetSize(frame), frame.depth, 3)
    cv.cvCvtColor(frame, converted, cv.CV_GRAY2RGB)
    frame = converted
    frame_surface = pygame.image.frombuffer(frame.imageData, (frame.width, frame.height), 'RGB')
    return frame_surface
def __init__(self, entry):
    self.id = entry.id
    filename = entry.filepath
    self.image = highgui.cvLoadImage(filename)
    if not self.image:
        raise SystemError('This picture is not parsable by opencv: ' + filename)
    self.setSize(self.image.width, self.image.height)
    if entry.getSize() is None:
        entry.setSize(cv.cvGetSize(self.image))
def depthmatch(x, y, leftimage, rightimage, roi=80, buf=50, baseline=2.7, focal_length=80):
    """depthmatch function
    x, y : (int) pixel position of target in left image
    leftimage, rightimage : (IplImage) stereo images
    roi : (int) region of interest around x, y to use in matching
    buf : (int) buffer outside of a straight horizontal search for a match
    baseline, focal_length : camera geometry passed on to plane2point
    """
    #print "Match", x, y
    info = cv.cvGetSize(leftimage)
    width = info.width
    height = info.height
    centerx = width / 2
    centery = height / 2
    (y1, x1, y2, x2) = (y - roi, x - roi, y + roi, x + roi)
    if y1 < 0:
        y1 = 0
    if x1 < 0:
        x1 = 0
    if y2 > height:
        y2 = height
    if x2 > width:
        x2 = width
    # copy subregion roi x roi
    template_rect = cv.cvRect(x1, y1, (x2 - x1), (y2 - y1))
    template = cv.cvGetSubRect(leftimage, template_rect)
    #(y3, x3, y4, x4) = (y - roi - buf, x - roi - buf, y + roi + buf, width)  # +/- 20 pixels in vertical direction, -20 to the right edge
    (y3, x3, y4, x4) = (y - roi - buf, 0, y + roi + buf, x + roi + buf)  # +/- buf pixels in vertical direction, +buf to the left edge
    if x3 < 0:
        x3 = 0
    if y3 < 0:
        y3 = 0
    if x4 >= width:
        x4 = width - 1
    if y4 > height:
        y4 = height
    #cv.cvSetImageROI(rightimage, (y3, x3, y4, x4))
    rightsub_rect = cv.cvRect(x3, y3, (x4 - x3), (y4 - y3))
    rightsub = cv.cvGetSubRect(rightimage, rightsub_rect)
    # result matrix should be (W - w + 1) x (H - h + 1) where WxH are the
    # search-region dimensions and wxh are the template dimensions
    W = x4 - x3
    H = y4 - y3
    w = x2 - x1
    h = y2 - y1
    resy = (y4 - y3) - (y2 - y1) + 1
    resx = (x4 - x3) - (x2 - x1) + 1
    resultmat = cv.cvCreateImage((resx, resy), 32, 1)
    cv.cvZero(resultmat)
    # match template image in a subportion of rightimage
    cv.cvMatchTemplate(rightsub, template, resultmat, cv.CV_TM_SQDIFF)
    min_val, max_val, min_point, max_point = cv.cvMinMaxLoc(resultmat)
    cv.cvNormalize(resultmat, resultmat, 1, 0, cv.CV_MINMAX)
    depth = plane2point(x - centerx, y - centery, x3 + min_point.x + roi - centerx, y3 + min_point.y + roi - centery, baseline, focal_length)
    #print "Found match at", min_point.x + x3, min_point.y + y3
    return (depth, (x, y), (x3 + min_point.x + roi, y3 + min_point.y + roi))
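# Hedged sketch: match a single pixel between a rectified stereo pair.
# The file names and the probe point are placeholders:
def _demo_depthmatch():
    from opencv import highgui
    left = highgui.cvLoadImage('left.png')
    right = highgui.cvLoadImage('right.png')
    depth, left_pt, right_pt = depthmatch(160, 120, left, right, roi=40, buf=50)
    print "left", left_pt, "-> right", right_pt, "depth", depth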
def getGoodFeatures(image):
    # XXX: BETA but working
    # TODO: Clean up!! Comment properly
    # This is mostly copy/pasted from /usr/share/opencv/samples/python/lkdemo.py
    """Find features using OpenCV's cvGoodFeaturesToTrack"""
    win_size = 10
    MAX_COUNT = 500
    # create the images we need
    grey = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)
    eig = cv.cvCreateImage(cv.cvGetSize(grey), 32, 1)
    temp = cv.cvCreateImage(cv.cvGetSize(grey), 32, 1)
    # Make a b/w copy
    cv.cvCvtColor(image, grey, cv.CV_BGR2GRAY)
    # the default parameters
    quality = 0.01
    min_distance = 10
    # search the good points
    points = cv.cvGoodFeaturesToTrack(grey, eig, temp, MAX_COUNT, quality, min_distance, None, 3, 1, 0.04)
    # refine the corner locations
    cv.cvFindCornerSubPix(grey, points, cv.cvSize(win_size, win_size), cv.cvSize(-1, -1), cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
    # release the temporary images for good measure
    cv.cvReleaseImage(eig)
    cv.cvReleaseImage(temp)
    cv.cvReleaseImage(grey)
    return points
def getEdgeImage(original, settings):
    """Helper method for calculating the edge detected image"""
    # Get the thresholds
    edgeThreshold1 = settings.edgeThreshold1
    edgeThreshold2 = settings.edgeThreshold2
    # Create 1-channel image for the edges
    edgeImage = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)
    # Retrieve BW edges from the original and put them in edgeImage
    edgeDetector.findBWEdges(original, edgeImage, edgeThreshold1, edgeThreshold2)
    return edgeImage
def drawLines(original, outimage=None, lines=None, color=COL_RED):
    """Draw a list of lines on an image.
    If no outimage is supplied, the original is used.
    If no lines are supplied, default to drawing the golden section.
    If no color is supplied, use red"""
    if not outimage:
        outimage = original
    if not lines:
        lines = findGoldenMeans(cv.cvGetSize(original))
    for line in lines:
        cv.cvLine(outimage, line.p1, line.p2, color)
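# Hedged sketch: draw the default golden-section lines onto a copy so the
# original stays untouched ('photo.jpg' is a placeholder):
def _demo_drawLines():
    from opencv import cv, highgui
    image = highgui.cvLoadImage('photo.jpg')
    canvas = cv.cvCloneImage(image)
    drawLines(image, outimage=canvas)  # no lines passed: golden means are used
    highgui.cvSaveImage('golden_sections.jpg', canvas)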
def get_thresholded(self, gray_source, threshold):
    # Allocate a new image
    threshed = cv.cvCreateImage(cv.cvGetSize(gray_source), gray_source.depth, 1)
    # Invert the image (threshed = 255 - gray_source); note the result is
    # overwritten by the threshold below
    cv.cvSubRS(gray_source, cv.cvRealScalar(255), threshed, None)
    # Apply a binary threshold to the image
    cv.cvThreshold(gray_source, threshed, threshold, 255, cv.CV_THRESH_BINARY)
    # Release the source image
    cv.cvReleaseImage(gray_source)
    return threshed
def main():
    """
    Just the test.
    This method is a good resource on how to handle the results.
    """
    filename = sys.argv[1]
    image = highgui.cvLoadImage(filename)
    print "DO NOT EXPECT THE RUNNING TIME OF THIS TEST TO BE REPRESENTATIVE!"
    print ""
    print "THRESHOLDS AND EVERYTHING ELSE ARE HARDCODED!"
    cutRatios = [0.6667, lib.PHI, 0.6]
    settings = Settings(cutRatios)
    # Run the analysis with the above settings
    comps = naiveMethod.analyzeImage(image, settings)
    # This is just for drawing the results
    # The below methods can probably be combined, but don't bother
    # {{{
    # Get and draw the cuts
    cuts = {}
    for ratio in settings.cutRatios:
        cuts[str(ratio)] = lib.findMeans(cv.cvGetSize(image), ratio)
    for ratio in cuts:
        lib.drawLines(image, None, cuts[ratio], lib.getRandomColor())
    # Get and draw the components
    for ratio in comps:
        for cut in comps[ratio]:
            lib.drawBoundingBoxes(image, comps[ratio][cut])
    # }}}
    winname = "Failure"
    highgui.cvNamedWindow(winname, highgui.CV_WINDOW_AUTOSIZE)
    while True:
        highgui.cvShowImage(winname, image)
        c = highgui.cvWaitKey(0)
        if c == 'q':
            print "Exiting ..."
            print ""
            sys.exit(0)
def main():
    """
    Just the test.
    This method is a good resource on how to handle the results.
    Save images in this method if you have to.
    """
    filename = sys.argv[1]
    image = highgui.cvLoadImage(filename)
    cutRatios = [lib.PHI]
    #cutRatios = [0.618]
    settings = Settings(cutRatios)
    image = highgui.cvLoadImage(filename)
    thickness = 4
    settings.setMarginPercentage(0.025)
    settings.setMethod(sys.argv[3])
    cut = int(sys.argv[2])
    winname = sys.argv[1]
    #settings.setThresholds(100, 150)
    # Set the color for the boxes
    #color = lib.COL_BLACK
    #color = lib.COL_WHITE
    #color = lib.COL_RED
    color = lib.COL_GREEN
    #color = lib.COL_BLUE
    blobImg = blobResult(image, settings, cut)
    boxxImg = boundingBoxResult(image, settings, cut, thickness, color)
    # The cut value; this should be generalized a bit, though
    cutt = lib.findMeans(cv.cvGetSize(image), settings.cutRatios[0])[cut]
    oriantesen = cutt.getPoints()[0].x == cutt.getPoints()[1].x
    if oriantesen:
        cutPixel = cutt.getPoints()[1].x
    else:
        cutPixel = cutt.getPoints()[1].y
    if oriantesen:
        cv.cvLine(boxxImg, cv.cvPoint(cutPixel, cutt.getPoints()[0].y), cv.cvPoint(cutPixel, cutt.getPoints()[1].y), lib.COL_RED)
    else:
        cv.cvLine(boxxImg, cv.cvPoint(cutt.getPoints()[0].x, cutPixel), cv.cvPoint(cutt.getPoints()[1].x, cutPixel), lib.COL_RED)
    # Save images
    highgui.cvSaveImage('flood_cut_%s.png' % cut, boxxImg)
    highgui.cvSaveImage('blobs_cut_%s.png' % cut, blobImg)
    # Show images
    compareImages(blobImg, boxxImg, "blob", winname)
def averageWhitePoints(frame):
    xtotal = 0.0
    ytotal = 0.0
    count = 0
    size = cv.cvGetSize(frame)
    for x in range(size.width):
        for y in range(size.height):
            if cv.cvGetReal2D(frame, y, x) > 200:
                xtotal = xtotal + x
                ytotal = ytotal + y
                count += 1
    if count == 0:
        return 0, 0
    return int(xtotal / count), int(ytotal / count)
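# The centroid from averageWhitePoints feeds directly into the three-argument
# draw_target defined later in this collection; a hedged sketch of the two
# together, where `frame` is the colour frame and `laser` its thresholded
# single-channel mask:
def _demo_mark_centroid(frame, laser):
    cx, cy = averageWhitePoints(laser)
    if (cx, cy) != (0, 0):  # (0, 0) doubles as the "no white pixels" answer
        draw_target(frame, cx, cy)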
def __init__(self, image, use_texture):
    """
    Create an ImageFeatures object for an input image.
    use_texture is a boolean that, when true, results in the inclusion of
    texture features in addition to spatial location and color features.
    """
    self.image = image
    self.im_size = cv.cvGetSize(image)
    self.im_width = self.im_size.width
    self.im_height = self.im_size.height
    self.im_colors = 3
    self.array_image = ut.cv2np(image)
    self.tex_feat = None
    self.selected_features = None
    #self.im_width = image.shape[1]
    #self.im_height = image.shape[0]
    #self.im_colors = image.shape[2]
    self.use_texture = use_texture
    self.create_features(self.use_texture)
    self.mask_image = None
def detect(image):
    image_size = opencv.cvGetSize(image)
    # create grayscale version
    grayscale = opencv.cvCreateImage(image_size, 8, 1)
    opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)
    # create storage
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
    # equalize histogram
    opencv.cvEqualizeHist(grayscale, grayscale)
    # detect objects
    faces = opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(100, 100))
    #eyes = opencv.cvHaarDetectObjects(grayscale, eye_cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(60, 60))
    draw_bounding_boxes(faces, image, 127, 255, 0, 3)
def __init__(self, image, features_object, iter_limit=30, object_center=None, object_diameter=None, mix_obj=None, prior_gmm=None):
    """
    Create a SegmentObject given an image and a features_object computed
    from that image (ImageFeatures).
    """
    self.image = image
    self.im_size = cv.cvGetSize(image)
    self.im_width = self.im_size.width
    self.im_height = self.im_size.height
    if object_center is None:
        self.object_center = [self.im_width / 2.0, self.im_height / 2.0]
    if object_diameter is None:
        self.object_diameter = self.im_width / 4.0
        self.object_diameter = self.im_width / 6.0
        #self.object_diameter = self.im_width / 8.0
    self.mix_obj = mix_obj
    if self.mix_obj is None:
        self.mix_obj = 1.0 / 25.0  #16.0 #20.0
    self.mix_bg = 1.0 - self.mix_obj
    #self.im_width = image.shape[1]
    #self.im_height = image.shape[0]
    #self.image_size = cv.cvSize(self.im_width, self.im_height)
    if features_object.selected_features is not None:
        self.features = features_object.selected_features
    else:
        self.features = features_object.features
    self.features_object = features_object
    self.iter_limit = iter_limit
    self.fit(prior_gmm)
    self.class_image = None
    self.clean_class_image = None
    self.large_obj = None
    self.fg_object_ellipse = None
def threshold_image(image, n=[]):
    """Record the first 5 images to get a background, then diff the current
    frame with the last saved frame.
    """
    if len(n) < 5:
        # n[4] will be our background
        # First capture a few images
        n.append(cv.cvCloneMat(image))
        if len(n) == 5:
            # last time here
            # could do averaging here.
            pass
        return image
    original = n[4]
    differenceImage = cv.cvCloneMat(image)
    cv.cvAbsDiff(image, original, differenceImage)
    # The threshold value determines the amount of "change" required
    # before something will show up
    thresholdValue = 50  # 32
    cv.cvThreshold(differenceImage, differenceImage, thresholdValue, 255, cv.CV_THRESH_BINARY)
    # Convert to one channel
    gray = cv.cvCreateImage(cv.cvGetSize(differenceImage), 8, 1)
    cv.cvCvtColor(differenceImage, gray, cv.CV_BGR2GRAY)
    # Use median filter to remove salt and pepper noise.
    cv.cvSmooth(gray, gray, cv.CV_MEDIAN, 15)
    # Dilate the threshold image; it adds a border to the object.
    #cv.cvDilate(gray, gray, None, 9)
    # Add a bit of blur to the threshold mask
    cv.cvSmooth(gray, gray, cv.CV_GAUSSIAN, 5)
    result = cv.cvCloneMat(image)
    cv.cvSetZero(result)
    cv.cvAnd(image, image, result, gray)
    return result
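# Hedged capture-loop sketch around threshold_image. The first five frames
# seed the background (cached in the mutable default argument), after which
# only changed regions come back; cvGetMat wraps the IplImage because the
# function clones CvMats:
def _demo_threshold_loop():
    from opencv import cv, highgui
    capture = highgui.cvCreateCameraCapture(0)
    while True:
        frame = highgui.cvQueryFrame(capture)
        if frame is None:
            break
        mat = cv.cvGetMat(frame)
        highgui.cvShowImage('motion', threshold_image(mat))
        if highgui.cvWaitKey(10) != -1:  # any key stops the loop
            break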
def __findedge(self, filename):
    tmpimg = highgui.cvLoadImage(filename)
    self.img = cv.cvCreateImage(cv.cvSize(int(tmpimg.width * self.enlarge), int(tmpimg.height * self.enlarge)), 8, 3)
    cv.cvResize(tmpimg, self.img, cv.CV_INTER_LINEAR)
    if self.drawimage:
        self.drawimg = cv.cvCloneImage(self.img)
    else:
        self.drawimg = cv.cvCreateImage(cv.cvGetSize(self.img), 8, 3)
    greyimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8, 1)
    cv.cvCvtColor(self.img, greyimg, cv.CV_BGR2GRAY)
    self.allcurve = []
    for i in range(80, 200, 20):
        bimg = cv.cvCloneImage(greyimg)
        cv.cvSmooth(bimg, bimg, cv.CV_MEDIAN, 9)
        #cv.cvSmooth(bimg, bimg, cv.CV_BILATERAL, 9)
        #cv.cvSmooth(bimg, bimg, cv.CV_BLUR, 9)
        #cv.cvSmooth(bimg, bimg, cv.CV_BLUR, 9)
        cv.cvThreshold(greyimg, bimg, i, 255, cv.CV_THRESH_BINARY)
        self.__findcurve(bimg)
def detect(self, pil_image, cascade_name, recogn_w=50, recogn_h=50):
    # Get cascade:
    cascade = self.get_cascade(cascade_name)
    image = opencv.PIL2Ipl(pil_image)
    image_size = opencv.cvGetSize(image)
    grayscale = image
    if pil_image.mode == "RGB":
        # create grayscale version
        grayscale = opencv.cvCreateImage(image_size, 8, 1)
        # Could change to RGB2GRAY - it shouldn't affect the conversion much
        opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)
    # create storage
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
    # equalize histogram
    opencv.cvEqualizeHist(grayscale, grayscale)
    # detect objects
    return opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(recogn_w, recogn_h))
def draw_target(img, x, y):
    width = 10
    color = cv.CV_RGB(0, 255, 0)
    size = cv.cvGetSize(img)
    #cv.cvSet2D(img, x, y, color)
    for i in range(width):
        for j in range(width):
            if i == 0 or j == 0 or j == 9 or i == 9:
                px = x + j - width / 2
                py = y + i - width / 2
                if px < 0:
                    px = 0
                if py < 0:
                    py = 0
                if px >= size.width:
                    px = size.width - 1
                if py >= size.height:
                    py = size.height - 1
                cv.cvSet2D(img, py, px, color)
# register the mouse callback
highgui.cvSetMouseCallback('LkDemo', on_mouse, None)

while 1:
    # do forever
    # 1. capture the current image
    frame = highgui.cvQueryFrame(capture)
    if frame is None:
        # no image captured... end the processing
        break
    if image is None:
        # create the images we need
        image = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3)
        grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
        prev_grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
        pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
        prev_pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
        eig = cv.cvCreateImage(cv.cvGetSize(frame), cv.IPL_DEPTH_32F, 1)
        temp = cv.cvCreateImage(cv.cvGetSize(frame), cv.IPL_DEPTH_32F, 1)
        points = [[], []]
    # copy the frame, so we can draw on it
    cv.cvCopy(frame, image)
    # create a grey version of the image
    cv.cvCvtColor(image, grey, cv.CV_BGR2GRAY)
    if night_mode:
if len(sys.argv) == 1:
    # no argument on the command line, try to use the camera
    capture = highgui.cvCreateCameraCapture(device)
    # set the wanted image size from the camera
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH, 320)
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT, 240)

# capture the 1st frame to get some properties of it
frame = highgui.cvQueryFrame(capture)

# get some properties of the frame
frame_size = cv.cvGetSize(frame)

# create some images useful later
my_grayscale = cv.cvCreateImage(frame_size, 8, 1)
mask = cv.cvCreateImage(frame_size, 8, 1)
cv.cvSet(mask, 1)

blob_overlay = False

while True:
    # 1. capture the current image
    frame = highgui.cvQueryFrame(capture)
    if frame is None:
        # no image captured... end the processing
        break
# register the mouse callback
highgui.cvSetMouseCallback('LkDemo', on_mouse, None)

while 1:
    # do forever
    # 1. capture the current image
    frame = highgui.cvQueryFrame(capture)
    if frame is None:
        # no image captured... end the processing
        break
    if image is None:
        # create the images we need
        image = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3)
        image.origin = frame.origin
        grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
        prev_grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
        pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
        prev_pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
        points = [[], []]
    # copy the frame, so we can draw on it
    cv.cvCopy(frame, image)
    # create a grey version of the image
    cv.cvCvtColor(image, grey, cv.CV_BGR2GRAY)
    if night_mode:
        # night mode: only display the points
def main(args):
    global capture
    global hmax, hmin
    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Hue', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Saturation', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Laser', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow('Camera', 0, 10)
    highgui.cvMoveWindow('Hue', 0, 350)
    highgui.cvMoveWindow('Saturation', 360, 10)
    highgui.cvMoveWindow('Value', 360, 350)
    highgui.cvMoveWindow('Laser', 700, 40)
    highgui.cvCreateTrackbar("Brightness Trackbar", "Camera", 0, 255, change_brightness)
    highgui.cvCreateTrackbar("hmin Trackbar", "Hue", hmin, 180, change_hmin)
    highgui.cvCreateTrackbar("hmax Trackbar", "Hue", hmax, 180, change_hmax)
    highgui.cvCreateTrackbar("smin Trackbar", "Saturation", smin, 255, change_smin)
    highgui.cvCreateTrackbar("smax Trackbar", "Saturation", smax, 255, change_smax)
    highgui.cvCreateTrackbar("vmin Trackbar", "Value", vmin, 255, change_vmin)
    highgui.cvCreateTrackbar("vmax Trackbar", "Value", vmax, 255, change_vmax)
    print "grabbing camera"
    capture = highgui.cvCreateCameraCapture(0)
    print "found camera"
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH, 320)
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT, 240)
    frame = highgui.cvQueryFrame(capture)
    frameSize = cv.cvGetSize(frame)
    hsv = cv.cvCreateImage(frameSize, 8, 3)
    mask = cv.cvCreateImage(frameSize, 8, 1)
    hue = cv.cvCreateImage(frameSize, 8, 1)
    saturation = cv.cvCreateImage(frameSize, 8, 1)
    value = cv.cvCreateImage(frameSize, 8, 1)
    laser = cv.cvCreateImage(frameSize, 8, 1)
    while 1:
        frame = highgui.cvQueryFrame(capture)
        cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)
        #cv.cvInRangeS(hsv, hsv_min, hsv_max, mask)
        cv.cvSplit(hsv, hue, saturation, value, None)
        cv.cvInRangeS(hue, hmin, hmax, hue)
        cv.cvInRangeS(saturation, smin, smax, saturation)
        cv.cvInRangeS(value, vmin, vmax, value)
        #cv.cvInRangeS(hue, 0, 180, hue)
        cv.cvAnd(hue, value, laser)
        #cv.cvAnd(laser, value, laser)
        cenX, cenY = averageWhitePoints(laser)
        #print cenX, cenY
        draw_target(frame, cenX, cenY)
        #draw_target(frame, 200, 1)
        highgui.cvShowImage('Camera', frame)
        highgui.cvShowImage('Hue', hue)
        highgui.cvShowImage('Saturation', saturation)
        highgui.cvShowImage('Value', value)
        highgui.cvShowImage('Laser', laser)
        k = highgui.cvWaitKey(10)
        if k == " ":
            highgui.cvDestroyAllWindows()
            highgui.cvReleaseCapture(capture)
            sys.exit()
if go:
    init()
    print getBattery()
    while True:
        frame = highgui.cvQueryFrame(capture)
        if frame is None:
            # no image captured... end the processing
            break
        if image is None:
            # create the images we need
            image = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3)
            image.origin = frame.origin
            hsv = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3)
            hue = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
            mask = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
            backproject = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
            hist = cv.cvCreateHist([hdims], cv.CV_HIST_ARRAY, hranges, 1)
        # flip the image
        cv.cvFlip(frame, image, 1)
        cv.cvCvtColor(image, hsv, cv.CV_BGR2HSV)
        cv.cvLine(image, cv.cvPoint(0, image.height / 2), cv.cvPoint(image.width, image.height / 2), cv.CV_RGB(0, 255, 0), 2, 8, 0)
from opencv import highgui

hmin = 4
hmax = 18

highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
#highgui.cvNamedWindow('Hue', highgui.CV_WINDOW_AUTOSIZE)
#highgui.cvCreateTrackbar("hmin Trackbar", "Hue", hmin, 180, change_hmin)
#highgui.cvCreateTrackbar("hmax Trackbar", "Hue", hmax, 180, change_hmax)
print "grabbing camera"
capture = highgui.cvCreateCameraCapture(0)
print "found camera"
time.sleep(1)
frame = highgui.cvQueryFrame(capture)
frameSize = cv.cvGetSize(frame)
print "frameSize =", frameSize
time.sleep(1)
cam_width = highgui.cvGetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH)
cam_height = highgui.cvGetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT)
print "camera cam_height =", cam_height
print "camera cam_width =", cam_width
highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH, 320)
highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT, 240)
time.sleep(1)
cam_width = highgui.cvGetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH)
cam_height = highgui.cvGetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT)
def SetBinary(self, t):
    self.drawimg = cv.cvCreateImage(cv.cvGetSize(self.img), 8, 3)
    cv.cvThreshold(self.img, self.drawimg, t, 255, cv.CV_THRESH_BINARY)
def harrisResponse(frame):
    """pyvision/point/DetectorHarris.py Runs at 10.5 fps..."""
    #gray = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)
    #corners = cv.cvCreateImage(cv.cvGetSize(image), 32, 1)
    #cv.cvCvtColor(image, gray, cv.CV_BGR2GRAY)
    #cv.cvCornerHarris(gray, corners, 15)
    # This could be done in a persistent way:
    # create the images we need
    image = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3)
    grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    prev_grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    prev_pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    eig = cv.cvCreateImage(cv.cvGetSize(frame), cv.IPL_DEPTH_32F, 1)
    temp = cv.cvCreateImage(cv.cvGetSize(frame), cv.IPL_DEPTH_32F, 1)
    points = [[], []]
    # copy the frame, so we can draw on it
    cv.cvCopy(frame, image)
    # create a grey version of the image
    cv.cvCvtColor(image, grey, cv.CV_BGR2GRAY)
    # search the good points
    points[1] = cv.cvGoodFeaturesToTrack(grey, eig, temp, MAX_COUNT, quality, min_distance, None, 3, 0, 0.04)
    # refine the corner locations
    cv.cvFindCornerSubPix(grey, points[1], cv.cvSize(win_size, win_size), cv.cvSize(-1, -1), cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
    if len(points[0]) > 0:
        # we have points, so display them
        # calculate the optical flow
        [points[1], status], something = cv.cvCalcOpticalFlowPyrLK(
            prev_grey, grey, prev_pyramid, pyramid,
            points[0], len(points[0]), (win_size, win_size), 3,
            len(points[0]), None,
            cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03),
            flags)
        # initializations
        point_counter = -1
        new_points = []
        for the_point in points[1]:
            # go through all the points
            # increment the counter
            point_counter += 1
            if add_remove_pt:
                # we have a point to add, so see if it is close to
                # another one. If yes, don't use it
                dx = pt.x - the_point.x
                dy = pt.y - the_point.y
                if dx * dx + dy * dy <= 25:
                    # too close
                    add_remove_pt = 0
                    continue
            if not status[point_counter]:
                # we will disable this point
                continue
            # this point is a correct point
            new_points.append(the_point)
            # draw the current point
            cv.cvCircle(image, cv.cvPointFrom32f(the_point), 3, cv.cvScalar(0, 255, 0, 0), -1, 8, 0)
        # set back the points we keep
        points[1] = new_points
    # swapping
    prev_grey, grey = grey, prev_grey
    prev_pyramid, pyramid = pyramid, prev_pyramid
    points[0], points[1] = points[1], points[0]
    return image
def blob_identification(binary_image):
    from opencv.highgui import cvSaveImage, cvLoadImageM
    from opencv.cv import cvCreateImage, cvGetSize, cvCreateMat, cvSet, CV_RGB, cvResize
    from Blob import CBlob
    from BlobResult import CBlobResult
    from classification import classification
    from os import chdir, environ
    path = environ.get("HOME")
    frame_size = cvGetSize(binary_image)
    blo = cvCreateImage(frame_size, 8, 1)
    resblo = cvCreateMat(240, 320, binary_image.type)
    mask = cvCreateImage(frame_size, 8, 1)
    cvSet(mask, 255)
    myblobs = CBlobResult(binary_image, mask, 0, True)
    myblobs.filter_blobs(325, 2000)
    blob_count = myblobs.GetNumBlobs()
    count = 0
    pixr = []
    pixrm = []
    for i in range(blob_count):
        value = []
        rowval = []
        colval = []
        cvSet(blo, 0)
        my_enum_blob = myblobs.GetBlob(i)
        my_enum_blob.FillBlob(blo, CV_RGB(255, 0, 255), 0, 0)
        cvSet(resblo, 0)
        cvResize(blo, resblo, 1)
        for rowitem in range(resblo.rows):
            for colitem in range(resblo.cols):
                if resblo[rowitem, colitem] != 0:
                    rowval.append(rowitem)
                    colval.append(colitem)
                    value.append(resblo[rowitem, colitem])
        pixr.append(rowval[0])
        pixrm.append(rowval[-1])
        rowmin = min(rowval)
        rowedit = []
        for item in rowval:
            rowedit.append(item - rowmin)
        coledit = []
        colmin = min(colval)
        for item in colval:
            coledit.append(int(item) - colmin)
        rowmax = max(rowedit)
        colmax = max(colval) - colmin
        moved = cvCreateMat(rowmax + 10, colmax + 10, blo.type)
        cvSet(moved, 0)
        for i in range(len(rowval)):
            moved[int(rowedit[i]) + 5, int(coledit[i]) + 5] = int(value[i])
        chdir(path + "/alpr/latest/blobs")
        cvSaveImage("pic" + str(count) + ".png", moved)
        count += 1
    avoid = classification(pixr, pixrm)
    blob_image = cvCreateImage(frame_size, 8, 1)
    cvSet(blob_image, 0)
    for i in range(blob_count):
        if i not in avoid:
            my_enum_blob = myblobs.GetBlob(i)
            my_enum_blob.FillBlob(blob_image, CV_RGB(255, 0, 255), 0, 0)
    cvSaveImage("blob.jpg", blob_image)
    return
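# Hedged sketch: run the blob pass on a pre-thresholded plate image loaded as
# a single-channel CvMat ('plate_binary.png' is a placeholder; the 0 flag
# loads it as grayscale):
def _demo_blob_identification():
    from opencv.highgui import cvLoadImageM
    binary = cvLoadImageM('plate_binary.png', 0)
    blob_identification(binary)  # writes pic<N>.png per blob, plus blob.jpg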
highgui.cvMoveWindow('3-lisser-Smooth', 640, 0)
highgui.cvMoveWindow('4-lisser-And', 640, 280)

# trackbars for adjusting the tuning variables
highgui.cvCreateTrackbar("nombre division", "Camera", get_nb_div(), 6, set_nb_div)
highgui.cvCreateTrackbar("seuil binarisation", "Binarisation", get_seuil(), 255, set_seuil)
highgui.cvCreateTrackbar("gain", "2-amplifie", get_gain(), 100, set_gain)
#highgui.cvCreateTrackbar("param lissage", "3-lisser", 3, 3, set_param_liss)
#highgui.cvCreateTrackbar("param 2 lissage", "3-lisser", 1, 10, set_param2_liss)

############################# GO WORK ######################################

frame = highgui.cvQueryFrame(capture)
frame_size = cv.cvGetSize(frame)
hauteur_image = cv.cvGetSize(frame).height
largeur_image = cv.cvGetSize(frame).width
print "hauteur_image:" + str(hauteur_image) + " largeur_image:" + str(largeur_image) + " depth:" + str(frame.depth)
print "frames per second: " + str(highgui.CV_CAP_PROP_FPS)
print ""

font = cv.cvInitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0)

frameGray = cv.cvCreateImage(frame_size, frame.depth, 1)
##frameGrayBg = cv.cvCreateImage(frame_size, frame.depth, 1)
framewithoutbg = cv.cvCreateImage(frame_size, frame.depth, 1)
framemul = cv.cvCreateImage(frame_size, frame.depth, 1)
framelisser = cv.cvCreateImage(frame_size, frame.depth, 1)