Example 1
def findEdges(original, out, threshold1 = 100, threshold2 = None):
	"""Return a new edge detected image with a specified threshold"""
	warnings.warn("Use findBWEdges instead unless you really need colored edges.", DeprecationWarning)

	# Default threshold2 to three times threshold1 when not given
	if threshold2 is None:
		threshold2 = threshold1 * 3

	# Create two single-channel pictures: one for a b/w copy
	# and one for storing the edges found in the b/w picture
	gray = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)
	edge = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)

	# Create the b/w copy of the original
	cv.cvCvtColor(original, gray, cv.CV_BGR2GRAY)

	# Blur the b/w copy, but put the result into edge pic
	cv.cvSmooth(gray, edge, cv.CV_BLUR, 3, 3, 0)

	# Invert the b/w copy of the original, overwriting the
	# blurred copy. This will make edges stand out
	cv.cvNot(gray, edge)

	# Run an edge-finding algorithm called 'Canny'
	# It will analyse the first argument and store the
	# resulting picture in the second argument
	cv.cvCanny(gray, edge, threshold1, threshold2)

	# We initialize our out-image to black
	cv.cvSetZero(out)

	# Finally, we use the found edges, which are b/w, as
	# a mask for copying the colored edges from the original
	# to the out-image
	cv.cvCopy(original, out, edge)
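
A minimal usage sketch, assuming the old OpenCV 1.x swig bindings (cv and highgui imported from the opencv package) and a hypothetical input file:

from opencv import cv, highgui

image = highgui.cvLoadImage("input.png")           # hypothetical input file
out = cv.cvCreateImage(cv.cvGetSize(image), 8, 3)  # 3-channel output buffer
findEdges(image, out, threshold1=70)               # threshold2 defaults to 3 * 70
highgui.cvSaveImage("edges.png", out)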
Example 2
def analyzeCut(scaleImage, edgeImage, cut):
	"""Extract the interesting features respecting the cut"""

	# Set up constraints
	constraints = regionSelector.Constraints(cv.cvGetSize(scaleImage), cut, margin, superMargin, 0.002, 0.25)

	# Create temporary images
	blurImage = cv.cvCreateImage(cv.cvGetSize(scaleImage), 8, 3)
	workImage = cv.cvCreateImage(cv.cvGetSize(scaleImage), 8, 3)

	# Create a blurred copy of the original
	cv.cvSmooth(scaleImage, blurImage, cv.CV_BLUR, 3, 3, 0)

	# Superimpose the edges onto the blurred image
	cv.cvNot(edgeImage, edgeImage)
	cv.cvCopy(blurImage, workImage, edgeImage)

	# Get the edges back to white
	cv.cvNot(edgeImage, edgeImage)

	# We're done with the blurred image now
	cv.cvReleaseImage(blurImage)

	# Retrieve the regions touching the cut
	component_dictionary = featureDetector.ribbonFloodFill(scaleImage, edgeImage, workImage, cut, margin, lo, up)

	# Clean up
	cv.cvReleaseImage(workImage)

	# Prune components
	newComponents = regionSelector.pruneRegions(component_dictionary, constraints)

	# Return the dictionary of accepted components
	#transformer.translateBoundingBoxes(newComponents, 1)
	return newComponents
Example 3
File: chroma.py Project: bmiro/vpc
def getFilter(frameWidth, frameHeight):
    cvNamedWindow("Filtred")
    
    cvCreateTrackbar("hmax", "Filtred", getHlsFilter('hmax'), 180, trackBarChangeHmax)
    cvCreateTrackbar("hmin", "Filtred", getHlsFilter('hmin'), 180, trackBarChangeHmin)
    #cvCreateTrackbar("lmax", "Filtred", hlsFilter['lmax'], 255, trackBarChangeLmax)
    #cvCreateTrackbar("lmin", "Filtred", hlsFilter['lmin'], 255, trackBarChangeLmin)
    cvCreateTrackbar("smax", "Filtred", getHlsFilter('smax'), 255, trackBarChangeSmax)
    cvCreateTrackbar("smin", "Filtred", getHlsFilter('smin'), 255, trackBarChangeSmin)

    cvSetMouseCallback("Filtred", mouseClick, None)
    
    frame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    hlsFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    filtredFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)

    mask = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)

    hFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    lFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    sFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    
    ThHFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThLFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThSFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    
    key = -1
    while key == -1: 
        if not cvGrabFrame(CAM):
            print "Could not grab a frame"
            break
        frame = cvQueryFrame(CAM)
        
        cvCvtColor(frame, hlsFrame, CV_BGR2HLS)
    
        cvSplit(hlsFrame, hFrame, lFrame, sFrame, None)
        
        pixelInRange(hFrame, getHlsFilter('hmin'), getHlsFilter('hmax'), 0, 180, ThHFrame) 
        #pixelInRange(lFrame, getHlsFilter('lmin'), getHlsFilter('lmax'), 0, 255, ThLFrame)
        pixelInRange(sFrame, getHlsFilter('smin'), getHlsFilter('smax'), 0, 255, ThSFrame)
        
        cvSetZero(mask)        
        cvAnd(ThHFrame, ThSFrame, mask)
        
        cvSetZero(filtredFrame)
        
        cvCopy(frame, filtredFrame, mask)
        
        cvShowImage("Filtred", filtredFrame)

        key = cvWaitKey(10)
        if key == 'r':
            key = -1
            resetHlsFilter()
            
    cvDestroyWindow("Filtred")    
Example 4
def _detect(image):
    """ Detects faces on `image`
    Parameters:
        @image: image file path

    Returns:
        [((x1, y1), (x2, y2)), ...] List of coordinates for the top-left
                                    and bottom-right corners
    """
    # the OpenCV API says this function is obsolete, but we can't
    # cast the output of cvLoad to a HaarClassifierCascade, so we use
    # this anyway; the size parameter is ignored
    capture = cvCreateFileCapture(image)

    if not capture:
        return []

    frame = cvQueryFrame(capture)
    if not frame:
        return []

    img = cvCreateImage(cvSize(frame.width, frame.height), IPL_DEPTH_8U,
                        frame.nChannels)
    cvCopy(frame, img)

    # allocate temporary images
    gray = cvCreateImage((img.width, img.height), COPY_DEPTH, COPY_CHANNELS)
    width, height = (cvRound(img.width / IMAGE_SCALE),
                     cvRound(img.height / IMAGE_SCALE))
    small_img = cvCreateImage((width, height), COPY_DEPTH, COPY_CHANNELS)

    # convert color input image to grayscale
    cvCvtColor(img, gray, CV_BGR2GRAY)

    # scale input image for faster processing
    cvResize(gray, small_img, CV_INTER_LINEAR)
    cvEqualizeHist(small_img, small_img)
    cvClearMemStorage(STORAGE)

    coords = []
    for haar_file in CASCADES:
        cascade = cvLoadHaarClassifierCascade(haar_file, cvSize(1, 1))
        if cascade:
            faces = cvHaarDetectObjects(small_img, cascade, STORAGE,
                                        HAAR_SCALE, MIN_NEIGHBORS, HAAR_FLAGS,
                                        MIN_SIZE) or []
            for face_rect in faces:
                # the input to cvHaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                x, y = face_rect.x, face_rect.y
                pt1 = (int(x * IMAGE_SCALE), int(y * IMAGE_SCALE))
                pt2 = (int((x + face_rect.width) * IMAGE_SCALE),
                       int((y + face_rect.height) * IMAGE_SCALE))
                coords.append((pt1, pt2))
    return coords
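
The _detect function above relies on module-level constants defined elsewhere in the file. Representative values, shown purely as assumptions to make the snippet self-contained:

from opencv.cv import *
from opencv.highgui import *

IMAGE_SCALE   = 1.3                       # downscale factor for faster detection
HAAR_SCALE    = 1.2                       # scale step between detection passes
MIN_NEIGHBORS = 2                         # rectangle-grouping threshold
HAAR_FLAGS    = CV_HAAR_DO_CANNY_PRUNING  # skip regions with few edges
MIN_SIZE      = cvSize(20, 20)            # smallest face to search for
COPY_DEPTH    = IPL_DEPTH_8U              # depth of the grayscale work copies
COPY_CHANNELS = 1                         # grayscale: a single channel
CASCADES      = ["haarcascade_frontalface_alt.xml"]  # hypothetical cascade path
STORAGE       = cvCreateMemStorage(0)     # shared detection work buffer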
Example 5
	def read(self) :
		frame=self.input.read()
		if self.debug :
			raw_frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,frame.nChannels)
			cv.cvCopy(frame,raw_frame,None)
			self.raw_frame_surface=pygame.image.frombuffer(frame.imageData,(frame.width,frame.height),'RGB')

		if self.enabled :
			cv_rs = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)

			# convert color
			cv.cvCvtColor(frame,cv_rs,cv.CV_BGR2GRAY)

			# invert the image
			cv.cvSubRS(cv_rs, 255, cv_rs, None);

			# threshold the image
			frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
			cv.cvThreshold(cv_rs, frame, self.threshold, 255, cv.CV_THRESH_BINARY)

			if self.debug :
				thresh_frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,3)
				cv.cvCvtColor(frame,thresh_frame,cv.CV_GRAY2RGB)
				self.thresh_frame_surface=pygame.image.frombuffer(thresh_frame.imageData,(frame.width,frame.height),'RGB')

			# I think these functions are too specialized for transforms
			cv.cvSmooth(frame,frame,cv.CV_GAUSSIAN,3, 0, 0, 0 )
			cv.cvErode(frame, frame, None, 1)
			cv.cvDilate(frame, frame, None, 1)

			num_contours,contours=cv.cvFindContours(frame,self.storage,cv.sizeof_CvContour,cv.CV_RETR_LIST,cv.CV_CHAIN_APPROX_NONE,cv.cvPoint(0,0))
			if contours is None :
				return []
			else :
				contours = cv.cvApproxPoly( contours, cv.sizeof_CvContour, self.storage, cv.CV_POLY_APPROX_DP, 3, 1 );
				if contours is None :
					return []
				else :
					final_contours = []
					for c in contours.hrange() :
						area = abs(cv.cvContourArea(c))
						#self.debug_print('Polygon Area: %f'%area)
						if area >= self.min_area :
							lst = []
							for pt in c :
								lst.append((pt.x,pt.y))
							final_contours.append(lst)
					return final_contours

		return []
Example 6
File: chroma.py Project: bmiro/vpc
def getBackground(frameWidth, frameHeight):
    cvNamedWindow("Background")
    
    text = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    frame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    background = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)

    font = cvInitFont(CV_FONT_HERSHEY_COMPLEX, 1.0, 1.0, 0.0, 2)
    pt1 = cvPoint(50, 100)
    pt2 = cvPoint(50, 150)
    center = cvPoint(frameWidth/2, frameHeight/2)
    cvPutText(text, "Press enter, run away and wait", pt1, font, CV_RGB(150, 100, 150))
    cvPutText(text, str(delayS) + " seconds to capture background", pt2, font, CV_RGB(150, 100, 150))
    cvShowImage("Background", text)
        
    key = -1
    while key == -1:
        key = cvWaitKey(10)    
        
    like = False
    while not like:
        for i in range(delayS):
            cvZero(text)
            cvPutText(text, str(delayS-i), center, font, CV_RGB(150, 100, 150))
            cvShowImage("Background", text)
            cvWaitKey(1000)
    
        csut = camStartUpTime
        while (csut): # Keep capturing frames to give the cam time to auto-adjust colors
            if not cvGrabFrame(CAM):
                print "Could not grab a frame"
                break
            cvWaitKey(10)
            csut -= 1
        frame = cvQueryFrame(CAM)
        cvCopy(frame, background)
        
        cvCopy(frame, text)
        cvPutText(text, "Is correct? [y/n]", center, font, CV_RGB(150, 100, 150))

        cvShowImage("Background", text)
        
        key = -1
        while key != 'n' and key != 'y':
            key = cvWaitKey(10)
            if key == 'y': 
                like = True
                
    cvDestroyWindow("Background")
    return background
Example 7
def analyzeCut(original, edgeImage, cut, settings, showBlobs=False):
	"""Extract the interesting features in the vicinity of a given cut"""
	# Get all data from the settings
	lo = settings.lo
	up = settings.up

	# Set up the margin with respect to the cut
	margin = marginCalculator.getPixels(original, cut, settings.marginPercentage)
	superMargin = 0
	# ^^ We don't use superMargin

	# Set up constraints
	constraints = regionSelector.Constraints(cv.cvGetSize(original), cut, margin, superMargin, 0.002, 0.25)

	# Create temporary images
	blurImage = cv.cvCreateImage(cv.cvGetSize(original), 8, 3)
	workImage = cv.cvCreateImage(cv.cvGetSize(original), 8, 3)

	# Create a blurred copy of the original
	cv.cvSmooth(original, blurImage, cv.CV_BLUR, 3, 3, 0)

	# Superimpose the edges onto the blurred image
	cv.cvNot(edgeImage, edgeImage)
	cv.cvCopy(blurImage, workImage, edgeImage)

	# We're done with the blurred image now
	cv.cvReleaseImage(blurImage)

	# Get the edges back to white
	cv.cvNot(edgeImage, edgeImage)

	# Retrieve the regions touching the cut
	component_dictionary = featureDetector.ribbonFloodFill(original, edgeImage, workImage, cut, margin, lo, up)

	#start expanded

	# Prune components BEFORE we delete the workImage
	tmpnewComponents = regionSelector.pruneExpandedRegions(component_dictionary, constraints)
	newComponents = regionSelector.pruneExpandedRagionsto(tmpnewComponents, constraints, cut, workImage)

	# Clean up only if we do not return the image
	if not showBlobs:
		cv.cvReleaseImage(workImage)

	# Return the dictionary of accepted components or both
	if not showBlobs:
		return newComponents
	else:
		return (workImage, newComponents)
Example 8
def on_trackbar (position):
    # The following two lines are probably unnecessary
    cv.cvSmooth (gray, edge, cv.CV_BLUR, 3, 3, 0)    # smooth the image
    cv.cvNot (gray, edge)     # bitwise inversion of the array elements

    # run the edge detector on gray scale
    cv.cvCanny (gray, edge, position, position * 3, 3)   # Canny edge detection

    # reset
    cv.cvSetZero (col_edge)   # clear the array

    # copy edge points
    cv.cvCopy (image, col_edge, edge)   # the edge argument masks the copy

    # show the image
    highgui.cvShowImage (win_name, col_edge)
Example 9
def on_trackbar(position):

    cv.cvSmooth(gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.cvNot(gray, edge)

    # run the edge detector on gray scale
    cv.cvCanny(gray, edge, position, position * 3, 3)

    # reset
    cv.cvSetZero(col_edge)

    # copy edge points
    cv.cvCopy(image, col_edge, edge)

    # show the image
    highgui.cvShowImage(win_name, col_edge)
Example 10
File: chroma.py Project: bmiro/vpc
def startChroma(background, frameWidth, frameHeight):
    #cvNamedWindow("Original")
    cvNamedWindow("Chroma")
    
    hlsFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    transparency = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)

    mask = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)

    hFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    lFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    sFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    
    ThHFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThLFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThSFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    
    key = -1
    while key == -1:
        if not cvGrabFrame(CAM):
            print "Could not grab a frame"
            break
        frame = cvQueryFrame(CAM)
        
        cvCvtColor(frame, hlsFrame, CV_BGR2HLS)
    
        cvSplit(hlsFrame, hFrame, lFrame, sFrame, None)

        pixelInRange(hFrame, getHlsFilter('hmin'), getHlsFilter('hmax'), 0, 180, ThHFrame) 
        #pixelInRange(lFrame, getHlsFilter('lmin'), getHlsFilter('lmax'), 0, 255, ThLFrame)
        pixelInRange(sFrame, getHlsFilter('smin'), getHlsFilter('smax'), 0, 255, ThSFrame)

        cvAnd(ThHFrame, ThSFrame, mask)
               
        cvCopy(background, frame, mask)
        
        cvShowImage("Chroma", frame)

        key = cvWaitKey(10)
        
    cvDestroyWindow("Chroma")
Example 11
    def timerEvent(self, ev):
        # Fetch a frame from the video camera
        frame = highgui.cvQueryFrame(self.cap)
        img_orig = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                    cv.IPL_DEPTH_8U, frame.nChannels)
        if (frame.origin == cv.IPL_ORIGIN_TL):
            cv.cvCopy(frame, img_orig)
        else:
            cv.cvFlip(frame, img_orig, 0)

        # Create a grey frame to clarify data

        #img = self.detect_face(img_orig)
        img = self.detect_squares(img_orig)
        img_pil = adaptors.Ipl2PIL(img)
        s = StringIO()
        img_pil.save(s, "PNG")
        s.seek(0)
        q_img = QImage()
        q_img.loadFromData(s.read())
        bitBlt(self, 0, 0, q_img)
Example 12
	def read(self):

		frame = self.input.read()
		if self.debug :
			raw_frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,frame.nChannels)
			cv.cvCopy(frame,raw_frame,None)
			self.raw_frame_surface=pygame.image.frombuffer(frame.imageData,(frame.width,frame.height),'RGB')

		if self.enabled :

			cvt_red = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
			cv.cvSplit(frame,None,None,cvt_red,None)

			if self.debug :
				red_frame = cv.cvCreateImage(cv.cvSize(cvt_red.width,cvt_red.height),cvt_red.depth,3)
				cv.cvMerge(cvt_red,None,None,None,red_frame)
				self.red_frame_surface = pygame.image.frombuffer(red_frame.imageData,(cvt_red.width,cvt_red.height),'RGB')

			# I think these functions are too specialized for transforms
			cv.cvSmooth(cvt_red,cvt_red,cv.CV_GAUSSIAN,3, 0, 0, 0 )
			cv.cvErode(cvt_red, cvt_red, None, 1)
			cv.cvDilate(cvt_red, cvt_red, None, 1)

			if self.debug :
				thresh_frame = cv.cvCreateImage(cv.cvSize(cvt_red.width,cvt_red.height),cvt_red.depth,3)
				cv.cvMerge(cvt_red,None,None,None,thresh_frame)
				self.thresh_frame_surface = pygame.image.frombuffer(cvt_red.imageData,(cvt_red.width,cvt_red.height),'RGB')

			cvpt_min = cv.cvPoint(0,0)
			cvpt_max = cv.cvPoint(0,0)
			t = cv.cvMinMaxLoc(cvt_red,cvpt_min,cvpt_max)

			print t
			if cvpt_max.x == 0 and cvpt_max.y == 0 :
				return []
			return [(cvpt_max.x,cvpt_max.y)]
Example 13
    def cb_focal(v):
        global variable_focal
        variable_focal = v
    def cb_base(v):
        global variable_base
        variable_base = v/10.0
    xmatch = size.width / 2 + 1
    ymatch = size.height / 2 + 1
    highgui.cvSetMouseCallback("depthmatch - left", mousecb)
    highgui.cvCreateTrackbar("ROI", "depthmatch - left", variable_roi, size.width, cb_roi)
    highgui.cvCreateTrackbar("Buffer", "depthmatch - left", variable_buf, size.width, cb_buf)
    highgui.cvCreateTrackbar("Focal Length", "depthmatch - left", variable_focal, 1000, cb_focal)
    highgui.cvCreateTrackbar("Baseline/10", "depthmatch - left", variable_base, 1000, cb_base)
    leftdraw = cv.cvCreateImage(size, 8, 3)
    rightdraw = cv.cvCreateImage(size, 8, 3)
    while 1:
        depth = depthmatch(xmatch, ymatch, left, right, roi=variable_roi, buf=variable_buf,baseline=variable_base, focal_length=variable_focal)
        cv.cvCopy(left, leftdraw)
        cv.cvCopy(right, rightdraw)
        cv.cvLine(leftdraw, depth[1], depth[2], (0,255,0), 2)
        cv.cvPutText(leftdraw, "%2f(m) at (%2f,%2f)" % (depth[0][2],depth[0][0],depth[0][1]), (xmatch,ymatch), font, (0,0,255))
        cv.cvLine(rightdraw, depth[2], depth[2], (0,0,255), 5)
        highgui.cvShowImage("depthmatch - left", leftdraw)
        highgui.cvShowImage("depthmatch - right", rightdraw)
        print depth
        highgui.cvWaitKey(10)
        
        
if __name__ == "__main__" and test_number == 2:
    left = highgui.cvLoadImage(str(sys.argv[1]))
    right = highgui.cvLoadImage(str(sys.argv[2]))
    highgui.cvNamedWindow("Depth")
    depth = depthmatrix(left, right, 4)
Example 14
    def timerEvent(self, ev):
        # Fetch a frame from the video camera
        frame = highgui.cvQueryFrame(self.cap)
        img_orig = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                    cv.IPL_DEPTH_8U, frame.nChannels)
        if (frame.origin == cv.IPL_ORIGIN_TL):
            cv.cvCopy(frame, img_orig)
        else:
            cv.cvFlip(frame, img_orig, 0)

        # Create a grey frame to clarify data
        img_grey = cv.cvCreateImage(cv.cvSize(img_orig.width, img_orig.height),
                                    8, 1)
        cv.cvCvtColor(img_orig, img_grey, cv.CV_BGR2GRAY)
        # Detect objects within the frame
        self.faces_storage = cv.cvCreateMemStorage(0)
        faces = self.detect_faces(img_grey)
        self.circles_storage = cv.cvCreateMemStorage(0)
        circles = self.detect_circles(img_grey)
        self.squares_storage = cv.cvCreateMemStorage(0)
        squares = self.detect_squares(img_grey, img_orig)
        self.lines_storage = cv.cvCreateMemStorage(0)
        lines = self.detect_lines(img_grey, img_orig)

        # Draw faces
        if faces:
            for face in faces:
                pt1, pt2 = self.face_points(face)
                cv.cvRectangle(img_orig, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8,
                               0)

        # Draw lines
        if lines:
            for line in lines:
                cv.cvLine(img_orig, line[0], line[1], cv.CV_RGB(255, 255, 0),
                          3, 8)
        # Draw circles
        if circles:
            for circle in circles:
                cv.cvCircle(
                    img_orig,
                    cv.cvPoint(cv.cvRound(circle[0]), cv.cvRound(circle[1])),
                    cv.cvRound(circle[2]), cv.CV_RGB(0, 0, 255), 3, 8, 0)

        # Draw squares
        if squares:
            i = 0
            while i < squares.total:
                pt = []
                # read 4 vertices
                pt.append(squares[i])
                pt.append(squares[i + 1])
                pt.append(squares[i + 2])
                pt.append(squares[i + 3])
                ## draw the square as a closed polyline
                cv.cvPolyLine(img_orig, [pt], 1, cv.CV_RGB(0, 255, 0), 3,
                              cv.CV_AA, 0)
                i += 4

        # Resize the image to display properly within the window
        #	CV_INTER_NN - nearest-neighbor interpolation,
        #	CV_INTER_LINEAR - bilinear interpolation (used by default)
        #	CV_INTER_AREA - resampling using pixel area relation. (preferred for image decimation)
        #	CV_INTER_CUBIC - bicubic interpolation.
        img_display = cv.cvCreateImage(cv.cvSize(self.width(), self.height()),
                                       8, 3)
        cv.cvResize(img_orig, img_display, cv.CV_INTER_NN)
        img_pil = adaptors.Ipl2PIL(img_display)
        s = StringIO()
        img_pil.save(s, "PNG")
        s.seek(0)
        q_img = QImage()
        q_img.loadFromData(s.read())
        bitBlt(self, 0, 0, q_img)
Example 15
            # no image captured... end the processing
            break

        if image is None:
            # create the images we need
            image = cv.cvCreateImage (cv.cvGetSize (frame), 8, 3)
            grey = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1)
            prev_grey = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1)
            pyramid = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1)
            prev_pyramid = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1)
            eig = cv.cvCreateImage (cv.cvGetSize (frame), cv.IPL_DEPTH_32F, 1)
            temp = cv.cvCreateImage (cv.cvGetSize (frame), cv.IPL_DEPTH_32F, 1)
            points = [[], []]

        # copy the frame, so we can draw on it
        cv.cvCopy (frame, image)

        # create a grey version of the image
        cv.cvCvtColor (image, grey, cv.CV_BGR2GRAY)

        if night_mode:
            # night mode: only display the points
            cv.cvSetZero (image)

        if need_to_init:
            # we want to search all the good points
            # create the wanted images
            
            # search the good points
            points [1] = cv.cvGoodFeaturesToTrack (
                grey, eig, temp,
Example 16
print "Finding the golden means in the picture"

lines = lib.findGoldenMeans(cv.cvGetSize(image))
#lines = lib.findMeans(cv.cvGetSize(image), lib.PHI)

print "Test plot and line scanner methods"
points = lineScanner.naiveBWLineScanner(edges, lines[0])

#cv.cvSmooth(out, out, cv.CV_MEDIAN, 7, 7, 0)
cv.cvSmooth(image, blurImage, cv.CV_BLUR, 3, 3, 0)
#cv.cvSmooth(out, out, cv.CV_GAUSSIAN, 7, 7, 0)
#out = blurImage

# Superimpose the edges onto the blurred image
cv.cvNot(edges, edges)
cv.cvCopy(blurImage, out, edges)

# We're done with the blurred image now
#cv.cvReleaseImage(blurImage)

#print points[:0]
cut = lines[1]
margin = marginCalculator.getPixels(image, cut, 0.024)
component_dictionary = featureDetector.ribbonFloodFill(image, edges, out, cut, margin, lo, up)
#featureDetector.floodFillLine(image, out, points, cut, lo, up, {})
#flags = cv.CV_FLOODFILL_FIXED_RANGE
#flags = 4
color = lib.getRandomColor()
comp = cv.CvConnectedComp()
#cv.cvFloodFill(out, cv.cvPoint(x,y), color, cv.CV_RGB(lo,lo,lo), cv.CV_RGB(up,up,up),comp ,flags)#, None);
#lib.plot(out, cv.cvPoint(x,y), 3, lib.COL_RED)
Example 17
threshold2 = 70
out = cv.cvCreateImage(cv.cvGetSize(image), 8, 3)
edgeDetector.findEdges(image, out, threshold1, threshold2)

print "Finding the golden means in the picture"

lines = lib.findMeans(cv.cvGetSize(image))

print "Test plot and line scanner methods"
points = lineScanner.naiveLineScanner(out, image, lines[0])

out = highgui.cvLoadImage (filename)
outcopy = highgui.cvLoadImage (filename)

(out,areaOfBlobs) = featureDetector.floodFillLine(out, points, lines[0], lo, up)
copy = cv.cvCopy(out,outcopy)
#print "Finding the golden means in the picture"
#lines = lib.findMeans(cv.cvGetSize(out))

#print "Drawing the means"
#lib.drawLines(lines, out)

#out = out[:,points[0].x :]

listOverSizeOfBlobs = statestik.splitOpBlob(out,outcopy,points,lines[0])
#print listOverSizeOfBlobs
#startpoint = lines[0].getPoints()[0]
#points.append(lines[0].getPoints()[1])
#for point in points:
#	out = floofill.floofill(out, lowerThres, upperThres, startpoint, point, 1)
#	startpoint = point
Example 18
if not image:
	print "Error loading image '%s'" % filename
	print ""
	sys.exit(-1)

print "Finding edges using Canny"
bwe = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)
out = cv.cvCreateImage(cv.cvGetSize(image), 8, 3)
#edgeDetector.findEdges(image, out, threshold1, threshold2)
edgeDetector.findBWEdges(image, bwe, threshold1, threshold2)

# uncomment the line below if you need the image and the edges together
#cv.cvNot(bwe, bwe)

cv.cvCopy(image, out, bwe)

outname = "edgeDetectorTest"
orgname = "Original"
nystr = str(str(threshold1)+'-'+str(threshold2)+'.png')
print nystr
highgui.cvNamedWindow (outname, highgui.CV_WINDOW_AUTOSIZE)
highgui.cvNamedWindow (orgname, highgui.CV_WINDOW_AUTOSIZE)
highgui.cvSaveImage(nystr, out)

while True:
	
	highgui.cvShowImage (orgname, image)
	highgui.cvShowImage (outname, out)

	c = highgui.cvWaitKey(0)
Example 19
threshold2 = 2.6 * 70

# Set up the needed images
out = cv.cvCreateImage(cv.cvGetSize(image), 8, 3)
blur = cv.cvCreateImage(cv.cvGetSize(image), 8, 3)
edges = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)

# Blur the image
cv.cvSmooth(image, blur, cv.CV_BLUR, 3, 3, 0)

# Find edges in the original image
edgeDetector.findBWEdges(image, edges, threshold1, threshold2)

# Superimpose the edges onto the blurred image
cv.cvNot(edges, edges)
cv.cvCopy(blur, out, edges)

# Get the edges back to white
cv.cvNot(edges, edges)

print "Finding the golden means in the picture"

lines = lib.findGoldenMeans(cv.cvGetSize(image))

# Define cut
cut = lines[0]

# Set margin
margin = 15

components = featureDetector.ribbonFloodFill(image, edges, out, cut, margin, lo, up)
Example 20
def mask_image(image_to_mask, mask):
    # Start from a half-brightness copy of the input image
    ret_image = cv.cvCreateImage(cv.cvGetSize(image_to_mask), cv.IPL_DEPTH_8U, 3)
    cv.cvScale(image_to_mask, ret_image, 0.5, 0.0)
    # Restore full brightness wherever the mask is non-zero
    cv.cvCopy(image_to_mask, ret_image, mask)
    return ret_image
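
A short usage sketch, assuming img is an 8-bit BGR image and mask a single-channel image in which the region of interest is non-zero:

highlighted = mask_image(img, mask)         # the ROI keeps full brightness
highgui.cvShowImage("masked", highlighted)  # everything else appears dimmed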
Example 21
        if frame is None:
            # no image captured... end the processing
            break

        if image is None:
            # create the images we need
            image = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3)
            image.origin = frame.origin
            grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
            prev_grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
            pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
            prev_pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
            points = [[], []]

        # copy the frame, so we can draw on it
        cv.cvCopy(frame, image)

        # create a grey version of the image
        cv.cvCvtColor(image, grey, cv.CV_BGR2GRAY)

        if night_mode:
            # night mode: only display the points
            cv.cvSetZero(image)

        if need_to_init:
            # we want to search all the good points

            # create the wanted images
            eig = cv.cvCreateImage(cv.cvGetSize(grey), 32, 1)
            temp = cv.cvCreateImage(cv.cvGetSize(grey), 32, 1)
Example 22
def compute_saliency(image):
    global thresh
    global scale
    saliency_scale = int(math.pow(2,scale));
    bw_im1 = cv.cvCreateImage(cv.cvGetSize(image), cv.IPL_DEPTH_8U,1)
    cv.cvCvtColor(image, bw_im1, cv.CV_BGR2GRAY)
    bw_im = cv.cvCreateImage(cv.cvSize(saliency_scale,saliency_scale), cv.IPL_DEPTH_8U,1)
    cv.cvResize(bw_im1, bw_im)
    highgui.cvShowImage("BW", bw_im)
    realInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 1);
    imaginaryInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 1);
    complexInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 2);

    cv.cvScale(bw_im, realInput, 1.0, 0.0);
    cv.cvZero(imaginaryInput);
    cv.cvMerge(realInput, imaginaryInput, None, None, complexInput);

    dft_M = saliency_scale #cv.cvGetOptimalDFTSize( bw_im.height - 1 );
    dft_N = saliency_scale #cv.cvGetOptimalDFTSize( bw_im.width - 1 );

    dft_A = cv.cvCreateMat( dft_M, dft_N, cv.CV_32FC2 );
    image_Re = cv.cvCreateImage( cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    image_Im = cv.cvCreateImage( cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);

    # copy A to dft_A and pad dft_A with zeros
    tmp = cv.cvGetSubRect( dft_A, cv.cvRect(0,0, bw_im.width, bw_im.height));
    cv.cvCopy( complexInput, tmp, None );
    if(dft_A.width > bw_im.width):
        tmp = cv.cvGetSubRect( dft_A, cv.cvRect(bw_im.width,0, dft_N - bw_im.width, bw_im.height));
        cv.cvZero( tmp );
    
    cv.cvDFT( dft_A, dft_A, cv.CV_DXT_FORWARD, complexInput.height );
    cv.cvSplit( dft_A, image_Re, image_Im, None, None );
    
    # Compute the phase angle 
    image_Mag = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    image_Phase = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    

    #compute the phase of the spectrum
    cv.cvCartToPolar(image_Re, image_Im, image_Mag, image_Phase, 0)

    log_mag = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    cv.cvLog(image_Mag, log_mag)
    #Box filter the magnitude, then take the difference
    image_Mag_Filt = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    filt = cv.cvCreateMat(3,3, cv.CV_32FC1);
    cv.cvSet(filt,cv.cvScalarAll(-1.0/9.0))
    cv.cvFilter2D(log_mag, image_Mag_Filt, filt, cv.cvPoint(-1,-1))

    cv.cvAdd(log_mag, image_Mag_Filt, log_mag, None)
    cv.cvExp(log_mag, log_mag)
    cv.cvPolarToCart(log_mag, image_Phase, image_Re, image_Im,0);

    cv.cvMerge(image_Re, image_Im, None, None, dft_A)
    cv.cvDFT( dft_A, dft_A, cv.CV_DXT_INVERSE, complexInput.height)
            
    tmp = cv.cvGetSubRect( dft_A, cv.cvRect(0,0, bw_im.width, bw_im.height));
    cv.cvCopy( tmp, complexInput, None );
    cv.cvSplit(complexInput, realInput, imaginaryInput, None, None)
    min, max = cv.cvMinMaxLoc(realInput);
    #cv.cvScale(realInput, realInput, 1.0/(max-min), 1.0*(-min)/(max-min));
    cv.cvSmooth(realInput, realInput);
    threshold = thresh/100.0*cv.cvAvg(realInput)[0]
    cv.cvThreshold(realInput, realInput, threshold, 1.0, cv.CV_THRESH_BINARY)
    tmp_img = cv.cvCreateImage(cv.cvGetSize(bw_im1),cv.IPL_DEPTH_32F, 1)
    cv.cvResize(realInput,tmp_img)
    cv.cvScale(tmp_img, bw_im1, 255,0)
    return bw_im1
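
This appears to implement the spectral-residual saliency approach: subtract a local average from the log spectrum, invert the DFT, and threshold the response. A usage sketch, assuming the thresh and scale globals and the same import style as the rest of the file:

thresh = 10    # threshold, as a percentage of the mean response
scale = 6      # saliency computed on a 2**6 x 2**6 resized copy
img = highgui.cvLoadImage("scene.png")  # hypothetical input
saliency = compute_saliency(img)        # 8-bit mask, 255 = salient
highgui.cvShowImage("Saliency", saliency)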
Example 23
    highgui.cvNamedWindow (win_name, highgui.CV_WINDOW_AUTOSIZE)

    # create the trackbar
    highgui.cvCreateTrackbar (trackbar_name, win_name, 1, 100, on_trackbar)

    # show the image
    on_trackbar (0)

    frame_copy = None
    while True:
      image = highgui.cvQueryFrame( capture );
      if( not image ):
        break;
      if( not frame_copy ):
        frame_copy = cv.cvCreateImage( cv.cvSize(image.width,image.height),
                                    cv.IPL_DEPTH_8U, image.nChannels );
      if( image.origin == cv.IPL_ORIGIN_TL ):
        cv.cvCopy( image, frame_copy );
      else:
        cv.cvFlip( image, frame_copy, 0 );
      
      gray = cv.cvCreateImage (cv.cvSize (image.width, image.height), 8, 1)
      edge = cv.cvCreateImage (cv.cvSize (image.width, image.height), 8, 1)
      cv.cvCvtColor (image, gray, cv.CV_BGR2GRAY)
      highgui.cvShowImage (win_name, col_edge)
      
      if( highgui.cvWaitKey( 10 ) >= 0 ):
        break;

Example 24
def harrisResponse(frame):
    """pyvision/point/DetectorHarris.py
    Runs at 10.5 fps...
    """
    #gray = cv.cvCreateImage( cv.cvGetSize(image), 8, 1 )
    #corners = cv.cvCreateImage( cv.cvGetSize(image), 32, 1 )
    #cv.cvCvtColor( image, gray, cv.CV_BGR2GRAY )

    #cv.cvCornerHarris(gray,corners,15)

    # This could be done in a persistent way
    # create the images we need
    image = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3)
    grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    prev_grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    prev_pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    eig = cv.cvCreateImage(cv.cvGetSize(frame), cv.IPL_DEPTH_32F, 1)
    temp = cv.cvCreateImage(cv.cvGetSize(frame), cv.IPL_DEPTH_32F, 1)
    points = [[], []]

    # copy the frame, so we can draw on it
    cv.cvCopy(frame, image)

    # create a grey version of the image
    cv.cvCvtColor(image, grey, cv.CV_BGR2GRAY)

    # search the good points
    points[1] = cv.cvGoodFeaturesToTrack(grey, eig, temp, MAX_COUNT, quality,
                                         min_distance, None, 3, 0, 0.04)

    # refine the corner locations
    cv.cvFindCornerSubPix(
        grey, points[1], cv.cvSize(win_size, win_size), cv.cvSize(-1, -1),
        cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))

    if len(points[0]) > 0:
        # we have points, so display them

        # calculate the optical flow
        [points[1], status], something = cv.cvCalcOpticalFlowPyrLK(
            prev_grey, grey, prev_pyramid, pyramid, points[0], len(points[0]),
            (win_size, win_size), 3, len(points[0]), None,
            cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20,
                              0.03), flags)

        # initializations
        point_counter = -1
        new_points = []

        for the_point in points[1]:
            # go through all the points

            # increment the counter
            point_counter += 1

            if add_remove_pt:
                # we have a point to add, so see if it is close to
                # another one. If yes, don't use it
                dx = pt.x - the_point.x
                dy = pt.y - the_point.y
                if dx * dx + dy * dy <= 25:
                    # too close
                    add_remove_pt = 0
                    continue

            if not status[point_counter]:
                # we will disable this point
                continue

            # this point is a correct point
            new_points.append(the_point)

            # draw the current point
            cv.cvCircle(image, cv.cvPointFrom32f(the_point), 3,
                        cv.cvScalar(0, 255, 0, 0), -1, 8, 0)

        # set back the points we keep
        points[1] = new_points

    # swapping
    prev_grey, grey = grey, prev_grey
    prev_pyramid, pyramid = pyramid, prev_pyramid
    points[0], points[1] = points[1], points[0]

    return image
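
harrisResponse reads several globals that are not shown; the names match OpenCV's lkdemo sample, so representative values (assumptions for illustration) would be:

MAX_COUNT = 500      # maximum number of corners to track
quality = 0.01       # minimal accepted corner quality
min_distance = 10    # minimal distance between returned corners
win_size = 10        # half-size of the subpixel / optical-flow search window
flags = 0            # extra flags for cvCalcOpticalFlowPyrLK
add_remove_pt = 0    # set when a point is manually added with the mouse
pt = None            # the point to add while add_remove_pt is set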