Example #1
 def __findContour(self, filename): # find the contours of the image and save each contour's points in self.allcurve
     self.img = highgui.cvLoadImage (filename)
     self.grayimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8,1)
     self.drawimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8,3)
     cv.cvCvtColor (self.img, self.grayimg, cv.CV_BGR2GRAY)
     cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
     cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
     cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
     cv.cvThreshold( self.grayimg, self.grayimg, self.threshold, self.threshold +100, cv.CV_THRESH_BINARY )
     cv.cvZero(self.drawimg)
     storage = cv.cvCreateMemStorage(0)
     nb_contours, cont = cv.cvFindContours (self.grayimg,
         storage,
         cv.sizeof_CvContour,
         cv.CV_RETR_LIST,
         cv.CV_CHAIN_APPROX_NONE,
         cv.cvPoint (0,0))
         
     cv.cvDrawContours (self.drawimg, cont, cv.cvScalar(255,255,255,0), cv.cvScalar(255,255,255,0), 1, 1, cv.CV_AA, cv.cvPoint (0, 0))
     self.allcurve = []
     idx = 0
     for c in cont.hrange():
         PointArray = cv.cvCreateMat(1, c.total  , cv.CV_32SC2)
         PointArray2D32f= cv.cvCreateMat( 1, c.total  , cv.CV_32FC2)
         cv.cvCvtSeqToArray(c, PointArray, cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
         fpoints = []
         for i in range(c.total):
             kp = myPoint()
             kp.x = cv.cvGet2D(PointArray,0, i)[0]
             kp.y = cv.cvGet2D(PointArray,0, i)[1]
             kp.index = idx
             idx += 1
             fpoints.append(kp)
         self.allcurve.append(fpoints)
     self.curvelength = idx
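
For readers porting this snippet: a minimal sketch of the same contour-extraction flow in the modern cv2 API (the filename and the threshold of 128 are illustrative; cv2.findContours returns numpy point arrays instead of a CvSeq chain):

    import cv2

    img = cv2.imread("shapes.png")                      # illustrative filename
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.blur(gray, (9, 9))
    _, binary = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
    # OpenCV 4.x returns (contours, hierarchy); 3.x prepends the image
    contours, _ = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    allcurve = [c.reshape(-1, 2) for c in contours]     # one (N, 2) array per curve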
Example #2
File: camera.py Project: fnv/mousetrap
    def sync(self):
        """
        Synchronizes the Capture image with the Camera image

        Arguments:
        - self: The main object pointer.
        """

        self.__camera.query_image()

        if not self.__image:
            self.__images_cn   = { 1 : cv.cvCreateImage ( self.__camera.imgSize, 8, 1 ),
                                   3 : cv.cvCreateImage ( self.__camera.imgSize, 8, 3 ),
                                   4 : cv.cvCreateImage ( self.__camera.imgSize, 8, 4 ) }

        self.__color       = "bgr"
        self.__image_orig  = self.__image = self.__camera.img

        if self.__color != self.__color_set:
            self.__image = self.color("rgb")

        # TODO: Workaround, I have to fix it
        if len(self.__camera.img_lkpoints["last"]) > 0:
            self.__camera.show_lkpoints()

        self.__camera.swap_lkpoints()

        self.show_rectangles(self.rectangles())

        self.__image = self.resize(200, 160)

        return self.async
Example #3
    def evalCurrentImageGradient(self):
        key = {}
        try:
            self._cnt.emit(qt.PYSIGNAL("getImage"), (key, ))
            qimage = key['image']
        except KeyError:
            return
        ##EVAL IMAGE GRADIENT
        x, y = self._cnt.focusPointSelected
        rectangleSize = self._cnt.focusRectangleSize
        im = qimage.copy(x, y, rectangleSize, rectangleSize)  # QImage.copy takes (x, y, width, height)
        srcColorImage = qtTools.getImageOpencvFromQImage(im)

        if srcColorImage.nChannels > 1:
            srcImage = cv.cvCreateImage(
                cv.cvSize(srcColorImage.width, srcColorImage.height),
                srcColorImage.depth, 1)
            cv.cvCvtColor(srcColorImage, srcImage, cv.CV_RGB2GRAY)
        else:  # In Fact It's a grey image
            srcImage = srcColorImage

        destImage = cv.cvCreateImage(
            cv.cvSize(srcImage.width, srcImage.height), cv.IPL_DEPTH_16S, 1)
        cv.cvSobel(srcImage, destImage, 1, 0, 3)
        array = numpy.fromstring(destImage.imageData_get(), dtype=numpy.int16)
        focusQuality = array.std()
        return focusQuality
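
The gradient-standard-deviation focus measure above ports cleanly to cv2 and numpy; a minimal sketch, assuming an 8-bit grayscale ROI has already been cropped out:

    import cv2

    def focus_quality(gray_roi):
        # 16-bit signed output keeps negative gradients, like IPL_DEPTH_16S above
        grad_x = cv2.Sobel(gray_roi, cv2.CV_16S, 1, 0, ksize=3)
        return grad_x.std()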
Example #4
def depthmatrix(leftimage, rightimage, precision=4, mask=0):
    """Builds a depth map by running depthmatch once per grid cell, where each cell
    covers a square of 2**precision input pixels. matx, maty, matz receive the x, y, z
    coordinates of the match for each cell, and only matz is returned. If mask is set,
    only cells where the (downsampled) mask is non-zero are computed."""
    
    info = cv.cvGetSize(leftimage)
    width = info.width
    height = info.height
    precision_pixels = (2**precision)
    downsampled_size = cv.cvSize(width/precision_pixels, height/precision_pixels)
    print "Precision of", downsampled_size.width, downsampled_size.height, "px"
    if mask:
        downsampled_mask = cv.cvCreateImage(downsampled_size, 8, 1)
        cv.cvResize(mask, downsampled_mask)
    matx = cv.cvCreateImage(downsampled_size, 8, 1)
    maty = cv.cvCreateImage(downsampled_size, 8, 1)
    matz = cv.cvCreateImage(downsampled_size, 8, 1)
    for i in xrange(width/precision_pixels):
        for j in xrange(height/precision_pixels):
            if mask:
                if (not cv.cvGetReal2D(downsampled_mask, j, i)):
                    continue
            x = i*precision_pixels  # cell origin in full-resolution pixels
            y = j*precision_pixels
            depth = depthmatch(x+precision_pixels/2, y+precision_pixels/2, leftimage, rightimage, roi=precision_pixels, buf=precision_pixels*2)
            #print i, j
            # fill in result matrix if mask wasn't 0 at this point (X,Y,Z)
            cv.cvSetReal2D(matx, j, i, int(depth[0][0]))
            cv.cvSetReal2D(maty, j, i, int(depth[0][1]))
            cv.cvSetReal2D(matz, j, i, int(depth[0][2]))
    return matz
Example #5
def analyzeImage(original):
	scaleImage = cv.cvCreateImage(cv.cvSize(int(original.width*scale), int(original.height*scale)), 8, 3)
	cv.cvResize(original, scaleImage)

	# Create 1-channel image for the edges
	edgeImage = cv.cvCreateImage(cv.cvGetSize(scaleImage), 8, 1)

	# Retrieve edges
	edgeDetector.findBWEdges(scaleImage, edgeImage, edgeThreshold1, edgeThreshold2)

	# Get cuts
	cuts = lib.findGoldenMeans(cv.cvGetSize(scaleImage))

	# Run along
	allComponents = []
	for cut in cuts:
		cutComponents = analyzeCut(scaleImage, edgeImage, cut)
		allComponents.append(cutComponents)

	# Get the collected component_dictionaries
	for dict in allComponents:
		lib.drawBoundingBoxes(original, dict, scale)

	# Draw the margins
	for cut in cuts:
		lib.drawMargin(original, cut, margin, scale)
		#include if the super margin needs to be drawn
		#lib.drawMargin(original, cut, superMargin, scale)

	return (original, allComponents)
Example #6
def findEdges(original, out, threshold1 = 100, threshold2 = None):
	"""Return a new edge detected image with a specified threshold"""
	warnings.warn("Use findBWEdges instead unless you really need colored edges.", DeprecationWarning)

	#Define threshold2
	if threshold2 == None:
		threshold2 = threshold1 * 3

	# Create two pictures with only one channel for a b/w copy
	# and one for storing the edges found in the b/w picture
	gray = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)
	edge = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)

	# Create the b/w copy of the original
	cv.cvCvtColor(original, gray, cv.CV_BGR2GRAY)

	# Blur the b/w copy, but put the result into edge pic
	cv.cvSmooth(gray, edge, cv.CV_BLUR, 3, 3, 0)

	# Negate the b/w copy of original with newly blurred
	# b/w copy. This will make edges stand out
	cv.cvNot(gray, edge)

	# Run an edge-finding algorithm called 'Canny'
	# It will analyse the first argument and store the
	# resulting picture in the second argument
	cv.cvCanny(gray, edge, threshold1, threshold2)

	# We initialize our out-image to black
	cv.cvSetZero(out)

	# Finally, we use the found edges, which are b/w, as
	# a mask for copying the colored edges from the original
	# to the out-image
	cv.cvCopy(original, out, edge)
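
Note that the smooth and negate steps above both write into edge, which cvCanny then overwrites, so only the Canny output actually drives the masked copy. A sketch of the effective pipeline in the cv2 API (the function name is illustrative):

    import cv2

    def find_edges_cv2(original, threshold1=100, threshold2=None):
        if threshold2 is None:
            threshold2 = threshold1 * 3
        gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
        edge = cv2.Canny(gray, threshold1, threshold2)
        # keep the colored pixels of the original only where edges were found
        return cv2.bitwise_and(original, original, mask=edge)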
Example #7
def analyzeCut(scaleImage, edgeImage, cut):
	"""Extract the interesting features respecting the cut"""

	# Set up constraints
	constraints = regionSelector.Constraints(cv.cvGetSize(scaleImage), cut, margin, superMargin, 0.002, 0.25)

	# Create temporary images
	blurImage = cv.cvCreateImage(cv.cvGetSize(scaleImage), 8, 3)
	workImage = cv.cvCreateImage(cv.cvGetSize(scaleImage), 8, 3)

	# Create a blurred copy of the original
	cv.cvSmooth(scaleImage, blurImage, cv.CV_BLUR, 3, 3, 0)

	# Superimpose the edges onto the blurred image
	cv.cvNot(edgeImage, edgeImage)
	cv.cvCopy(blurImage, workImage, edgeImage)

	# Get the edges back to white
	cv.cvNot(edgeImage, edgeImage)

	# We're done with the blurred image now
	cv.cvReleaseImage(blurImage)

	# Retrieve the regions touching the cut
	component_dictionary = featureDetector.ribbonFloodFill(scaleImage, edgeImage, workImage, cut, margin, lo, up)

	# Clean up
	cv.cvReleaseImage(workImage)

	# Prune components
	newComponents = regionSelector.pruneRegions(component_dictionary, constraints)

	# Return the dictionary of accepted components
	#transformer.translateBoundingBoxes(newComponents, 1)
	return newComponents
Example #8
    def detect_lines(self, img_grey, img_orig):
        """ Detect lines within the image. To switch between standard and
			probabilistic Hough transform, use cv.CV_HOUGH_STANDARD, or
			cv.CV_HOUGH_PROBABILISTIC.
		"""
        # Set transform method ('standard','probabilistic')
        transform_method = 'probabilistic'

        # Clear out our storage
        cv.cvClearMemStorage(self.lines_storage)
        sz = cv.cvSize(img_grey.width & -2, img_grey.height & -2)
        img_dst_color = cv.cvCreateImage(cv.cvGetSize(img_orig), 8, 3)
        tgrey = cv.cvCreateImage(sz, 8, 1)

        # cvCanny takes (src, dst, ...); find edges in img_grey and write them to tgrey
        cv.cvCanny(img_grey, tgrey, 50, 200, 3)
        if transform_method == 'standard':
            lines = cv.cvHoughLines2(tgrey, self.lines_storage,
                                     cv.CV_HOUGH_STANDARD, 1, cv.CV_PI / 180,
                                     100, 0, 0)
        else:
            lines = cv.cvHoughLines2(tgrey, self.lines_storage,
                                     cv.CV_HOUGH_PROBABILISTIC, 1,
                                     cv.CV_PI / 180, 50, 50, 10)

        return lines
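
For comparison, the same standard/probabilistic switch with the modern bindings, using the parameter values from the snippet; note that cv2.HoughLines returns (rho, theta) pairs while cv2.HoughLinesP returns (x1, y1, x2, y2) segments:

    import cv2
    import numpy as np

    def detect_lines_cv2(img_grey, method='probabilistic'):
        edges = cv2.Canny(img_grey, 50, 200, apertureSize=3)
        if method == 'standard':
            return cv2.HoughLines(edges, 1, np.pi / 180, 100)
        return cv2.HoughLinesP(edges, 1, np.pi / 180, 50,
                               minLineLength=50, maxLineGap=10)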
Example #9
	def _cv_to_pygame(self,frame,channel=-1) :

		# scale the image to size of the window
		cvt_scale = cv.cvCreateImage(cv.cvSize(self.image_dims[0],self.image_dims[1]),frame.depth,frame.nChannels)
		#cv.cvResize(frame,cvt_scale,cv.CV_INTER_LINEAR)
		cv.cvResize(frame,cvt_scale,cv.CV_INTER_NN)

		# need to convert the colorspace differently depending on where the image came from
		cvt_color = cv.cvCreateImage(cv.cvSize(cvt_scale.width,cvt_scale.height),cvt_scale.depth,3)
		if frame.nChannels == 3 :
			# frame is in BGR format, convert it to RGB so the sky isn't orange
			cv.cvCvtColor(cvt_scale,cvt_color,cv.CV_BGR2RGB)
		elif frame.nChannels == 1 : # image has only one channel, iow 1 color
			if channel == 0 :
				cv.cvMerge(frame,None,None,None,cvt_color)
			elif channel == 1 :
				cv.cvMerge(None,frame,None,None,cvt_color)
			elif channel == 2 :
				cv.cvMerge(None,None,frame,None,cvt_color)
			elif channel == 3 :
				cv.cvMerge(None,None,None,frame,cvt_color)
			else :
				cv.cvCvtColor(cvt_scale,cvt_color,cv.CV_GRAY2RGB)

		# create a pygame surface
		frame_surface=pygame.image.frombuffer(cvt_color.imageData,self.image_dims,'RGB')

		return frame_surface
Example #10
	def detect_lines(self, img_grey, img_orig):
		""" Detect lines within the image. To switch between standard and
			probabilistic Hough transform, use cv.CV_HOUGH_STANDARD, or
			cv.CV_HOUGH_PROBABILISTIC.
		"""
		# Set transform method ('standard','probabilistic')
		transform_method						= 'probabilistic'

		# Clear out our storage
		cv.cvClearMemStorage(self.lines_storage)
		sz										= cv.cvSize(img_grey.width & -2, img_grey.height & -2)
		img_dst_color							= cv.cvCreateImage(cv.cvGetSize(img_orig), 8, 3)
		tgrey									= cv.cvCreateImage(sz, 8, 1)

		# cvCanny takes (src, dst, ...); find edges in img_grey and write them to tgrey
		cv.cvCanny(img_grey, tgrey, 50, 200, 3)
		if transform_method == 'standard':
			lines								= cv.cvHoughLines2(tgrey,
																	self.lines_storage,
																	cv.CV_HOUGH_STANDARD,
																	1,
																	cv.CV_PI/180,
																	100,
																	0,
																	0)
		else:
			lines								= cv.cvHoughLines2(tgrey,
																	self.lines_storage,
																	cv.CV_HOUGH_PROBABILISTIC,
																	1,
																	cv.CV_PI/180,
																	50,
																	50,
																	10)

		return lines
Example #11
File: chroma.py Project: bmiro/vpc
def pixelInRange(src, rmin, rmax, floor, roof, dst):
    if rmax > rmin: # normal case
        cvInRangeS(src, rmin, rmax, dst)
    else: # considering range as a cycle
        dst0 = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
        dst1 = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
        cvInRangeS(src, floor, rmax, dst0)
        cvInRangeS(src, rmin, roof, dst1)
        cvOr(dst0, dst1, dst)
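
The wrap-around trick (treating the hue range as a cycle) carries over directly; a sketch with cv2, assuming src is a single-channel hue image:

    import cv2

    def pixel_in_range(src, rmin, rmax, floor=0, roof=180):
        if rmax > rmin:                        # normal case
            return cv2.inRange(src, rmin, rmax)
        # the range wraps past roof: accept [floor, rmax] OR [rmin, roof]
        return cv2.bitwise_or(cv2.inRange(src, floor, rmax),
                              cv2.inRange(src, rmin, roof))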
Example #12
File: chroma.py Project: bmiro/vpc
def getFilter(frameWidth, frameHeight):
    cvNamedWindow("Filtred")
    
    cvCreateTrackbar("hmax", "Filtred", getHlsFilter('hmax'), 180, trackBarChangeHmax)
    cvCreateTrackbar("hmin", "Filtred", getHlsFilter('hmin'), 180, trackBarChangeHmin)
    #cvCreateTrackbar("lmax", "Filtred", hlsFilter['lmax'], 255, trackBarChangeLmax)
    #cvCreateTrackbar("lmin", "Filtred", hlsFilter['lmin'], 255, trackBarChangeLmin)
    cvCreateTrackbar("smax", "Filtred", getHlsFilter('smax'), 255, trackBarChangeSmax)
    cvCreateTrackbar("smin", "Filtred", getHlsFilter('smin'), 255, trackBarChangeSmin)

    cvSetMouseCallback("Filtred", mouseClick, None)
    
    frame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    hlsFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    filtredFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)

    mask = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)

    hFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    lFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    sFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    
    ThHFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThLFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThSFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    
    key = -1
    while key == -1: 
        if not cvGrabFrame(CAM):
            print "Could not grab a frame"
            exit()
        frame = cvQueryFrame(CAM)
        
        cvCvtColor(frame, hlsFrame, CV_BGR2HLS)
    
        cvSplit(hlsFrame, hFrame, lFrame, sFrame, None)
        
        pixelInRange(hFrame, getHlsFilter('hmin'), getHlsFilter('hmax'), 0, 180, ThHFrame) 
        #pixelInRange(lFrame, getHlsFilter('lmin'), getHlsFilter('lmax'), 0, 255, ThLFrame)
        pixelInRange(sFrame, getHlsFilter('smin'), getHlsFilter('smax'), 0, 255, ThSFrame)
        
        cvSetZero(mask)        
        cvAnd(ThHFrame, ThSFrame, mask)
        
        cvSetZero(filtredFrame)
        
        cvCopy(frame, filtredFrame, mask)
        
        cvShowImage("Filtred", filtredFrame)

        key = cvWaitKey(10)
        if key == 'r':
            key = -1
            resetHlsFilter()
            
    cvDestroyWindow("Filtred")    
Example #13
 def __FindHarris(self, filename): # find the corners of the image; the Harris response is saved in self.cornerimg
     self.img = highgui.cvLoadImage (filename)
     greyimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8,1)
     w = cv.cvGetSize(self.img).width
     h = cv.cvGetSize(self.img).height
     
     # convert to grayscale, then to 32-bit float for the Harris detector
     cv.cvCvtColor(self.img, greyimg, cv.CV_BGR2GRAY)
     image = cv.cvCreateImage(cv.cvGetSize(self.img), cv.IPL_DEPTH_32F, 1)
     cv.cvConvert(greyimg, image)
     self.cornerimg = cv.cvCreateImage(cv.cvGetSize(self.img), cv.IPL_DEPTH_32F, 1)
     cv.cvCornerHarris(image, self.cornerimg, 11, 5, 0.1)
Example #14
def _detect(image):
    """ Detects faces on `image`
    Parameters:
        @image: image file path

    Returns:
        [((x1, y1), (x2, y2)), ...] List of coordinates for the top-left
                                    and bottom-right corners
    """
    # the OpenCV API says this function is obsolete, but we can't
    # cast the output of cvLoad to a HaarClassifierCascade, so use
    # this anyway; the size parameter is ignored
    capture = cvCreateFileCapture(image) 

    if not capture:
        return []

    frame = cvQueryFrame(capture)
    if not frame:
        return []

    img = cvCreateImage(cvSize(frame.width, frame.height),
                        IPL_DEPTH_8U, frame.nChannels)
    cvCopy(frame, img)

    # allocate temporary images
    gray          = cvCreateImage((img.width, img.height),
                                  COPY_DEPTH, COPY_CHANNELS)
    width, height = (cvRound(img.width / IMAGE_SCALE),
                     cvRound(img.height / IMAGE_SCALE))
    small_img     = cvCreateImage((width, height), COPY_DEPTH, COPY_CHANNELS)

    # convert color input image to grayscale
    cvCvtColor(img, gray, CV_BGR2GRAY)

    # scale input image for faster processing
    cvResize(gray, small_img, CV_INTER_LINEAR)
    cvEqualizeHist(small_img, small_img)
    cvClearMemStorage(STORAGE)

    coords = []
    for haar_file in CASCADES:
        cascade = cvLoadHaarClassifierCascade(haar_file, cvSize(1, 1))
        if cascade:
            faces = cvHaarDetectObjects(small_img, cascade, STORAGE, HAAR_SCALE,
                                        MIN_NEIGHBORS, HAAR_FLAGS, MIN_SIZE) or []
            for face_rect in faces:
                # the input to cvHaarDetectObjects was resized, so scale the 
                # bounding box of each face and convert it to two CvPoints
                x, y = face_rect.x, face_rect.y
                pt1 = (int(x * IMAGE_SCALE), int(y * IMAGE_SCALE))
                pt2 = (int((x + face_rect.width) * IMAGE_SCALE),
                       int((y + face_rect.height) * IMAGE_SCALE))
                coords.append((pt1, pt2))
    return coords
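
With the modern bindings the cvCreateFileCapture workaround is unnecessary, since cv2.CascadeClassifier loads the XML directly. A minimal sketch under that assumption (the cascade path and the 1.3 scale are illustrative stand-ins for CASCADES and IMAGE_SCALE):

    import cv2

    CASCADE = cv2.CascadeClassifier(
        '/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml')
    IMAGE_SCALE = 1.3

    def detect(path):
        img = cv2.imread(path)
        if img is None:
            return []
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        small = cv2.resize(gray, (int(img.shape[1] / IMAGE_SCALE),
                                  int(img.shape[0] / IMAGE_SCALE)))
        small = cv2.equalizeHist(small)
        faces = CASCADE.detectMultiScale(small, scaleFactor=1.2, minNeighbors=2)
        # detection ran on the downscaled image, so scale the boxes back up
        return [((int(x * IMAGE_SCALE), int(y * IMAGE_SCALE)),
                 (int((x + w) * IMAGE_SCALE), int((y + h) * IMAGE_SCALE)))
                for (x, y, w, h) in faces]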
Example #15
def _detect(image):
    """ Detects faces on `image`
    Parameters:
        @image: image file path

    Returns:
        [((x1, y1), (x2, y2)), ...] List of coordinates for the top-left
                                    and bottom-right corners
    """
    # the OpenCV API says this function is obsolete, but we can't
    # cast the output of cvLoad to a HaarClassifierCascade, so use
    # this anyway; the size parameter is ignored
    capture = cvCreateFileCapture(image)

    if not capture:
        return []

    frame = cvQueryFrame(capture)
    if not frame:
        return []

    img = cvCreateImage(cvSize(frame.width, frame.height), IPL_DEPTH_8U,
                        frame.nChannels)
    cvCopy(frame, img)

    # allocate temporary images
    gray = cvCreateImage((img.width, img.height), COPY_DEPTH, COPY_CHANNELS)
    width, height = (cvRound(img.width / IMAGE_SCALE),
                     cvRound(img.height / IMAGE_SCALE))
    small_img = cvCreateImage((width, height), COPY_DEPTH, COPY_CHANNELS)

    # convert color input image to grayscale
    cvCvtColor(img, gray, CV_BGR2GRAY)

    # scale input image for faster processing
    cvResize(gray, small_img, CV_INTER_LINEAR)
    cvEqualizeHist(small_img, small_img)
    cvClearMemStorage(STORAGE)

    coords = []
    for haar_file in CASCADES:
        cascade = cvLoadHaarClassifierCascade(haar_file, cvSize(1, 1))
        if cascade:
            faces = cvHaarDetectObjects(small_img, cascade, STORAGE,
                                        HAAR_SCALE, MIN_NEIGHBORS, HAAR_FLAGS,
                                        MIN_SIZE) or []
            for face_rect in faces:
                # the input to cvHaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                x, y = face_rect.x, face_rect.y
                pt1 = (int(x * IMAGE_SCALE), int(y * IMAGE_SCALE))
                pt2 = (int((x + face_rect.width) * IMAGE_SCALE),
                       int((y + face_rect.height) * IMAGE_SCALE))
                coords.append((pt1, pt2))
    return coords
Example #16
def get_nearest_feature( image, this_point, n=2000 ):
	"""
	Get the n-nearest features to a specified image coordinate.
	Features are determined using cvGoodFeaturesToTrack.
	"""

	_red = cv.cvScalar (0, 0, 255, 0);
	_green = cv.cvScalar (0, 255, 0, 0);
	_blue = cv.cvScalar (255,0,0,0);
	_white = cv.cvRealScalar (255)
	_black = cv.cvRealScalar (0)

	quality = 0.01
	min_distance = 4
	N_best = n
	win_size = 11

	grey = cv.cvCreateImage (cv.cvGetSize (image), 8, 1)
	eig = cv.cvCreateImage (cv.cvGetSize (image), 32, 1)
	temp = cv.cvCreateImage (cv.cvGetSize (image), 32, 1)

	# create a grey version of the image
	cv.cvCvtColor ( image, grey, cv.CV_BGR2GRAY)

	points = cv.cvGoodFeaturesToTrack ( 
		grey, eig, temp,
		N_best,
		quality, min_distance, None, 3, 0, 0.04)

	# refine the corner locations
	better_points = cv.cvFindCornerSubPix (
		grey,
		points,
		cv.cvSize (win_size, win_size), cv.cvSize (-1, -1),
		cv.cvTermCriteria (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS,
						   20, 0.03))

	eigs = []
	for i in range(len(points)):
		eigs.append(cv.cvGetMat(eig)[int(points[i].y)][int(points[i].x)])

	mypoints = np.matrix(np.zeros((len(points)*2),dtype=float)).reshape(len(points),2)
	dists = []
	for i,point in enumerate(points):
		mypoints[i,0]=point.x
		mypoints[i,1]=point.y
		dists.append( np.linalg.norm(mypoints[i,:]-this_point) )

	dists = np.array(dists)
	sorteddists = dists.argsort()

	cv.cvDrawCircle ( image, points[ sorteddists[0] ], 5, _green, 2, 8, 0 )

	return better_points[ sorteddists[0] ]
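
The goodFeaturesToTrack / cornerSubPix pair survives almost unchanged in cv2; a compact sketch of the nearest-feature lookup, reusing the quality, distance, window, and termination values above:

    import cv2
    import numpy as np

    def nearest_feature(image, this_point, n=2000):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        corners = cv2.goodFeaturesToTrack(gray, maxCorners=n,
                                          qualityLevel=0.01, minDistance=4)
        criteria = (cv2.TERM_CRITERIA_MAX_ITER | cv2.TERM_CRITERIA_EPS, 20, 0.03)
        corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        pts = corners.reshape(-1, 2)
        dists = np.linalg.norm(pts - np.asarray(this_point, dtype=float), axis=1)
        return pts[dists.argmin()]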
Example #17
    def texture_features(self, block_size=5, filter_size=3):
        """
        Calculates the texture features associated with the image.
        block_size gives the size of the texture neighborhood to be processed
        filter_size gives the size of the Sobel operator used to find gradient information
        """
        #block_size = cv.cvSize(block_size, block_size)

        #convert to grayscale float
        channels = 1
        self.gray_image = cv.cvCreateImage(
            cv.cvSize(self.im_width, self.im_height),
            cv.IPL_DEPTH_8U,  #cv.IPL_DEPTH_16U, #cv.IPL_DEPTH_32F,
            channels)

        #cv.CV_32FC1, #cv.IPL_DEPTH_32F, #cv.IPL_DEPTH_8U, #cv.IPL_DEPTH_16U,
        channels = 1
        eig_tex = cv.cvCreateImage(
            cv.cvSize(self.im_width * 6, self.im_height), cv.IPL_DEPTH_32F,
            channels)

        cv.cvCvtColor(self.image, self.gray_image, cv.CV_BGR2GRAY)

        #cv.cvAdd(const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL );

        #highgui.cvConvertImage(self.image, self.gray_image)

        cv.cvCornerEigenValsAndVecs(
            self.gray_image,
            eig_tex,  #CvArr* eigenvv,
            block_size,
            filter_size)

        eig_tex = ut.cv2np(eig_tex)
        eig_tex = np.reshape(eig_tex, [self.im_height, self.im_width, 6])
        #print eig_tex.shape ## [480,640,3]
        ## (l1, l2, x1, y1, x2, y2), where
        ## l1, l2 - eigenvalues of M; not sorted
        ## (x1, y1) - eigenvector corresponding to l1
        ## (x2, y2) - eigenvector corresponding to l2
        tex_feat = np.zeros([3, self.im_height * self.im_width],
                            dtype=np.float32)
        tmp = np.reshape(eig_tex, [self.im_height * self.im_width, 6]).T
        s = tmp[0] > tmp[1]
        tex_feat[1:3, s] = tmp[0, s] * tmp[2:4, s]
        tex_feat[0, s] = tmp[1, s]
        tex_feat[1:3, -s] = tmp[1, -s] * tmp[4:6, -s]
        tex_feat[0, -s] = tmp[0, -s]

        self.tex_feat = tex_feat.T
        self.tex_image = np.reshape(self.tex_feat,
                                    [self.im_height, self.im_width, 3])
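
Two porting notes on this snippet: cv2.cornerEigenValsAndVecs returns the H x W x 6 result directly (no width*6 image to allocate), and modern numpy rejects -s as boolean negation (use ~s). A sketch under those assumptions:

    import cv2
    import numpy as np

    def texture_features(image, block_size=5, filter_size=3):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # H x W x 6 array of (l1, l2, x1, y1, x2, y2) per pixel; eigenvalues unsorted
        eig = cv2.cornerEigenValsAndVecs(gray, block_size, filter_size)
        h, w = gray.shape
        tmp = eig.reshape(h * w, 6).T
        tex = np.zeros((3, h * w), dtype=np.float32)
        s = tmp[0] > tmp[1]
        tex[1:3, s] = tmp[0, s] * tmp[2:4, s]
        tex[0, s] = tmp[1, s]
        tex[1:3, ~s] = tmp[1, ~s] * tmp[4:6, ~s]   # ~s, not -s, in modern numpy
        tex[0, ~s] = tmp[0, ~s]
        return tex.T.reshape(h, w, 3)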
Example #18
def harrisResponse(image):
    """pyvision/point/DetectorHarris.py
    Runs at 10.5 fps...
    """
    gray = cv.cvCreateImage( cv.cvGetSize(image), 8, 1 )
    corners = cv.cvCreateImage( cv.cvGetSize(image), 32, 1 )
    cv.cvCvtColor( image, gray, cv.CV_BGR2GRAY )

    cv.cvCornerHarris(gray,corners,3)
    
    image = filter_and_render_cv(image,corners)
    #IPShellEmbed()()
    return image
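
The equivalent cv2 call is a one-liner; blockSize=3 mirrors the cvCornerHarris call above, while ksize=3 and k=0.04 match what I believe were the old binding's defaults:

    import cv2

    def harris_response(image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return cv2.cornerHarris(gray, blockSize=3, ksize=3, k=0.04)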
Example #19
	def read(self) :
		frame=self.input.read()
		if self.debug :
			raw_frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,frame.nChannels)
			cv.cvCopy(frame,raw_frame,None)
			self.raw_frame_surface=pygame.image.frombuffer(frame.imageData,(frame.width,frame.height),'RGB')

		if self.enabled :
			cv_rs = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)

			# convert color
			cv.cvCvtColor(frame,cv_rs,cv.CV_BGR2GRAY)

			# invert the image
			cv.cvSubRS(cv_rs, 255, cv_rs, None);

			# threshold the image
			frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
			cv.cvThreshold(cv_rs, frame, self.threshold, 255, cv.CV_THRESH_BINARY)

			if self.debug :
				thresh_frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,3)
				cv.cvCvtColor(frame,thresh_frame,cv.CV_GRAY2RGB)
				self.thresh_frame_surface=pygame.image.frombuffer(thresh_frame.imageData,(frame.width,frame.height),'RGB')

			# I think these functions are too specialized for transforms
			cv.cvSmooth(frame,frame,cv.CV_GAUSSIAN,3, 0, 0, 0 )
			cv.cvErode(frame, frame, None, 1)
			cv.cvDilate(frame, frame, None, 1)

			num_contours,contours=cv.cvFindContours(frame,self.storage,cv.sizeof_CvContour,cv.CV_RETR_LIST,cv.CV_CHAIN_APPROX_NONE,cv.cvPoint(0,0))
			if contours is None :
				return []
			else :
				contours = cv.cvApproxPoly( contours, cv.sizeof_CvContour, self.storage, cv.CV_POLY_APPROX_DP, 3, 1 );
				if contours is None :
					return []
				else :
					final_contours = []
					for c in contours.hrange() :
						area = abs(cv.cvContourArea(c))
						#self.debug_print('Polygon Area: %f'%area)
						if area >= self.min_area :
							lst = []
							for pt in c :
								lst.append((pt.x,pt.y))
							final_contours.append(lst)
						contours = contours.h_next
					return final_contours

		return []
Example #20
File: chroma.py Project: bmiro/vpc
def getBackground(frameWidth, frameHeight):
    cvNamedWindow("Background")
    
    text = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    frame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    background = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)

    font = cvInitFont(CV_FONT_HERSHEY_COMPLEX, 1.0, 1.0, 0.0, 2)
    pt1 = cvPoint(50, 100)
    pt2 = cvPoint(50, 150)
    center = cvPoint(frameWidth/2, frameHeight/2)
    cvPutText(text, "Press enter, run away and wait", pt1, font, CV_RGB(150, 100, 150))
    cvPutText(text, str(delayS) + " seconds to capture background", pt2, font, CV_RGB(150, 100, 150))
    cvShowImage("Background", text)
        
    key = -1
    while key == -1:
        key = cvWaitKey(10)    
        
    like = False
    while not like:
        for i in range(delayS):
            cvZero(text)
            cvPutText(text, str(delayS-i), center, font, CV_RGB(150, 100, 150))
            cvShowImage("Background", text)
            cvWaitKey(1000)
    
        csut = camStartUpTime
        while (csut): # keep capturing frames to give the camera time to auto-adjust its colors
            if not cvGrabFrame(CAM):
                print "Could not grab a frame"
                exit()
            cvWaitKey(10)
            csut -= 1
        frame = cvQueryFrame(CAM)
        cvCopy(frame, background)
        
        cvCopy(frame, text)
        cvPutText(text, "Is correct? [y/n]", center, font, CV_RGB(150, 100, 150))

        cvShowImage("Background", text)
        
        key = -1
        while key != 'n' and key != 'y':
            key = cvWaitKey(10)
            if key == 'y': 
                like = True
                
    cvDestroyWindow("Background")
    return background
Example #21
def main(): # ctrl+c to end
    global h,s,v,h2,v2,s2,d,e
    highgui.cvNamedWindow("Camera 1", 1)
    highgui.cvNamedWindow("Orig", 1)
    highgui.cvCreateTrackbar("H", "Camera 1", h, 256, tb_h)
    highgui.cvCreateTrackbar("S", "Camera 1", s, 256, tb_s)
    highgui.cvCreateTrackbar("V", "Camera 1", v, 256, tb_v)
    highgui.cvCreateTrackbar("H2", "Camera 1", h2, 256, tb_h2)
    highgui.cvCreateTrackbar("S2", "Camera 1", s2, 256, tb_s2)
    highgui.cvCreateTrackbar("V2", "Camera 1", v2, 256, tb_v2)
    highgui.cvCreateTrackbar("Dilate", "Camera 1", d, 30, tb_d)
    highgui.cvCreateTrackbar("Erode", "Camera 1", e, 30, tb_e)
    
    cap = highgui.cvCreateCameraCapture(1)
    highgui.cvSetCaptureProperty(cap, highgui.CV_CAP_PROP_FRAME_WIDTH, IMGW)
    highgui.cvSetCaptureProperty(cap, highgui.CV_CAP_PROP_FRAME_HEIGHT, IMGH)
    c = 0
    t1 = tdraw = time.clock()
    t = 1
    font = cv.cvInitFont(cv.CV_FONT_HERSHEY_PLAIN, 1, 1)
    while c != 0x27:
        image = highgui.cvQueryFrame(cap)
        if not image:
            print "capture failed"
            break
            
        thresh = cv.cvCreateImage(cv.cvSize(IMGW,IMGH),8,1)
        cv.cvSetZero(thresh)
        cv.cvCvtColor(image,image,cv.CV_RGB2HSV)
        cv.cvInRangeS(image, (h,s,v,0), (h2,s2,v2,0), thresh)
        result = cv.cvCreateImage(cv.cvSize(IMGW,IMGH),8,3)
        cv.cvSetZero(result)
        
        cv.cvOr(image,image,result,thresh)
        for i in range(1,e):
            cv.cvErode(result,result)
        for i in range(1,d):
            cv.cvDilate(result,result)
            
        # floodfill objects back in, allowing threshold differences outwards
        
        t2 = time.clock()
        if t2 > tdraw+0.3:
            t = t2-t1
            tdraw=t2
        cv.cvPutText(result, "FPS: " + str(1 / (t)), (0,25), font, (255,255,255))
        t1 = t2
        highgui.cvShowImage("Orig", image)
        highgui.cvShowImage("Camera 1", result)
        c = highgui.cvWaitKey(10)
Example #22
def get_nearest_feature(image, this_point, n=2000):
    """
	Get the n-nearest features to a specified image coordinate.
	Features are determined using cvGoodFeaturesToTrack.
	"""

    _red = cv.cvScalar(0, 0, 255, 0)
    _green = cv.cvScalar(0, 255, 0, 0)
    _blue = cv.cvScalar(255, 0, 0, 0)
    _white = cv.cvRealScalar(255)
    _black = cv.cvRealScalar(0)

    quality = 0.01
    min_distance = 4
    N_best = n
    win_size = 11

    grey = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)
    eig = cv.cvCreateImage(cv.cvGetSize(image), 32, 1)
    temp = cv.cvCreateImage(cv.cvGetSize(image), 32, 1)

    # create a grey version of the image
    cv.cvCvtColor(image, grey, cv.CV_BGR2GRAY)

    points = cv.cvGoodFeaturesToTrack(grey, eig, temp, N_best, quality,
                                      min_distance, None, 3, 0, 0.04)

    # refine the corner locations
    better_points = cv.cvFindCornerSubPix(
        grey, points, cv.cvSize(win_size, win_size), cv.cvSize(-1, -1),
        cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))

    eigs = []
    for i in range(len(points)):
        eigs.append(cv.cvGetMat(eig)[int(points[i].y)][int(points[i].x)])

    mypoints = np.matrix(np.zeros((len(points) * 2),
                                  dtype=float)).reshape(len(points), 2)
    dists = []
    for i, point in enumerate(points):
        mypoints[i, 0] = point.x
        mypoints[i, 1] = point.y
        dists.append(np.linalg.norm(mypoints[i, :] - this_point))

    dists = np.array(dists)
    sorteddists = dists.argsort()

    cv.cvDrawCircle(image, points[sorteddists[0]], 5, _green, 2, 8, 0)

    return better_points[sorteddists[0]]
Example #23
 def __FindCorner(self, filename): # find the corners of the image and save the refined corner points in self.points2
     self.img = highgui.cvLoadImage (filename)
     greyimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8,1)
     hsvimg = cv.cvCreateImage(cv.cvGetSize(self.img), 8, 3)
     cv.cvCvtColor(self.img, hsvimg, cv.CV_RGB2HSV)
     cv.cvCvtColor (hsvimg, greyimg, cv.CV_BGR2GRAY)
     
     eigImage = cv.cvCreateImage(cv.cvGetSize(greyimg), cv.IPL_DEPTH_32F, 1)
     tempImage = cv.cvCreateImage(cv.cvGetSize(greyimg), cv.IPL_DEPTH_32F, 1)
     self.points = cv.cvGoodFeaturesToTrack(greyimg, eigImage,tempImage, 2000, 0.01, 5, None, 3,0,0.01 )
     self.points2 = cv.cvFindCornerSubPix(greyimg, self.points,cv.cvSize(20, 20), 
                                          cv.cvSize(-1, -1), cv.cvTermCriteria(cv.CV_TERMCRIT_ITER |cv.CV_TERMCRIT_EPS, 20, 0.03))
     cv.cvReleaseImage(eigImage)
     cv.cvReleaseImage(tempImage)
Example #24
def analyzeCut(original, edgeImage, cut, settings, showBlobs=False):
	"""Extract the interesting features in the vicinity of a given cut"""
	# Get all data from the settings
	lo = settings.lo
	up = settings.up

	# Set up the margin with respect to the cut
	margin = marginCalculator.getPixels(original, cut, settings.marginPercentage)
	superMargin = 0
	# ^^ We don't use superMargin

	# Set up constraints
	constraints = regionSelector.Constraints(cv.cvGetSize(original), cut, margin, superMargin, 0.002, 0.25)

	# Create temporary images
	blurImage = cv.cvCreateImage(cv.cvGetSize(original), 8, 3)
	workImage = cv.cvCreateImage(cv.cvGetSize(original), 8, 3)

	# Create a blurred copy of the original
	cv.cvSmooth(original, blurImage, cv.CV_BLUR, 3, 3, 0)

	# Superimpose the edges onto the blurred image
	cv.cvNot(edgeImage, edgeImage)
	cv.cvCopy(blurImage, workImage, edgeImage)

	# We're done with the blurred image now
	cv.cvReleaseImage(blurImage)

	# Get the edges back to white
	cv.cvNot(edgeImage, edgeImage)

	# Retrieve the regions touching the cut
	component_dictionary = featureDetector.ribbonFloodFill(original, edgeImage, workImage, cut, margin, lo, up)

	#start expanded

	# Prune components BEFORE we delete the workImage
	tmpnewComponents = regionSelector.pruneExpandedRegions(component_dictionary, constraints)
	newComponents = regionSelector.pruneExpandedRagionsto(tmpnewComponents, constraints, cut, workImage)

	# Clean up only if we do not return the image
	if not showBlobs:
		cv.cvReleaseImage(workImage)

	# Return the dictionary of accepted components or both
	if not showBlobs:
		return newComponents
	else:
		return (workImage, newComponents)
Example #25
    def texture_features(self, block_size=5, filter_size=3):
        """
        Calculates the texture features associated with the image.
        block_size gives the size of the texture neighborhood to be processed
        filter_size gives the size of the Sobel operator used to find gradient information
        """
        #block_size = cv.cvSize(block_size, block_size)

        #convert to grayscale float
        channels = 1
        self.gray_image = cv.cvCreateImage(cv.cvSize(self.im_width, self.im_height),
                                           cv.IPL_DEPTH_8U, #cv.IPL_DEPTH_16U, #cv.IPL_DEPTH_32F,
                                           channels)


        #cv.CV_32FC1, #cv.IPL_DEPTH_32F, #cv.IPL_DEPTH_8U, #cv.IPL_DEPTH_16U, 
        channels = 1
        eig_tex = cv.cvCreateImage(cv.cvSize(self.im_width*6, self.im_height),
                                    cv.IPL_DEPTH_32F, 
                                    channels)


        cv.cvCvtColor(self.image, self.gray_image, cv.CV_BGR2GRAY);

        #cv.cvAdd(const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL );
        
        #highgui.cvConvertImage(self.image, self.gray_image)
        
        cv.cvCornerEigenValsAndVecs(self.gray_image, eig_tex,#CvArr* eigenvv,
                                    block_size, filter_size)

        eig_tex = ut.cv2np(eig_tex)
        eig_tex = np.reshape(eig_tex, [self.im_height, self.im_width, 6])
        #print eig_tex.shape ## [480,640,3]
        ## (l1, l2, x1, y1, x2, y2), where
        ## l1, l2 - eigenvalues of M; not sorted
        ## (x1, y1) - eigenvector corresponding to l1
        ## (x2, y2) - eigenvector corresponding to l2
        tex_feat = np.zeros([3, self.im_height * self.im_width], dtype=np.float32)
        tmp = np.reshape(eig_tex, [self.im_height * self.im_width, 6]).T
        s = tmp[0] > tmp[1]
        tex_feat[1:3, s] = tmp[0, s] * tmp[2:4, s]
        tex_feat[0, s] = tmp[1, s]
        tex_feat[1:3, -s] = tmp[1, -s] * tmp[4:6, -s]
        tex_feat[0, -s] = tmp[0, -s]
        
        self.tex_feat = tex_feat.T
        self.tex_image = np.reshape(self.tex_feat, [self.im_height, self.im_width, 3])
Example #26
	def _get_cv_frame(self):
		frame = CameraInputProvider.get_frame(self)

		dst = cv.cvCreateImage(cv.cvSize(self.capture_dims[0],self.capture_dims[1]),frame.depth,frame.nChannels)
		cv.cvWarpPerspective( frame, dst, self.matrix)

		return dst
Example #27
def on_trackbar (position):

    # create the image for drawing the found contours
    contours_image = cv.cvCreateImage (cv.cvSize (_SIZE, _SIZE), 8, 3)

    # compute the real level of display, given the current position
    levels = position - 3

    # initialisation
    _contours = contours
    
    if levels <= 0:
        # zero or negative value
        # => get to the nearest face to make it look more funny
        _contours = contours.h_next.h_next.h_next
        
    # first, clear the image where we will draw contours
    cv.cvSetZero (contours_image)
    
    # draw contours in red and green
    cv.cvDrawContours (contours_image, _contours,
                       _red, _green,
                       levels, 3, cv.CV_AA,
                       cv.cvPoint (0, 0))

    # finally, show the image
    highgui.cvShowImage ("contours", contours_image)
Example #28
 def __normImage(self, img, length):
     #print "Generating norm image..."
     width = length
     height = length
     gray = cv.cvCreateImage(cv.cvSize(img.width,img.height), 8, 1);
     small_img = cv.cvCreateImage(cv.cvSize(cv.cvRound(width),
                                        cv.cvRound(height)), 8, 1 );
 
     # convert color input image to grayscale
     cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY);
     # scale input image for faster processing
     cv.cvResize(gray, small_img, cv.CV_INTER_LINEAR);
     cv.cvEqualizeHist(small_img, small_img);
     #cvClearMemStorage(self.storage);
     norm_image = small_img # save the 'normalized image'
     return norm_image
Example #29
def detectObject(image):
  grayscale = cv.cvCreateImage(size, 8, 1)
  cv.cvFlip(image, None, 1)
  cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)
  storage = cv.cvCreateMemStorage(0)
  cv.cvClearMemStorage(storage)
  cv.cvEqualizeHist(grayscale, grayscale)
  cascade = cv.cvLoadHaarClassifierCascade(haar_file, cv.cvSize(1,1))
  objects = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, 
                                   cv.CV_HAAR_DO_CANNY_PRUNING,
                                   cv.cvSize(100,100))

  # Draw dots where hands are
  if objects:
    for i in objects:
      #cv.cvRectangle(image, cv.cvPoint( int(i.x), int(i.y)),
      #               cv.cvPoint(int(i.x+i.width), int(i.y+i.height)),
      #               cv.CV_RGB(0,255,0), 3, 8, 0)
      center = cv.cvPoint(int(i.x+i.width/2), int(i.y+i.height/2))
      cv.cvCircle(image, center, 10, cv.CV_RGB(0,0,0), 5,8, 0)
      # Left side check
      if center.x > box_forward_left[0].x and center.x < box_backwards_left[1].x and center.y > box_forward_left[0].y and center.y < box_backwards_left[1].y:
        set_speed('left', center)
      # Right side check
      if center.x > box_forward_right[0].x and center.x < box_backwards_right[1].x and center.y > box_forward_right[0].y and center.y < box_backwards_right[1].y:
        set_speed('right', center)
Example #30
def on_trackbar(position):

    # create the image for drawing the found contours
    contours_image = cv.cvCreateImage(cv.cvSize(_SIZE, _SIZE), 8, 3)

    # compute the real level of display, given the current position
    levels = position - 3

    # initialisation
    _contours = contours

    if levels <= 0:
        # zero or negative value
        # => get to the nearest face to make it look more funny
        _contours = contours.h_next.h_next.h_next

    # first, clear the image where we will draw contours
    cv.cvSetZero(contours_image)

    # draw contours in red and green
    cv.cvDrawContours(contours_image, _contours, _red, _green, levels, 3,
                      cv.CV_AA, cv.cvPoint(0, 0))

    # finally, show the image
    highgui.cvShowImage("contours", contours_image)
Example #31
def PIL2Ipl(input):
    """Converts a PIL image to the OpenCV/IPL CvMat data format.

    Supported input image formats are:
        RGB
        L
        F
    """

    if not (isinstance(input, PIL.Image.Image) or isinstance(input, Image.Image)):
        raise TypeError, 'Must be called with PIL.Image.Image or Image.Image!'

    # mode dictionary:
    # pil_mode : (ipl_depth, ipl_channels)
    mode_list = {
        "RGB" : (cv.IPL_DEPTH_8U, 3),
        "L"   : (cv.IPL_DEPTH_8U, 1),
        "F"   : (cv.IPL_DEPTH_32F, 1)
        }

    if not mode_list.has_key(input.mode):
        raise ValueError, 'unknown or unsupported input mode'

    result = cv.cvCreateImage(
        cv.cvSize(input.size[0], input.size[1]),  # size
        mode_list[input.mode][0],  # depth
        mode_list[input.mode][1]  # channels
        )

    # set imageData
    result.imageData = input.tostring()
    return result
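
With current Pillow and cv2 the same conversion is a numpy cast plus a channel swap; a minimal sketch (the mode table above is handled implicitly by the array dtype: 'L' maps to uint8, 'F' to float32):

    import numpy as np

    def pil_to_cv(pil_image):
        arr = np.asarray(pil_image)
        if pil_image.mode == 'RGB':
            arr = arr[:, :, ::-1].copy()   # PIL stores RGB, OpenCV expects BGR
        return arr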
Example #32
    def __init__(self,
                 name,
                 size=2,
                 draw_center=True,
                 draw_grid=True,
                 meters_radius=4.0):
        """
			 name = name of window
			 meter_radus = 4.0
			 size = multiple of 400x200 to use for screen
			 meter_radius = how many per metrer 
		"""
        self.draw_center = draw_center
        self.draw_grid = draw_grid
        self.w = (int)(round(size * 400.0))
        self.h = (int)(round(size * 200.0))

        self.meters_disp = 4.0  #Range in meters of area around robot to display
        self.laser_win = name
        self.buffer = cv.cvCreateImage(cv.cvSize(self.w, 2 * self.h),
                                       cv.IPL_DEPTH_8U, 3)
        #print "RobotDisp: window width", self.buffer.width
        #print "RobotDisp: window height", self.buffer.height
        self.pixels_per_meter = self.h / self.meters_disp
        hg.cvNamedWindow(name, hg.CV_WINDOW_AUTOSIZE)
        hg.cvMoveWindow(name, 0, 50)

        self.font = cv.cvInitFont(cv.CV_FONT_HERSHEY_PLAIN, as_int(1),
                                  as_int(1), 0, 1, cv.CV_AA)
Example #33
def detectObject(image):
    grayscale = cv.cvCreateImage(size, 8, 1)
    cv.cvFlip(image, None, 1)
    cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)
    storage = cv.cvCreateMemStorage(0)
    cv.cvClearMemStorage(storage)
    cv.cvEqualizeHist(grayscale, grayscale)
    cascade = cv.cvLoadHaarClassifierCascade(haar_file, cv.cvSize(1, 1))
    objects = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                     cv.CV_HAAR_DO_CANNY_PRUNING,
                                     cv.cvSize(100, 100))

    # Draw dots where hands are
    if objects:
        for i in objects:
            #cv.cvRectangle(image, cv.cvPoint( int(i.x), int(i.y)),
            #               cv.cvPoint(int(i.x+i.width), int(i.y+i.height)),
            #               cv.CV_RGB(0,255,0), 3, 8, 0)
            center = cv.cvPoint(int(i.x + i.width / 2),
                                int(i.y + i.height / 2))
            cv.cvCircle(image, center, 10, cv.CV_RGB(0, 0, 0), 5, 8, 0)
            # Left side check
            if (box_forward_left[0].x < center.x < box_backwards_left[1].x
                    and box_forward_left[0].y < center.y < box_backwards_left[1].y):
                set_speed('left', center)
            # Right side check
            if (box_forward_right[0].x < center.x < box_backwards_right[1].x
                    and box_forward_right[0].y < center.y < box_backwards_right[1].y):
                set_speed('right', center)
Example #34
	def read(self):
		frame=self.input.read()
		if self.enabled:

			cv_rs = [None]*4
			cv_thresh = [0]*4
			cv_max = [255]*4

			for i in self.channels :
				cv_rs[i] = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
				cv_thresh[i] = self.thresholds[i]
				cv_max[i] = self.max_thresholds[i]

			# extract the color channel
			cv.cvSplit(frame,cv_rs[0],cv_rs[1],cv_rs[2],cv_rs[3])

			#self.debug_print(cv_rs)
			for i in self.channels :
				cv.cvThreshold(cv_rs[i],cv_rs[i],cv_thresh[i],cv_max[i],self.type)

			#cv_thresh = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,3)
			cv.cvZero(frame)
			cv.cvMerge(cv_rs[0],cv_rs[1],cv_rs[2],cv_rs[3],frame)

			#frame = cv_thresh
		return frame
Example #35
    def detect_faces(self, img_grey):
        """ Detect faces within an image, then draw around them.
			The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned 
			for accurate yet slow object detection. For a faster operation on real video 
			images the settings are: 
			scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING, 
			min_size=<minimum possible face size>
		"""
        min_size = cv.cvSize(20, 20)
        self.image_scale = 1.3
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0

        # Create a small image for better performance
        small_size = cv.cvSize(cv.cvRound(img_grey.width / self.image_scale),
                               cv.cvRound(img_grey.height / self.image_scale))
        small_img = cv.cvCreateImage(small_size, 8, 1)
        cv.cvResize(img_grey, small_img, cv.CV_INTER_LINEAR)
        cv.cvEqualizeHist(small_img, small_img)
        cv.cvClearMemStorage(self.faces_storage)

        if (self.cascade):
            t = cv.cvGetTickCount()
            faces = cv.cvHaarDetectObjects(small_img, self.cascade,
                                           self.faces_storage, haar_scale,
                                           min_neighbors, haar_flags, min_size)
            t = cv.cvGetTickCount() - t
            cv.cvReleaseImage(small_img)
            #print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.));
            return faces
Example #36
	def read(self) :
		frame=self.input.read()
		cv_rs = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
		cv.cvCvtColor(frame,cv_rs,cv.CV_RGB2GRAY)
		frame = cv_rs
		if self.enabled :
			# I think these functions are too specialized for transforms
			cv.cvSmooth(frame,frame,cv.CV_GAUSSIAN,3, 0, 0, 0 )
			cv.cvErode(frame, frame, None, 1)
			cv.cvDilate(frame, frame, None, 1)
			num_contours,contours=cv.cvFindContours(frame,self.storage,cv.sizeof_CvContour,cv.CV_RETR_LIST,cv.CV_CHAIN_APPROX_NONE,cv.cvPoint(0,0))
			if contours is None :
				return []
			else :
				contours = cv.cvApproxPoly( contours, cv.sizeof_CvContour, self.storage, cv.CV_POLY_APPROX_DP, 3, 1 );
				if contours is None :
					return []
				else :
					final_contours = []
					for c in contours.hrange() :
						area = abs(cv.cvContourArea(c))
						#self.debug_print('Polygon Area: %f'%area)
						if area >= self.min_area :
							lst = []
							for pt in c :
								lst.append((pt.x,pt.y))
							final_contours.append(lst)
						contours = contours.h_next
					return final_contours

		return []
Example #37
	def detect_faces(self, img_grey):
		""" Detect faces within an image, then draw around them.
			The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned 
			for accurate yet slow object detection. For a faster operation on real video 
			images the settings are: 
			scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING, 
			min_size=<minimum possible face size>
		"""
		min_size								= cv.cvSize(20,20)
		self.image_scale						= 1.3
		haar_scale								= 1.2
		min_neighbors							= 2
		haar_flags								= 0

		# Create a small image for better performance
		small_size								= cv.cvSize(cv.cvRound(img_grey.width/self.image_scale),cv.cvRound(img_grey.height/self.image_scale))
		small_img								= cv.cvCreateImage(small_size, 8, 1)
		cv.cvResize(img_grey, small_img, cv.CV_INTER_LINEAR)
		cv.cvEqualizeHist(small_img, small_img)
		cv.cvClearMemStorage(self.faces_storage)

		if(self.cascade):
			t									= cv.cvGetTickCount();
			faces								= cv.cvHaarDetectObjects(small_img,
																		self.cascade,
																		self.faces_storage,
																		haar_scale,
																		min_neighbors,
																		haar_flags,
																		min_size)
			t									= cv.cvGetTickCount() - t
			cv.cvReleaseImage(small_img)
			#print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.));
			return faces
Example #38
	def read(self):
		frame = self.input.read()

		# which channels to combine
		cv_rs = [None]*4

		#self.debug_print('channels:%s'%self.channels)

		# if frame only has one channel, just return it
		if frame.nChannels == 1 :
			for i in self.channels :
				cv_rs[i] = frame
		else :
			for i in self.channels :
				cv_rs[i] = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)

			#self.debug_print(cv_rs)
			# extract the color channel
			#print 'frame.nChannels',frame.nChannels
			cv.cvSplit(frame,cv_rs[0],cv_rs[1],cv_rs[2],cv_rs[3])

		#cvt_im = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,3)
		cv.cvMerge(cv_rs[0],cv_rs[1],cv_rs[2],cv_rs[3],frame)

		return frame
Example #39
    def process(self, videofile, progress):
        progress(0, _("Extracting histogram"))
        video = hg.cvCreateFileCapture(str(videofile).encode(sys.getfilesystemencoding()))
        if not video:
            raise Exception("Could not open video file")
        histo = cv.cvCreateHist([256],cv.CV_HIST_ARRAY,[[0,256]], 1)
        frame = hg.cvQueryFrame(video)
        frame_gray  = cv.cvCreateImage(cv.cvGetSize(frame), frame.depth, 1);
        hists    = []
        nbframes = 0

        fps = hg.cvGetCaptureProperty(video, hg.CV_CAP_PROP_FPS)
        while frame :
            if not progress(hg.cvGetCaptureProperty(video, hg.CV_CAP_PROP_POS_AVI_RATIO)):
                break
            hg.cvConvertImage(frame,frame_gray)
            cv.cvCalcHist(frame_gray,histo,0,None)
            h = [cv.cvGetReal1D(histo.bins,i) for i in range(256) ] # all 256 bins; range(255) dropped the last one
            h = numpy.array(h,dtype='int32')
            hists.append(h)
            frame = hg.cvQueryFrame(video)
            nbframes += 1

        hists = numpy.array(hists)
        return hists.reshape(nbframes, -1), fps
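
A sketch of the same per-frame grayscale histogram extraction with the cv2 API (cv2.calcHist replaces the cvCreateHist / cvCalcHist / cvGetReal1D round trip):

    import cv2
    import numpy as np

    def frame_histograms(videofile):
        video = cv2.VideoCapture(videofile)
        fps = video.get(cv2.CAP_PROP_FPS)
        hists = []
        while True:
            ok, frame = video.read()
            if not ok:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            hists.append(cv2.calcHist([gray], [0], None, [256], [0, 256]).ravel())
        return np.array(hists, dtype='int32'), fps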
Example #40
def show(fr,width,height,name):
    image = cv.cvCreateImage(cv.cvSize (width, height),8,1)
    l = 0
    for j in range(0,image.width):
        for i in range(0,image.height):
            cv.cvSet2D(image,i,j,int(fr[l][0]));
            l=l+1
    highgui.cvShowImage(name,image)
    highgui.cvWaitKey(1000/29)
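
The per-pixel cvSet2D loop above costs width*height Python calls; the same column-major fill is a single numpy reshape. A sketch, assuming fr is indexable as fr[l][0] exactly as above:

    import cv2
    import numpy as np

    def show_np(fr, width, height, name):
        values = np.asarray([row[0] for row in fr], dtype=np.uint8)
        # the loop above fills column by column, hence reshape then transpose
        image = values.reshape(width, height).T
        cv2.imshow(name, image)
        cv2.waitKey(1000 // 29)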
Example #41
    def HarrisPoints(self, imgfile):
        self.points = []
        self.drawimg = highgui.cvLoadImage(imgfile)
        c = 1
        try:
            gray = cv.cvCreateImage(cv.cvGetSize(self.drawimg), 8, 1)
            cv.cvCvtColor(self.drawimg, gray, cv.CV_BGR2GRAY)
            eig = cv.cvCreateImage(cv.cvGetSize(self.drawimg), 32, 1)
            tmpimg = cv.cvCreateImage(cv.cvGetSize(self.drawimg), 32, 1)
            p = cv.cvGoodFeaturesToTrack(gray, eig, tmpimg, 100, 0.1, 20, None,
                                         7, 1, 0.04)
            for x in p:
                cv.cvCircle(self.drawimg, x, 3, cv.CV_RGB(0, 255, 0), 8, 0)
                self.points.append(x)

        except Exception, e:
            print e
            print 'ERROR: problem handling ' + imgfile
Example #42
def putoriginal(fname, img):
    ori_img = highgui.cvLoadImage(fname)
    ori_img_thumb = cv.cvCreateImage(
        cv.cvSize(ori_img.width / 4, ori_img.height / 4), 8, 3)
    cv.cvResize(ori_img, ori_img_thumb)
    for x in range(ori_img_thumb.height):
        for y in range(ori_img_thumb.width):
            cv.cvSet2D(img, x, y, cv.cvGet2D(ori_img_thumb, x, y))
    return
Example #43
 def __findedge(self, filename):
     tmpimg = highgui.cvLoadImage(filename)
     self.img = cv.cvCreateImage(
         cv.cvSize(int(tmpimg.width * self.enlarge),
                   int(tmpimg.height * self.enlarge)), 8, 3)
     cv.cvResize(tmpimg, self.img, cv.CV_INTER_LINEAR)
     if (self.drawimage):
         self.drawimg = cv.cvCloneImage(self.img)
     else:
         self.drawimg = cv.cvCreateImage(cv.cvGetSize(self.img), 8, 3)
     greyimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height),
                                8, 1)
     cv.cvCvtColor(self.img, greyimg, cv.CV_BGR2GRAY)
     self.allcurve = []
     for i in range(80, 200, 20):
         bimg = cv.cvCloneImage(greyimg)
         cv.cvSmooth(bimg, bimg, cv.CV_MEDIAN, 9)
         #            cv.cvSmooth(bimg, bimg, cv.CV_BILATERAL, 9)
         #            cv.cvSmooth(bimg, bimg, cv.CV_BLUR, 9)
         #            cv.cvSmooth(bimg, bimg, cv.CV_BLUR, 9)
         cv.cvThreshold(greyimg, bimg, i, 255, cv.CV_THRESH_BINARY)
         self.__findcurve(bimg)
Example #44
def mask_image(im, mask):
    if mask.depth == 8:
        bim = cv.cvCreateImage(cv.cvSize(mask.width, mask.height),
                               cv.IPL_DEPTH_32F, mask.nChannels)
        cv.cvConvertScale(mask, bim, 1.0 / 255.0)

    if im.depth == 8:
        newim = cv.cvCreateImage(cv.cvSize(im.width, im.height),
                                 cv.IPL_DEPTH_32F, im.nChannels)
        cv.cvConvertScale(im, newim, 1.0 / 255.0)

    print 'newim.depth = ', newim.depth
    print 'newim.nChannels = ', newim.nChannels
    print 'bim.depth = ', bim.depth
    print 'bim.nChannels = ', bim.nChannels
    if newim.nChannels == 3 and newim.depth == 32 and bim.nChannels == 3 and bim.depth == 32:
        outputIm = cv.cvCloneImage(bim)
        cv.cvMul(bim, newim, outputIm, 1)
        return outputIm
    else:
        print 'oops problem with formats'
        return mask
Example #45
    def detect_face(self, img):
        """ Detect faces within an image, then draw around them.
			The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned 
			for accurate yet slow object detection. For a faster operation on real video 
			images the settings are: 
			scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING, 
			min_size=<minimum possible face size>
		"""
        min_size = cv.cvSize(20, 20)
        image_scale = 1.3
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0
        gray = cv.cvCreateImage(cv.cvSize(img.width, img.height), 8, 1)
        small_img = cv.cvCreateImage(
            cv.cvSize(cv.cvRound(img.width / image_scale),
                      cv.cvRound(img.height / image_scale)), 8, 1)
        cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
        cv.cvResize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.cvEqualizeHist(small_img, small_img)
        cv.cvClearMemStorage(self.storage)

        if (self.cascade):
            t = cv.cvGetTickCount()
            faces = cv.cvHaarDetectObjects(small_img, self.cascade,
                                           self.storage, haar_scale,
                                           min_neighbors, haar_flags, min_size)
            t = cv.cvGetTickCount() - t
            #print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.));
            if faces:
                for r in faces:
                    pt1 = cv.cvPoint(int(r.x * image_scale),
                                     int(r.y * image_scale))
                    pt2 = cv.cvPoint(int((r.x + r.width) * image_scale),
                                     int((r.y + r.height) * image_scale))
                    cv.cvRectangle(img, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8,
                                   0)
        return img
Example #46
    def run(self):

        if self.capture:
            webcam_frame = highgui.cvQueryFrame(self.capture)
        else:
            print "Capture failed!"
            return

        if self.inverted_video.get_active():
            highgui.cvConvertImage(webcam_frame, webcam_frame,
                                   highgui.CV_CVTIMG_FLIP)
        highgui.cvConvertImage(webcam_frame, self.display_frame,
                               highgui.CV_CVTIMG_SWAP_RB)

        if False:
            # PROCESS WEBCAM FRAME HERE...
            inputImage = cv.cvCreateImage(cv.cvGetSize(webcam_frame),
                                          cv.IPL_DEPTH_8U, 1)
            cv.cvCvtColor(webcam_frame, inputImage, cv.CV_RGB2GRAY)

            cv.cvThreshold(inputImage, inputImage, 128, 255,
                           cv.CV_THRESH_BINARY)

            mysize = cv.cvGetSize(webcam_frame)
            height = mysize.height
            width = mysize.width

            # Find horizontal first-moment:
            if False:
                mysum = 0
                for i in range(height):
                    mysum += sum(inputImage[i, :])

                print "Sum:", mysum

            cv.cvMerge(inputImage, inputImage, inputImage, None,
                       self.display_frame)

        incoming_pixbuf = gtk.gdk.pixbuf_new_from_data(
            self.display_frame.imageData, gtk.gdk.COLORSPACE_RGB, False, 8,
            self.display_frame.width, self.display_frame.height,
            self.display_frame.widthStep)
        incoming_pixbuf.copy_area(0, 0, self.display_frame.width,
                                  self.display_frame.height,
                                  self.webcam_pixbuf, 0, 0)

        self.video_image.queue_draw()

        return self.video_enabled_button.get_active()
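Since run() returns the state of the "video enabled" toggle, it is presumably rescheduled as a GTK timeout from the class's setup code; a minimal sketch of that wiring, assuming pygtk's gobject module and an illustrative 100 ms interval:

        # Hypothetical scheduling (assumption): invoke self.run() every 100 ms;
        # the timer stops once run() returns False (toggle switched off).
        import gobject
        gobject.timeout_add(100, self.run)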
Example #47
0
def detect_faces_on(path):
    faces = []
    image = cvLoadImage(path)
    # convert to grayscale for faster results
    grayscale = cvCreateImage(cvSize(image.width, image.height), 8, 1)
    cvCvtColor(image, grayscale, CV_BGR2GRAY)
    # smooth picture for better results
    cvSmooth(grayscale, grayscale, CV_GAUSSIAN, 3, 3)

    storage = cvCreateMemStorage(0)
    cvClearMemStorage(storage)
    cvEqualizeHist(grayscale, grayscale)

    cascade_files = [
        # ('/usr/share/opencv/haarcascades/haarcascade_eye_tree_eyeglasses.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_lowerbody.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_mouth.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_profileface.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_eye.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_eyepair_big.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_nose.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_righteye_2splits.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt2.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_fullbody.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_eyepair_small.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_righteye.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_upperbody.xml', (50, 50)),
        ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt_tree.xml',
         (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_lefteye_2splits.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_lefteye.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_upperbody.xml', (50, 50)),
        # ('parojos_22_5.1.xml', (22, 5)),
        # ('Mouth.xml', (22, 15)),
    ]

    for cascade_file, cascade_sizes in cascade_files:
        cascade = cvLoadHaarClassifierCascade(cascade_file, cvSize(1, 1))
        faces += cvHaarDetectObjects(grayscale, cascade, storage, HAAR_SCALE,
                                     HAAR_NEIGHBORS, CV_HAAR_DO_CANNY_PRUNING,
                                     cvSize(*cascade_sizes))

    return [{'x': f.x, 'y': f.y, 'w': f.width, 'h': f.height} for f in faces]
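The snippet above references module-level HAAR_SCALE and HAAR_NEIGHBORS constants that are not shown; plausible definitions plus a hypothetical invocation (values and path are illustrative, not from the source):

# Assumed module-level constants, mirroring the "fast video" Haar settings
# used elsewhere in these examples (values illustrative).
HAAR_SCALE = 1.2
HAAR_NEIGHBORS = 2

# Hypothetical usage; the image path is illustrative.
for face in detect_faces_on('/tmp/group_photo.jpg'):
    print 'face at (%(x)d, %(y)d), size %(w)dx%(h)d' % face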
Example #48
0
def detect_faces(image):
    """Converts an image to grayscale and prints the locations of any
         faces found"""
    grayscale = cvCreateImage(cvSize(image.width, image.height), 8, 1)
    cvCvtColor(image, grayscale, CV_BGR2GRAY)

    storage = cvCreateMemStorage(0)
    cvClearMemStorage(storage)
    cvEqualizeHist(grayscale, grayscale)

    # The default parameters (scale_factor=1.1, min_neighbors=3,
    # flags=0) are tuned for accurate yet slow face detection. For
    # faster face detection on real video images the better settings are
    # (scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING).
    # --- http://www710.univ-lyon1.fr/~bouakaz/OpenCV-0.9.5/docs/ref/OpenCVRef_Experimental.htm#decl_cvHaarDetectObjects
    # The size box is of the *minimum* detectable object size. Smaller box = more processing time. - http://cell.fixstars.com/opencv/index.php/Facedetect
    minsize = (int(MINFACEWIDTH_PERCENT * image.width + 0.5),
               int(MINFACEHEIGHT_PERCENT * image.height + 0.5))
    print >> sys.stderr, "Min size of face: %s" % repr(minsize)

    faces = []
    for cascadefile in [
            '/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml'
    ]:
        #    for cascadefile in ['/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml', '/usr/share/opencv/haarcascades/haarcascade_profileface.xml']:
        cascade = cvLoadHaarClassifierCascade(cascadefile, cvSize(1, 1))
        #        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize(50,50))
        #        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 3, 0, cvSize(MINFACEWIDTH,MINFACEHEIGHT))
        #        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 3, 0, cvSize(MINFACEWIDTH,MINFACEHEIGHT))
        #        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize(*minsize))
        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1,
                                     4, CV_HAAR_DO_CANNY_PRUNING,
                                     cvSize(*minsize))

    bboxes = []
    if faces:
        for f in faces:
            print >> sys.stderr, "\tFace at [(%d,%d) -> (%d,%d)]" % (
                f.x, f.y, f.x + f.width, f.y + f.height)
        bboxes = [Face(f.x, f.y, f.x + f.width, f.y + f.height) for f in faces]
    return bboxes
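detect_faces() likewise relies on MINFACEWIDTH_PERCENT and MINFACEHEIGHT_PERCENT being defined at module level; a plausible sketch (the 10% figures are an assumption, not from the source):

# Assumed module-level constants: ignore detections narrower than 10% of the
# image width or shorter than 10% of its height (values illustrative).
MINFACEWIDTH_PERCENT = 0.10
MINFACEHEIGHT_PERCENT = 0.10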
Example #49
0
def detect(image):
    image_size = opencv.cvGetSize(image)
 
    # create grayscale version
    grayscale = opencv.cvCreateImage(image_size, 8, 1)
    opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)
 
    # create storage
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
 
    # equalize histogram
    opencv.cvEqualizeHist(grayscale, grayscale)
 
    # detect objects
    faces = opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                       opencv.CV_HAAR_DO_CANNY_PRUNING,
                                       opencv.cvSize(100, 100))
    # eyes = opencv.cvHaarDetectObjects(grayscale, eye_cascade, storage, 1.2, 2,
    #                                   opencv.CV_HAAR_DO_CANNY_PRUNING,
    #                                   opencv.cvSize(60, 60))
    draw_bounding_boxes(faces, image, 127, 255, 0, 3)
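detect() assumes a module-level cascade (and, for the commented-out line, an eye_cascade) loaded elsewhere; a minimal sketch, with the cascade path as an assumption:

# Assumed module-level setup for detect(); the cascade path is illustrative.
cascade = opencv.cvLoadHaarClassifierCascade(
    '/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml',
    opencv.cvSize(1, 1))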
Example #50
0
    def initialize_video(self):

        webcam_frame = highgui.cvQueryFrame(self.capture)

        if not webcam_frame:
            print "Frame acquisition failed."
            return False

        self.webcam_pixbuf = gtk.gdk.pixbuf_new_from_data(
            webcam_frame.imageData, gtk.gdk.COLORSPACE_RGB, False, 8,
            webcam_frame.width, webcam_frame.height, webcam_frame.widthStep)
        self.video_image.set_from_pixbuf(self.webcam_pixbuf)

        self.display_frame = cv.cvCreateImage(
            cv.cvSize(webcam_frame.width, webcam_frame.height),
            cv.IPL_DEPTH_8U, 3)

        return True
Example #51
0
def np2cv(im):
    print 'WARNING: np2cv is not reliable or well tested (it is a bit flaky...)'
    #raise AssertionError('np2cv does not work :-(')
    if len(im.shape) == 3:
        height, width, channels = im.shape
    elif len(im.shape) == 2:
        height, width = im.shape
        channels = 1
    else:
        raise AssertionError(
            'unrecognized shape for the input image: expected 2 or 3 dimensions, got %d'
            % len(im.shape))
    key = str(im.dtype)
    cv_type = np2cv_type_dict[key]
    print 'attempting to create an OpenCV image with (key, width, height, channels) =', (
        key, width, height, channels)
    cv_im = cv.cvCreateImage(cv.cvSize(width, height), cv_type, channels)
    # Copy pixel by pixel: slow, but more reliable than assigning imageData.
    #cv_im.imageData = im.tostring()
    if len(im.shape) == 3:
        for y in xrange(height):
            for x in xrange(width):
                pix = [float(v) for v in im[y, x]]
                cv_im[y, x] = cv.cvScalar(*pix)
    else:
        for y in xrange(height):
            for x in xrange(width):
                pix = float(im[y, x])
                cv_im[y, x] = cv.cvScalar(pix, pix, pix)
    print 'resulted in an OpenCV image with the following properties:'
    numpy_type, nchannels = cv2np_type_dict[cv.cvGetElemType(cv_im)]
    print '(numpy_type, nchannels, cvmat.width, cvmat.height) =', (
        numpy_type, nchannels, cv_im.width, cv_im.height)
    return cv_im
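A hypothetical round trip, assuming np2cv_type_dict maps the key 'uint8' to an 8-bit IPL depth:

import numpy as np
# Illustrative use of np2cv(): convert a black 640x480 RGB numpy array.
# Slow by design - the function copies pixel by pixel.
arr = np.zeros((480, 640, 3), dtype=np.uint8)
cv_img = np2cv(arr)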
Example #52
0
def threshold_image(image, n=[]):
    """Record the first 5 images to get a background, then diff current frame with the last saved frame.
    """
    if len(n) < 5:
        # n[4] will be our background
        # First capture a few images
        n.append(cv.cvCloneMat(image))
        if len(n) == 5:
            # last time here
            # could do averaging here.
            pass
        return image

    original = n[4]
    differenceImage = cv.cvCloneMat(image)
    cv.cvAbsDiff(image, original, differenceImage)
    """The threshold value determines the amount of "Change" required 
    before something will show up"""
    thresholdValue = 50  # 32
    cv.cvThreshold(differenceImage, differenceImage, thresholdValue, 255,
                   cv.CV_THRESH_BINARY)

    # Convert to one channel
    gray = cv.cvCreateImage(cv.cvGetSize(differenceImage), 8, 1)
    cv.cvCvtColor(differenceImage, gray, cv.CV_BGR2GRAY)

    # Use median filter to remove salt and pepper noise.
    cv.cvSmooth(gray, gray, cv.CV_MEDIAN, 15)

    # Dilating the thresholded image would add a border around each object:
    #cv.cvDilate(gray, gray, None, 9)

    # Add a bit of Blur to the threshold mask
    cv.cvSmooth(gray, gray, cv.CV_GAUSSIAN, 5)

    result = cv.cvCloneMat(image)
    cv.cvSetZero(result)

    cv.cvAnd(image, image, result, gray)
    return result
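The "could do averaging here" comment suggests blending the five captured frames into a single background estimate instead of keeping only the last one; a minimal sketch, assuming cv.cvAddWeighted is available in these bindings and with illustrative blend weights:

# Hypothetical background averaging (assumption, not original code): fold each
# captured frame into a running blend and store it as the background n[4].
background = cv.cvCloneMat(n[0])
for frame in n[1:]:
    cv.cvAddWeighted(background, 0.8, frame, 0.2, 0, background)
n[4] = background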
Example #53
0
    def timerEvent(self, ev):
        # Fetch a frame from the video camera
        frame = highgui.cvQueryFrame(self.cap)
        img_orig = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                    cv.IPL_DEPTH_8U, frame.nChannels)
        if (frame.origin == cv.IPL_ORIGIN_TL):
            cv.cvCopy(frame, img_orig)
        else:
            cv.cvFlip(frame, img_orig, 0)

        # Run the detector on the orientation-corrected frame
        #img = self.detect_face(img_orig)
        img = self.detect_squares(img_orig)
        img_pil = adaptors.Ipl2PIL(img)
        s = StringIO()
        img_pil.save(s, "PNG")
        s.seek(0)
        q_img = QImage()
        q_img.loadFromData(s.read())
        bitBlt(self, 0, 0, q_img)
Example #54
0
def display_images(image_list, max_x=1200, max_y=1000, save_images=False):
    """
	Display a list of OpenCV images tiled across the screen
	with maximum width of max_x and maximum height of max_y

	save_images - will save the images(with timestamp)
	"""

    curtime = time.localtime()
    date_name = time.strftime('%Y_%m_%d_%I%M%S', curtime)

    loc_x, loc_y = 0, 0
    wins = []
    for i, im in enumerate(image_list):
        if save_images:
            if im.nChannels == 1 and im.depth == cv.IPL_DEPTH_32F:
                clr = cv.cvCreateImage(cv.cvSize(im.width, im.height),
                                       cv.IPL_DEPTH_8U, 1)
                cv.cvConvertScale(im, clr, 255.0)
                im = clr
            highgui.cvSaveImage('image%d_' % i + date_name + '.png', im)

        window_name = 'image %d' % i
        wins.append((window_name, im))
        highgui.cvNamedWindow(window_name, highgui.CV_WINDOW_AUTOSIZE)
        highgui.cvMoveWindow(window_name, loc_x, loc_y)
        loc_x = loc_x + im.width
        if loc_x > max_x:
            loc_x = 0
            loc_y = loc_y + im.height
            if loc_y > max_y:
                loc_y = 0
    while True:
        for name, im in wins:
            highgui.cvShowImage(name, im)
        keypress = highgui.cvWaitKey(10)
        if keypress == '\x1b':
            break
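A hypothetical call, loading a single image with highgui (the path is illustrative) and tiling it until Escape is pressed:

# Illustrative usage of display_images(); press Esc ('\x1b') to exit the loop.
im = highgui.cvLoadImage('frame_000.png')
display_images([im], save_images=False)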
Example #55
0
    def detect(self, pil_image, cascade_name, recogn_w=50, recogn_h=50):
        # Get cascade:
        cascade = self.get_cascade(cascade_name)

        image = opencv.PIL2Ipl(pil_image)
        image_size = opencv.cvGetSize(image)
        grayscale = image
        if pil_image.mode == "RGB":
            # create grayscale version
            grayscale = opencv.cvCreateImage(image_size, 8, 1)
            # TODO: change to CV_RGB2GRAY - I don't think it'll affect the conversion
            opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)

        # create storage
        storage = opencv.cvCreateMemStorage(0)
        opencv.cvClearMemStorage(storage)

        # equalize histogram
        opencv.cvEqualizeHist(grayscale, grayscale)

        # detect objects
        return opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                          opencv.CV_HAAR_DO_CANNY_PRUNING,
                                          opencv.cvSize(recogn_w, recogn_h))
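A hypothetical invocation, assuming the surrounding class is instantiated as detector and that get_cascade() recognizes the cascade by this name:

# Illustrative usage: find frontal faces of at least 50x50 pixels in a PIL
# image (the cascade name and detector instance are assumptions).
faces = detector.detect(pil_image, 'haarcascade_frontalface_default.xml',
                        recogn_w=50, recogn_h=50)
for f in faces:
    print 'face at (%d, %d), size %dx%d' % (f.x, f.y, f.width, f.height)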