Example #1
 def onIdle(self, event):
     """
     Event to grab and display a frame from the camera. (internal use).
     """
     if self.cap is None:  #should be a cvCameraCapture instance.
         #unbind the idle instance, change to click.
         highgui.cvReleaseCapture(self.cap)  #release the old instance and
         self.cap = highgui.cvCreateCameraCapture(
             self.camera)  #try a new one.
         self.displayError(self.errorBitmap, (128, 128))
         event.Skip()  #must happen before the raise, which ends the handler
         raise CameraError('Unable to open camera, retrying....')
     try:
         img = highgui.cvQueryFrame(self.cap)
     except cv2.error as e:
         raise CameraError('Error when querying for frame: {0}'.format(e))
     self._error = 0  #worked successfully
     img = opencv.cvGetMat(img)
     cv.cvCvtColor(img, img, cv.CV_BGR2RGB)
     if conf.as_bool(conf.config['webcam']['cropBars']):
         #Draw cropping region
         cv.cvRectangle(img, (80, -1), (560, 480), (205.0, 0.0, 0.0, 0.0),
                        2)
     self.displayImage(img)
     event.RequestMore()
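For comparison, a minimal sketch of the same grab/convert/retry cycle against the modern cv2 API (not from the project above; the camera index is an assumption):

import cv2

cap = cv2.VideoCapture(0)  # hypothetical camera index
ok, frame = cap.read()     # frames arrive in BGR channel order
if not ok:
    cap.release()
    cap = cv2.VideoCapture(0)  # retry with a fresh capture, as onIdle does
else:
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # BGR -> RGB for display
cap.release()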
Example #2
 def frame(self):
     if self.framepos == -1:
         raise Exception('call next before the first frame!')
     
     format = self.format
     img = hg.cvRetrieveFrame(self.cap)
     nchannels = 1 if format == FORMAT_GRAY else 3
     shape = ((img.height, img.width) if nchannels == 1
              else (img.height, img.width, nchannels))
     
     if format == FORMAT_BGR: # default format
         frame = np.ndarray(shape = shape, dtype = np.uint8, 
                            buffer = img.imageData)
         if self.own_data: frame = frame.copy()
         return frame
     
     size = cv.cvSize(img.width, img.height)
     img2 = cv.cvCreateImage(size, 8, nchannels)
     cvt_type = -1
     if format == FORMAT_GRAY:
         cvt_type = cv.CV_BGR2GRAY
     elif format == FORMAT_RGB:
         cvt_type = cv.CV_BGR2RGB
     elif format == FORMAT_HSV:
         cvt_type = cv.CV_BGR2HSV
     else: assert False, 'unknown format: %r' % format
     
     cv.cvCvtColor(img, img2, cvt_type)
     
     frame = np.ndarray(shape = shape, dtype = np.uint8,
                        buffer = img2.imageData)
     if self.own_data: frame = frame.copy()
     return frame
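The np.ndarray(..., buffer=img.imageData) trick above wraps the IplImage pixels without copying, which is why own_data forces a copy() before the capture can recycle the frame. A self-contained sketch of the same idea with plain bytes (only numpy assumed):

import numpy as np

h, w, nchannels = 2, 3, 3
raw = b'\x00' * (h * w * nchannels)  # stand-in for img.imageData
frame = np.frombuffer(raw, dtype=np.uint8).reshape(h, w, nchannels)
owned = frame.copy()  # detach from the source buffer, as the own_data branch does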
Example #3
    def evalCurrentImageGradient(self):
        key = {}
        try:
            self._cnt.emit(qt.PYSIGNAL("getImage"), (key, ))
            qimage = key['image']
        except KeyError:
            return
        ##EVAL IMAGE GRADIENT
        x, y = self._cnt.focusPointSelected
        rectangleSize = self._cnt.focusRectangleSize
        im = qimage.copy(x, y, rectangleSize, rectangleSize)  # QImage.copy expects (x, y, width, height)
        srcColorImage = qtTools.getImageOpencvFromQImage(im)

        if srcColorImage.nChannels > 1:
            srcImage = cv.cvCreateImage(
                cv.cvSize(srcColorImage.width, srcColorImage.height),
                srcColorImage.depth, 1)
            cv.cvCvtColor(srcColorImage, srcImage, cv.CV_RGB2GRAY)
        else:  # In Fact It's a grey image
            srcImage = srcColorImage

        destImage = cv.cvCreateImage(
            cv.cvSize(srcImage.width, srcImage.height), cv.IPL_DEPTH_16S, 1)
        cv.cvSobel(srcImage, destImage, 1, 0, 3)
        array = numpy.fromstring(destImage.imageData_get(), dtype=numpy.int16)
        focusQuality = array.std()
        return focusQuality
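The focus metric here is just the standard deviation of a first-order Sobel response. A minimal sketch of the same measurement with the modern cv2 API (cv2/numpy and the input file are assumptions):

import cv2

gray = cv2.imread('patch.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input patch
sobel = cv2.Sobel(gray, cv2.CV_16S, 1, 0, ksize=3)    # x-gradient, 16-bit signed
focus_quality = sobel.std()                           # same statistic as above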
Example #4
def face_detect(file, closeafter=True):
    """Converts an image to grayscale and prints the locations of any faces found"""

    if hasattr(file, 'read'):
        fd, filename = tempfile.mkstemp()
        os.close(fd)  # mkstemp returns an open descriptor; close it
        tmphandle = open(filename, 'wb')  # binary mode for image data
        shutil.copyfileobj(file, tmphandle)
        tmphandle.close()
        if closeafter:
            file.close()
        deleteafter = True
    else:
        filename = file
        deleteafter = False

    image = cvLoadImage(filename)
    grayscale = cvCreateImage(cvSize(image.width, image.height), 8, 1)
    cvCvtColor(image, grayscale, CV_BGR2GRAY)

    storage = cvCreateMemStorage(0)
    cvClearMemStorage(storage)
    cvEqualizeHist(grayscale, grayscale)

    cascade = cvLoadHaarClassifierCascade(
        '/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml',
        cvSize(1,1))

    faces = cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                CV_HAAR_DO_CANNY_PRUNING, cvSize(50,50))
    if deleteafter:
        os.unlink(filename)

    return (image.width, image.height), faces
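A hypothetical call site for face_detect, covering both kinds of argument it accepts (the file name is an assumption):

# with a path, the file is left in place
(w, h), faces = face_detect('group_photo.jpg')

# with a file-like object, the data is spooled to a temp file that
# face_detect deletes afterwards
with open('group_photo.jpg', 'rb') as fh:
    (w, h), faces = face_detect(fh, closeafter=False)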
Example #5
def findEdges(original, out, threshold1 = 100, threshold2 = None):
	"""Return a new edge detected image with a specified threshold"""
	warnings.warn("Use findBWEdges instead unless you really need colored edges.", DeprecationWarning)

	#Define threshold2
	if threshold2 is None:
		threshold2 = threshold1 * 3

	# Create two one-channel pictures: one for a b/w copy
	# and one for storing the edges found in the b/w picture
	gray = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)
	edge = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)

	# Create the b/w copy of the original
	cv.cvCvtColor(original, gray, cv.CV_BGR2GRAY)

	# Blur the b/w copy, but put the result into edge pic
	cv.cvSmooth(gray, edge, cv.CV_BLUR, 3, 3, 0)

	# Negate the b/w copy of original with newly blurred
	# b/w copy. This will make edges stand out
	cv.cvNot(gray, edge)

	# Run an edge-finding algorithm called 'Canny'
	# It will analyse the first argument and store the
	# resulting picture in the second argument
	cv.cvCanny(gray, edge, threshold1, threshold2)

	# We initialize our out-image to black
	cv.cvSetZero(out)

	# Finally, we use the found edges, which are b/w, as
	# a mask for copying the colored edges from the original
	# to the out-image
	cv.cvCopy(original, out, edge)
Example #6
def detectObject(image):
  grayscale = cv.cvCreateImage(size, 8, 1)
  cv.cvFlip(image, None, 1)
  cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)
  storage = cv.cvCreateMemStorage(0)
  cv.cvClearMemStorage(storage)
  cv.cvEqualizeHist(grayscale, grayscale)
  cascade = cv.cvLoadHaarClassifierCascade(haar_file, cv.cvSize(1,1))
  objects = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, 
                                   cv.CV_HAAR_DO_CANNY_PRUNING,
                                   cv.cvSize(100,100))

  # Draw dots where hands are
  if objects:
    for i in objects:
      #cv.cvRectangle(image, cv.cvPoint( int(i.x), int(i.y)),
      #               cv.cvPoint(int(i.x+i.width), int(i.y+i.height)),
      #               cv.CV_RGB(0,255,0), 3, 8, 0)
      center = cv.cvPoint(int(i.x+i.width/2), int(i.y+i.height/2))
      cv.cvCircle(image, center, 10, cv.CV_RGB(0,0,0), 5,8, 0)
      # Left side check
      if (box_forward_left[0].x < center.x < box_backwards_left[1].x and
          box_forward_left[0].y < center.y < box_backwards_left[1].y):
        set_speed('left', center)
      # Right side check
      if (box_forward_right[0].x < center.x < box_backwards_right[1].x and
          box_forward_right[0].y < center.y < box_backwards_right[1].y):
        set_speed('right', center)
Example #7
	def _cv_to_pygame(self,frame,channel=-1) :

		# scale the image to size of the window
		cvt_scale = cv.cvCreateImage(cv.cvSize(self.image_dims[0],self.image_dims[1]),frame.depth,frame.nChannels)
		#cv.cvResize(frame,cvt_scale,cv.CV_INTER_LINEAR)
		cv.cvResize(frame,cvt_scale,cv.CV_INTER_NN)

		# need to convert the colorspace differently depending on where the image came from
		cvt_color = cv.cvCreateImage(cv.cvSize(cvt_scale.width,cvt_scale.height),cvt_scale.depth,3)
		if frame.nChannels == 3 :
			# frame is in BGR format, convert it to RGB so the sky isn't orange
			cv.cvCvtColor(cvt_scale,cvt_color,cv.CV_BGR2RGB)
		elif frame.nChannels == 1 : # image has only one channel, i.e. one color
			# merge the scaled copy, not the raw frame, so sizes match cvt_color
			if channel == 0 :
				cv.cvMerge(cvt_scale,None,None,None,cvt_color)
			elif channel == 1 :
				cv.cvMerge(None,cvt_scale,None,None,cvt_color)
			elif channel == 2 :
				cv.cvMerge(None,None,cvt_scale,None,cvt_color)
			elif channel == 3 :
				cv.cvMerge(None,None,None,cvt_scale,cvt_color)
			else :
				cv.cvCvtColor(cvt_scale,cvt_color,cv.CV_GRAY2RGB)

		# create a pygame surface
		frame_surface=pygame.image.frombuffer(cvt_color.imageData,self.image_dims,'RGB')

		return frame_surface
Example #8
def detect(image, cascade_file='haarcascade_data/haarcascade_frontalface_alt.xml'):
    image_size = cv.cvGetSize(image)

    # create grayscale version
    grayscale = cv.cvCreateImage(image_size, 8, 1)
    cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)

    # create storage
    storage = cv.cvCreateMemStorage(0)
    cv.cvClearMemStorage(storage)

    # equalize histogram
    cv.cvEqualizeHist(grayscale, grayscale)

    # detect objects
    cascade = cv.cvLoadHaarClassifierCascade(cascade_file, cv.cvSize(1,1))
    faces = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, cv.cvSize(50, 50))

    positions = []
    if faces:
        for i in faces:
            positions.append({'x': i.x, 'y': i.y,
                              'width': i.width, 'height': i.height})
            cv.cvRectangle(image, cv.cvPoint(int(i.x), int(i.y)),
                           cv.cvPoint(int(i.x + i.width), int(i.y + i.height)),
                           cv.CV_RGB(0, 255, 0), 3, 8, 0)
    return positions
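The same grayscale/equalize/detect/box sequence against the modern cv2 API, as a sketch (the cascade path mirrors the default above; the input file is an assumption):

import cv2

img = cv2.imread('input.jpg')  # hypothetical input
gray = cv2.equalizeHist(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
cascade = cv2.CascadeClassifier('haarcascade_data/haarcascade_frontalface_alt.xml')
for (x, y, w, h) in cascade.detectMultiScale(gray, scaleFactor=1.2,
                                             minNeighbors=2, minSize=(50, 50)):
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)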
Example #9
	def read(self) :
		frame=self.input.read()
		cv_rs = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
		cv.cvCvtColor(frame,cv_rs,cv.CV_RGB2GRAY)
		frame = cv_rs
		if self.enabled :
			# I think these functions are too specialized for transforms
			cv.cvSmooth(frame,frame,cv.CV_GAUSSIAN,3, 0, 0, 0 )
			cv.cvErode(frame, frame, None, 1)
			cv.cvDilate(frame, frame, None, 1)
			num_contours,contours=cv.cvFindContours(frame,self.storage,cv.sizeof_CvContour,cv.CV_RETR_LIST,cv.CV_CHAIN_APPROX_NONE,cv.cvPoint(0,0))
			if contours is None :
				return []
			else :
				contours = cv.cvApproxPoly( contours, cv.sizeof_CvContour, self.storage, cv.CV_POLY_APPROX_DP, 3, 1 )
				if contours is None :
					return []
				else :
					final_contours = []
					# hrange() already walks the h_next chain, so no manual
					# advance of `contours` is needed inside this loop
					for c in contours.hrange() :
						area = abs(cv.cvContourArea(c))
						#self.debug_print('Polygon Area: %f'%area)
						if area >= self.min_area :
							lst = []
							for pt in c :
								lst.append((pt.x,pt.y))
							final_contours.append(lst)
					return final_contours

		return []
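A sketch of the same smooth/erode/dilate/contour pipeline with the modern cv2 API (kernel sizes and the approximation epsilon mirror the code above; the input file is an assumption):

import cv2

binary = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)  # hypothetical binary input
binary = cv2.GaussianBlur(binary, (3, 3), 0)
binary = cv2.erode(binary, None, iterations=1)
binary = cv2.dilate(binary, None, iterations=1)
# findContours returns 2 or 3 values depending on the cv2 version;
# the contour list is always second-to-last
contours = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[-2]
polys = [cv2.approxPolyDP(c, 3, True) for c in contours]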
Example #10
 def __findContour(self, filename): #find the contour of images, and save all points in self.vKeyPoints
     self.img = highgui.cvLoadImage (filename)
     self.grayimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8,1)
     self.drawimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8,3)
     cv.cvCvtColor (self.img, self.grayimg, cv.CV_BGR2GRAY)
     cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
     cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
     cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
     cv.cvThreshold( self.grayimg, self.grayimg, self.threshold, self.threshold +100, cv.CV_THRESH_BINARY )
     cv.cvZero(self.drawimg)
     storage = cv.cvCreateMemStorage(0)
     nb_contours, cont = cv.cvFindContours (self.grayimg,
         storage,
         cv.sizeof_CvContour,
         cv.CV_RETR_LIST,
         cv.CV_CHAIN_APPROX_NONE,
         cv.cvPoint (0,0))
         
     cv.cvDrawContours (self.drawimg, cont, cv.cvScalar(255,255,255,0), cv.cvScalar(255,255,255,0), 1, 1, cv.CV_AA, cv.cvPoint (0, 0))
     self.allcurve = []
     idx = 0
     for c in cont.hrange():
         PointArray = cv.cvCreateMat(1, c.total, cv.CV_32SC2)
         PointArray2D32f = cv.cvCreateMat(1, c.total, cv.CV_32FC2)
         cv.cvCvtSeqToArray(c, PointArray, cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
         fpoints = []
         for i in range(c.total):
             kp = myPoint()
             kp.x = cv.cvGet2D(PointArray,0, i)[0]
             kp.y = cv.cvGet2D(PointArray,0, i)[1]
             kp.index = idx
             idx += 1
             fpoints.append(kp)
         self.allcurve.append(fpoints)
     self.curvelength = idx
Example #11
def detectObject(image):
    grayscale = cv.cvCreateImage(size, 8, 1)
    cv.cvFlip(image, None, 1)
    cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)
    storage = cv.cvCreateMemStorage(0)
    cv.cvClearMemStorage(storage)
    cv.cvEqualizeHist(grayscale, grayscale)
    cascade = cv.cvLoadHaarClassifierCascade(haar_file, cv.cvSize(1, 1))
    objects = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                     cv.CV_HAAR_DO_CANNY_PRUNING,
                                     cv.cvSize(100, 100))

    # Draw dots where hands are
    if objects:
        for i in objects:
            #cv.cvRectangle(image, cv.cvPoint( int(i.x), int(i.y)),
            #               cv.cvPoint(int(i.x+i.width), int(i.y+i.height)),
            #               cv.CV_RGB(0,255,0), 3, 8, 0)
            center = cv.cvPoint(int(i.x + i.width / 2),
                                int(i.y + i.height / 2))
            cv.cvCircle(image, center, 10, cv.CV_RGB(0, 0, 0), 5, 8, 0)
            # Left side check
            if (box_forward_left[0].x < center.x < box_backwards_left[1].x and
                    box_forward_left[0].y < center.y < box_backwards_left[1].y):
                set_speed('left', center)
            # Right side check
            if (box_forward_right[0].x < center.x < box_backwards_right[1].x and
                    box_forward_right[0].y < center.y < box_backwards_right[1].y):
                set_speed('right', center)
Example #12
def cv2np(im, format='RGB'):
	if format == 'BGR':
		cv.cvCvtColor( im, im, cv.CV_BGR2RGB )
	numpy_type, nchannels = cv2np_type_dict[cv.cvGetElemType(im)]
	array_size = [im.height, im.width, nchannels]
	# count is in elements, not bytes: numpy_type already carries the
	# element width, so the extra factor of (im.depth/8) would over-read
	np_im = np.frombuffer(im.imageData, dtype=numpy_type,
		count=im.height*im.width*nchannels)
	return np.reshape(np_im, array_size)
Example #13
    def onNextFrame(self, evt):

        frame = gui.cvQueryFrame(self.capture)
        if frame:
            cv.cvCvtColor(frame, frame, cv.CV_BGR2RGB)
            self.bmp = wx.BitmapFromBuffer(frame.width, frame.height, frame.imageData)
            self.Refresh()        
        evt.Skip()
Example #14
	def get_frame(self) :
		frame=self.input.read()

		converted=cv.cvCreateImage(cv.cvGetSize(frame),frame.depth,3)
		cv.cvCvtColor(frame,converted,cv.CV_GRAY2RGB)
		frame = converted

		frame_surface=pygame.image.frombuffer(frame.imageData,(frame.width,frame.height),'RGB')
		return frame_surface
Example #15
    def onNextFrame(self, evt):

        frame = gui.cvQueryFrame(self.capture)
        if frame:
            cv.cvCvtColor(frame, frame, cv.CV_BGR2RGB)
            self.bmp = wx.BitmapFromBuffer(frame.width, frame.height,
                                           frame.imageData)
            self.Refresh()
        evt.Skip()
Example #16
File: chroma.py Project: bmiro/vpc
def getFilter(frameWidth, frameHeight):
    cvNamedWindow("Filtred")
    
    cvCreateTrackbar("hmax", "Filtred", getHlsFilter('hmax'), 180, trackBarChangeHmax)
    cvCreateTrackbar("hmin", "Filtred", getHlsFilter('hmin'), 180, trackBarChangeHmin)
    #cvCreateTrackbar("lmax", "Filtred", hlsFilter['lmax'], 255, trackBarChangeLmax)
    #cvCreateTrackbar("lmin", "Filtred", hlsFilter['lmin'], 255, trackBarChangeLmin)
    cvCreateTrackbar("smax", "Filtred", getHlsFilter('smax'), 255, trackBarChangeSmax)
    cvCreateTrackbar("smin", "Filtred", getHlsFilter('smin'), 255, trackBarChangeSmin)

    cvSetMouseCallback("Filtred", mouseClick, None)
    
    frame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    hlsFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    filtredFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)

    mask = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)

    hFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    lFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    sFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    
    ThHFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThLFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThSFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    
    key = -1
    while key == -1: 
        if not cvGrabFrame(CAM):
            print "Could not grab a frame"
            return
        frame = cvQueryFrame(CAM)
        
        cvCvtColor(frame, hlsFrame, CV_BGR2HLS)
    
        cvSplit(hlsFrame, hFrame, lFrame, sFrame, None)
        
        pixelInRange(hFrame, getHlsFilter('hmin'), getHlsFilter('hmax'), 0, 180, ThHFrame) 
        #pixelInRange(lFrame, getHlsFilter('lmin'), getHlsFilter('lmax'), 0, 255, ThLFrame)
        pixelInRange(sFrame, getHlsFilter('smin'), getHlsFilter('smax'), 0, 255, ThSFrame)
        
        cvSetZero(mask)        
        cvAnd(ThHFrame, ThSFrame, mask)
        
        cvSetZero(filtredFrame)
        
        cvCopy(frame, filtredFrame, mask)
        
        cvShowImage("Filtred", filtredFrame)

        key = cvWaitKey(10)
        if key == 'r':
            key = -1
            resetHlsFilter()
            
    cvDestroyWindow("Filtred")    
Example #17
def cv2np(im, format='RGB'):
	if format == 'BGR':
		cv.cvCvtColor( im, im, cv.CV_BGR2RGB )
	numpy_type, nchannels = cv2np_type_dict[cv.cvGetElemType(im)]
	array_size = [im.height, im.width, nchannels]
	# removed multiplication of the count by (im.depth/8), as numpy
	# already accounts for the element width via numpy_type
	np_im = np.frombuffer(im.imageData, dtype=numpy_type, 
            count=im.height*im.width*nchannels)
	return np.reshape(np_im, array_size)
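The comment above is the key detail: np.frombuffer's count is in elements, not bytes, so the dtype's itemsize already absorbs the im.depth/8 factor. A tiny demonstration (numpy assumed):

import numpy as np

raw = np.zeros(4, dtype=np.int16).tobytes()  # 4 elements, 8 bytes
ok = np.frombuffer(raw, dtype=np.int16, count=4)  # count in elements: fine
# np.frombuffer(raw, dtype=np.int16, count=8) would raise ValueError,
# because the buffer holds only 4 int16 elements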
Example #18
 def onRetry(self, event):
     frame = gui.cvQueryFrame(self.capture)
     cv.cvCvtColor(frame, frame, cv.CV_BGR2RGB)
     self.bmp = wx.BitmapFromBuffer(frame.width, frame.height, frame.imageData)
     self.startTimer()
     self.shotbutton.Show()
     self.retrybutton.Hide()
     self.hasPicture = False
     self.Layout()
     event.Skip()
Example #19
def cv2np(im, format='RGB'):
    if format == 'BGR':
        cv.cvCvtColor(im, im, cv.CV_BGR2RGB)
    numpy_type, nchannels = cv2np_type_dict[cv.cvGetElemType(im)]
    array_size = [im.height, im.width, nchannels]
    # count is in elements, not bytes: numpy_type already carries the
    # element width, so the (im.depth / 8) factor would over-read
    np_im = np.frombuffer(im.imageData,
                          dtype=numpy_type,
                          count=im.height * im.width * nchannels)
    return np.reshape(np_im, array_size)
Example #20
def _detect(image):
    """ Detects faces on `image`
    Parameters:
        @image: image file path

    Returns:
        [((x1, y1), (x2, y2)), ...] List of coordinates for the top-left
                                    and bottom-right corners
    """
    # the OpenCV API says this function is obsolete, but we can't
    # cast the output of cvLoad to a HaarClassifierCascade, so use
    # this anyway; the size parameter is ignored
    capture = cvCreateFileCapture(image)

    if not capture:
        return []

    frame = cvQueryFrame(capture)
    if not frame:
        return []

    img = cvCreateImage(cvSize(frame.width, frame.height),
                        IPL_DEPTH_8U, frame.nChannels)
    cvCopy(frame, img)

    # allocate temporary images
    gray          = cvCreateImage((img.width, img.height),
                                  COPY_DEPTH, COPY_CHANNELS)
    width, height = (cvRound(img.width / IMAGE_SCALE),
                     cvRound(img.height / IMAGE_SCALE))
    small_img     = cvCreateImage((width, height), COPY_DEPTH, COPY_CHANNELS)

    # convert color input image to grayscale
    cvCvtColor(img, gray, CV_BGR2GRAY)

    # scale input image for faster processing
    cvResize(gray, small_img, CV_INTER_LINEAR)
    cvEqualizeHist(small_img, small_img)
    cvClearMemStorage(STORAGE)

    coords = []
    for haar_file in CASCADES:
        cascade = cvLoadHaarClassifierCascade(haar_file, cvSize(1, 1))
        if cascade:
            faces = cvHaarDetectObjects(small_img, cascade, STORAGE, HAAR_SCALE,
                                        MIN_NEIGHBORS, HAAR_FLAGS, MIN_SIZE) or []
            for face_rect in faces:
                # the input to cvHaarDetectObjects was resized, so scale the 
                # bounding box of each face and convert it to two CvPoints
                x, y = face_rect.x, face_rect.y
                pt1 = (int(x * IMAGE_SCALE), int(y * IMAGE_SCALE))
                pt2 = (int((x + face_rect.width) * IMAGE_SCALE),
                       int((y + face_rect.height) * IMAGE_SCALE))
                coords.append((pt1, pt2))
    return coords
Example #21
def _detect(image):
    """ Detects faces on `image`
    Parameters:
        @image: image file path

    Returns:
        [((x1, y1), (x2, y2)), ...] List of coordinates for the top-left
                                    and bottom-right corners
    """
    # the OpenCV API says this function is obsolete, but we can't
    # cast the output of cvLoad to a HaarClassifierCascade, so use
    # this anyway; the size parameter is ignored
    capture = cvCreateFileCapture(image)

    if not capture:
        return []

    frame = cvQueryFrame(capture)
    if not frame:
        return []

    img = cvCreateImage(cvSize(frame.width, frame.height), IPL_DEPTH_8U,
                        frame.nChannels)
    cvCopy(frame, img)

    # allocate temporary images
    gray = cvCreateImage((img.width, img.height), COPY_DEPTH, COPY_CHANNELS)
    width, height = (cvRound(img.width / IMAGE_SCALE),
                     cvRound(img.height / IMAGE_SCALE))
    small_img = cvCreateImage((width, height), COPY_DEPTH, COPY_CHANNELS)

    # convert color input image to grayscale
    cvCvtColor(img, gray, CV_BGR2GRAY)

    # scale input image for faster processing
    cvResize(gray, small_img, CV_INTER_LINEAR)
    cvEqualizeHist(small_img, small_img)
    cvClearMemStorage(STORAGE)

    coords = []
    for haar_file in CASCADES:
        cascade = cvLoadHaarClassifierCascade(haar_file, cvSize(1, 1))
        if cascade:
            faces = cvHaarDetectObjects(small_img, cascade, STORAGE,
                                        HAAR_SCALE, MIN_NEIGHBORS, HAAR_FLAGS,
                                        MIN_SIZE) or []
            for face_rect in faces:
                # the input to cvHaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                x, y = face_rect.x, face_rect.y
                pt1 = (int(x * IMAGE_SCALE), int(y * IMAGE_SCALE))
                pt2 = (int((x + face_rect.width) * IMAGE_SCALE),
                       int((y + face_rect.height) * IMAGE_SCALE))
                coords.append((pt1, pt2))
    return coords
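The coordinate rescaling at the end of _detect, isolated as a sketch (IMAGE_SCALE is assumed to be the module-level constant used above):

IMAGE_SCALE = 1.3  # assumed value

def scale_box(x, y, w, h, scale=IMAGE_SCALE):
    """Map a detection from the shrunken image back to full resolution."""
    pt1 = (int(x * scale), int(y * scale))
    pt2 = (int((x + w) * scale), int((y + h) * scale))
    return pt1, pt2

assert scale_box(10, 10, 100, 100, scale=2.0) == ((20, 20), (220, 220))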
Example #22
	def run(self):

		if self.capture:
			webcam_frame = highgui.cvQueryFrame( self.capture )
		else:
			print "Capture failed!"
			return

		if self.inverted_video.get_active():
			highgui.cvConvertImage(webcam_frame, webcam_frame, highgui.CV_CVTIMG_FLIP)
		highgui.cvConvertImage(webcam_frame, self.display_frame, highgui.CV_CVTIMG_SWAP_RB)

		if False:
			# PROCESS WEBCAM FRAME HERE...
			inputImage = cv.cvCreateImage(cv.cvGetSize(webcam_frame), cv.IPL_DEPTH_8U, 1)
			cv.cvCvtColor(webcam_frame, inputImage, cv.CV_RGB2GRAY);

			cv.cvThreshold(inputImage, inputImage, 128, 255, cv.CV_THRESH_BINARY)

			mysize = cv.cvGetSize(webcam_frame)
			height = mysize.height
			width = mysize.width


			# Find horizontal first-moment:
			if False:
				mysum = 0
				for i in range(height):
					mysum += sum(inputImage[i,:])

				print "Sum:", mysum

			cv.cvMerge( inputImage, inputImage, inputImage, None, self.display_frame )

		incoming_pixbuf = gtk.gdk.pixbuf_new_from_data(
				self.display_frame.imageData,
				gtk.gdk.COLORSPACE_RGB,
				False,
				8,
				self.display_frame.width,
				self.display_frame.height,
				self.display_frame.widthStep)
		incoming_pixbuf.copy_area(0, 0, self.display_frame.width, self.display_frame.height, self.webcam_pixbuf, 0, 0)

		self.video_image.queue_draw()

		return self.video_enabled_button.get_active()
Example #23
def get_nearest_feature( image, this_point, n=2000 ):
	"""
	Get the n-nearest features to a specified image coordinate.
	Features are determined using cvGoodFeaturesToTrack.
	"""

	_red = cv.cvScalar (0, 0, 255, 0)
	_green = cv.cvScalar (0, 255, 0, 0)
	_blue = cv.cvScalar (255,0,0,0)
	_white = cv.cvRealScalar (255)
	_black = cv.cvRealScalar (0)

	quality = 0.01
	min_distance = 4
	N_best = n
	win_size = 11

	grey = cv.cvCreateImage (cv.cvGetSize (image), 8, 1)
	eig = cv.cvCreateImage (cv.cvGetSize (image), 32, 1)
	temp = cv.cvCreateImage (cv.cvGetSize (image), 32, 1)

	# create a grey version of the image
	cv.cvCvtColor ( image, grey, cv.CV_BGR2GRAY)

	points = cv.cvGoodFeaturesToTrack ( 
		grey, eig, temp,
		N_best,
		quality, min_distance, None, 3, 0, 0.04)

	# refine the corner locations
	better_points = cv.cvFindCornerSubPix (
		grey,
		points,
		cv.cvSize (win_size, win_size), cv.cvSize (-1, -1),
		cv.cvTermCriteria (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS,
						   20, 0.03))

	eigs = []
	for i in range(len(points)):
		eigs.append(cv.cvGetMat(eig)[int(points[i].y)][int(points[i].x)])

	mypoints = np.matrix(np.zeros((len(points)*2),dtype=float)).reshape(len(points),2)
	dists = []
	for i,point in enumerate(points):
		mypoints[i,0]=point.x
		mypoints[i,1]=point.y
		dists.append( np.linalg.norm(mypoints[i,:]-this_point) )

	dists = np.array(dists)
	sorteddists = dists.argsort()

	cv.cvDrawCircle ( image, points[ sorteddists[0] ], 5, _green, 2, 8, 0 )

	return better_points[ sorteddists[0] ]
Example #24
 def onRetry(self, event):
     frame = gui.cvQueryFrame(self.capture)
     cv.cvCvtColor(frame, frame, cv.CV_BGR2RGB)
     self.bmp = wx.BitmapFromBuffer(frame.width, frame.height,
                                    frame.imageData)
     self.startTimer()
     self.shotbutton.Show()
     self.retrybutton.Hide()
     self.hasPicture = False
     self.Layout()
     event.Skip()
Example #25
def cv2np(im, format='RGB'):
    if format == 'BGR':
        cv.cvCvtColor(im, im, cv.CV_BGR2RGB)
    numpy_type, nchannels = cv2np_type_dict[cv.cvGetElemType(im)]
    array_size = [im.height, im.width, nchannels]
    #Removed multiplication of size by (im.depth/8) as numpy takes
    #into account of that in numpy_type
    np_im = np.frombuffer(im.imageData,
                          dtype=numpy_type,
                          count=im.height * im.width * nchannels)
    return np.reshape(np_im, array_size)
Example #26
    def texture_features(self, block_size=5, filter_size=3):
        """
        Calculates the texture features associated with the image.
        block_size gives the size of the texture neighborhood to be processed
        filter_size gives the size of the Sobel operator used to find gradient information
        """
        #block_size = cv.cvSize(block_size, block_size)

        #convert to grayscale float
        channels = 1
        self.gray_image = cv.cvCreateImage(
            cv.cvSize(self.im_width, self.im_height),
            cv.IPL_DEPTH_8U,  #cv.IPL_DEPTH_16U, #cv.IPL_DEPTH_32F,
            channels)

        #cv.CV_32FC1, #cv.IPL_DEPTH_32F, #cv.IPL_DEPTH_8U, #cv.IPL_DEPTH_16U,
        channels = 1
        eig_tex = cv.cvCreateImage(
            cv.cvSize(self.im_width * 6, self.im_height), cv.IPL_DEPTH_32F,
            channels)

        cv.cvCvtColor(self.image, self.gray_image, cv.CV_BGR2GRAY)

        #cv.cvAdd(const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL );

        #highgui.cvConvertImage(self.image, self.gray_image)

        cv.cvCornerEigenValsAndVecs(
            self.gray_image,
            eig_tex,  #CvArr* eigenvv,
            block_size,
            filter_size)

        eig_tex = ut.cv2np(eig_tex)
        eig_tex = np.reshape(eig_tex, [self.im_height, self.im_width, 6])
        #print eig_tex.shape ## [480,640,3]
        ## (l1, l2, x1, y1, x2, y2), where
        ## l1, l2 - eigenvalues of M; not sorted
        ## (x1, y1) - eigenvector corresponding to l1
        ## (x2, y2) - eigenvector corresponding to l2
        tex_feat = np.zeros([3, self.im_height * self.im_width],
                            dtype=np.float32)
        tmp = np.reshape(eig_tex, [self.im_height * self.im_width, 6]).T
        s = tmp[0] > tmp[1]
        tex_feat[1:3, s] = tmp[0, s] * tmp[2:4, s]
        tex_feat[0, s] = tmp[1, s]
        tex_feat[1:3, ~s] = tmp[1, ~s] * tmp[4:6, ~s]  # ~s: pixels where l2 >= l1
        tex_feat[0, ~s] = tmp[0, ~s]

        self.tex_feat = tex_feat.T
        self.tex_image = np.reshape(self.tex_feat,
                                    [self.im_height, self.im_width, 3])
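The masking at the end keeps, per pixel, the dominant eigenvalue's eigenvector scaled by that eigenvalue (channels 1-2) and the smaller eigenvalue (channel 0). A reduced numpy sketch of just that selection, on two hand-made pixels (numpy assumed):

import numpy as np

# columns are pixels; rows are (l1, l2, x1, y1, x2, y2) as packed above
tmp = np.array([[5.0, 1.0, 1.0, 0.0, 0.0, 1.0],     # pixel where l1 dominates
                [2.0, 7.0, 1.0, 0.0, 0.0, 1.0]]).T  # pixel where l2 dominates
tex_feat = np.zeros((3, tmp.shape[1]), dtype=np.float32)
s = tmp[0] > tmp[1]
tex_feat[1:3, s] = tmp[0, s] * tmp[2:4, s]  # dominant eigenpair, scaled
tex_feat[0, s] = tmp[1, s]                  # the smaller eigenvalue
tex_feat[1:3, ~s] = tmp[1, ~s] * tmp[4:6, ~s]
tex_feat[0, ~s] = tmp[0, ~s]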
Example #27
def harrisResponse(image):
    """pyvision/point/DetectorHarris.py
    Runs at 10.5 fps...
    """
    gray = cv.cvCreateImage( cv.cvGetSize(image), 8, 1 )
    corners = cv.cvCreateImage( cv.cvGetSize(image), 32, 1 )
    cv.cvCvtColor( image, gray, cv.CV_BGR2GRAY )

    cv.cvCornerHarris(gray,corners,3)
    
    image = filter_and_render_cv(image,corners)
    #IPShellEmbed()()
    return image
Example #28
	def read(self) :
		frame=self.input.read()
		if self.debug :
			raw_frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,frame.nChannels)
			cv.cvCopy(frame,raw_frame,None)
			self.raw_frame_surface=pygame.image.frombuffer(frame.imageData,(frame.width,frame.height),'RGB')

		if self.enabled :
			cv_rs = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)

			# convert color
			cv.cvCvtColor(frame,cv_rs,cv.CV_BGR2GRAY)

			# invert the image
			cv.cvSubRS(cv_rs, 255, cv_rs, None);

			# threshold the image
			frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
			cv.cvThreshold(cv_rs, frame, self.threshold, 255, cv.CV_THRESH_BINARY)

			if self.debug :
				thresh_frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,3)
				cv.cvCvtColor(frame,thresh_frame,cv.CV_GRAY2RGB)
				self.thresh_frame_surface=pygame.image.frombuffer(thresh_frame.imageData,(frame.width,frame.height),'RGB')

			# I think these functions are too specialized for transforms
			cv.cvSmooth(frame,frame,cv.CV_GAUSSIAN,3, 0, 0, 0 )
			cv.cvErode(frame, frame, None, 1)
			cv.cvDilate(frame, frame, None, 1)

			num_contours,contours=cv.cvFindContours(frame,self.storage,cv.sizeof_CvContour,cv.CV_RETR_LIST,cv.CV_CHAIN_APPROX_NONE,cv.cvPoint(0,0))
			if contours is None :
				return []
			else :
				contours = cv.cvApproxPoly( contours, cv.sizeof_CvContour, self.storage, cv.CV_POLY_APPROX_DP, 3, 1 )
				if contours is None :
					return []
				else :
					final_contours = []
					# hrange() already walks the h_next chain, so no manual
					# advance of `contours` is needed inside this loop
					for c in contours.hrange() :
						area = abs(cv.cvContourArea(c))
						#self.debug_print('Polygon Area: %f'%area)
						if area >= self.min_area :
							lst = []
							for pt in c :
								lst.append((pt.x,pt.y))
							final_contours.append(lst)
					return final_contours

		return []
Example #29
	def read(self):
		frame=self.input.read()
		if self.enabled:
			if self.num_channels==frame.nChannels:
				new_frame=frame
			else:
				new_frame=cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,self.num_channels)
			cv.cvCvtColor(frame,new_frame,self.convert_method)
			if self.num_channels == 1:
				cv.cvMerge(new_frame,new_frame,new_frame,None,frame)
			else :
				frame=new_frame

		return frame
Example #30
def get_nearest_feature(image, this_point, n=2000):
    """
	Get the n-nearest features to a specified image coordinate.
	Features are determined using cvGoodFeaturesToTrack.
	"""

    _red = cv.cvScalar(0, 0, 255, 0)
    _green = cv.cvScalar(0, 255, 0, 0)
    _blue = cv.cvScalar(255, 0, 0, 0)
    _white = cv.cvRealScalar(255)
    _black = cv.cvRealScalar(0)

    quality = 0.01
    min_distance = 4
    N_best = n
    win_size = 11

    grey = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)
    eig = cv.cvCreateImage(cv.cvGetSize(image), 32, 1)
    temp = cv.cvCreateImage(cv.cvGetSize(image), 32, 1)

    # create a grey version of the image
    cv.cvCvtColor(image, grey, cv.CV_BGR2GRAY)

    points = cv.cvGoodFeaturesToTrack(grey, eig, temp, N_best, quality,
                                      min_distance, None, 3, 0, 0.04)

    # refine the corner locations
    better_points = cv.cvFindCornerSubPix(
        grey, points, cv.cvSize(win_size, win_size), cv.cvSize(-1, -1),
        cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))

    eigs = []
    for i in range(len(points)):
        eigs.append(cv.cvGetMat(eig)[int(points[i].y)][int(points[i].x)])

    mypoints = np.matrix(np.zeros((len(points) * 2),
                                  dtype=float)).reshape(len(points), 2)
    dists = []
    for i, point in enumerate(points):
        mypoints[i, 0] = point.x
        mypoints[i, 1] = point.y
        dists.append(np.linalg.norm(mypoints[i, :] - this_point))

    dists = np.array(dists)
    sorteddists = dists.argsort()

    cv.cvDrawCircle(image, points[sorteddists[0]], 5, _green, 2, 8, 0)

    return better_points[sorteddists[0]]
Example #31
def main(): # ctrl+c to end
    global h,s,v,h2,v2,s2,d,e
    highgui.cvNamedWindow("Camera 1", 1)
    highgui.cvNamedWindow("Orig", 1)
    highgui.cvCreateTrackbar("H", "Camera 1", h, 256, tb_h)
    highgui.cvCreateTrackbar("S", "Camera 1", s, 256, tb_s)
    highgui.cvCreateTrackbar("V", "Camera 1", v, 256, tb_v)
    highgui.cvCreateTrackbar("H2", "Camera 1", h2, 256, tb_h2)
    highgui.cvCreateTrackbar("S2", "Camera 1", s2, 256, tb_s2)
    highgui.cvCreateTrackbar("V2", "Camera 1", v2, 256, tb_v2)
    highgui.cvCreateTrackbar("Dilate", "Camera 1", d, 30, tb_d)
    highgui.cvCreateTrackbar("Erode", "Camera 1", e, 30, tb_e)
    
    cap = highgui.cvCreateCameraCapture(1)
    highgui.cvSetCaptureProperty(cap, highgui.CV_CAP_PROP_FRAME_WIDTH, IMGW)
    highgui.cvSetCaptureProperty(cap, highgui.CV_CAP_PROP_FRAME_HEIGHT, IMGH)
    c = 0
    t1 = tdraw = time.clock()
    t = 1
    font = cv.cvInitFont(cv.CV_FONT_HERSHEY_PLAIN, 1, 1)
    while c != 0x27:
        image = highgui.cvQueryFrame(cap)
        if not image:
            print "capture failed"
            break
            
        thresh = cv.cvCreateImage(cv.cvSize(IMGW,IMGH),8,1)
        cv.cvSetZero(thresh)
        cv.cvCvtColor(image,image,cv.CV_RGB2HSV)
        cv.cvInRangeS(image, (h,s,v,0), (h2,s2,v2,0), thresh)
        result = cv.cvCreateImage(cv.cvSize(IMGW,IMGH),8,3)
        cv.cvSetZero(result)
        
        cv.cvOr(image,image,result,thresh)
        for i in range(1,e):
            cv.cvErode(result,result)
        for i in range(1,d):
            cv.cvDilate(result,result)
            
        # floodfill objects back in, allowing threshold differences outwards
        
        t2 = time.clock()
        if t2 > tdraw+0.3:
            t = t2-t1
            tdraw=t2
        cv.cvPutText(result, "FPS: " + str(1 / (t)), (0,25), font, (255,255,255))
        t1 = t2
        highgui.cvShowImage("Orig", image)
        highgui.cvShowImage("Camera 1", result)
        c = highgui.cvWaitKey(10)
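A sketch of the same HSV band-pass plus erode/dilate cleanup with the modern cv2 API (the bounds stand in for the trackbar values above; cv2/numpy and the input are assumptions):

import cv2
import numpy as np

bgr = cv2.imread('frame.png')  # hypothetical input frame
hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
lo, hi = np.array((20, 60, 60)), np.array((40, 255, 255))  # assumed h,s,v bounds
mask = cv2.inRange(hsv, lo, hi)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
result = cv2.bitwise_and(bgr, bgr, mask=mask)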
Example #32
 def __FindCorner(self, filename): #find the corners of images, and save all corner points in self.vKeyPoints
     self.img = highgui.cvLoadImage (filename)
     greyimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8,1)
     hsvimg = cv.cvCreateImage(cv.cvGetSize(self.img), 8, 3)
     cv.cvCvtColor(self.img, hsvimg, cv.CV_RGB2HSV)
     cv.cvCvtColor (hsvimg, greyimg, cv.CV_BGR2GRAY)
     
     eigImage = cv.cvCreateImage(cv.cvGetSize(greyimg), cv.IPL_DEPTH_32F, 1)
     tempImage = cv.cvCreateImage(cv.cvGetSize(greyimg), cv.IPL_DEPTH_32F, 1)
     self.points = cv.cvGoodFeaturesToTrack(greyimg, eigImage, tempImage, 2000, 0.01, 5, None, 3, 0, 0.01)
     self.points2 = cv.cvFindCornerSubPix(greyimg, self.points, cv.cvSize(20, 20),
                                          cv.cvSize(-1, -1), cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
     cv.cvReleaseImage(eigImage)
     cv.cvReleaseImage(tempImage)
Example #33
File: chroma.py Project: bmiro/vpc
def mouseClick(event, x, y, flags, param):
    if event == CV_EVENT_LBUTTONUP:
        frame = cvQueryFrame(CAM)
        hlsFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
        cvCvtColor(frame, hlsFrame, CV_BGR2HLS)

        pixel = cvGet2D(hlsFrame, y, x)
                
        setHlsFilter('hmax', pixel[0])      
        setHlsFilter('hmin', pixel[0])        
        #setHlsFilter('lmax', pixel[1])        
        #setHlsFilter('lmin', pixel[1])        
        setHlsFilter('smax', pixel[2])        
        setHlsFilter('smin', pixel[2])        
Example #34
    def get_eye(self):
        eyes = False

        face = self.cap.get_area(commons.haar_cds['Face'])

        if face:
            cvtile = cv.cvCreateMat(128,128,cv.CV_8UC3)
            bwtile = cv.cvCreateMat(128,128,cv.CV_8U)
            areas = [(pt[1].x - pt[0].x) * (pt[1].y - pt[0].y) for pt in face]
            startF = face[areas.index(max(areas))][0]
            endF = face[areas.index(max(areas))][1]
            facerect = self.cap.rect(startF.x, startF.y, endF.x - startF.x, endF.y - startF.y)

            if not facerect:
                return

            cv.cvResize(facerect, cvtile)

            cv.cvCvtColor( cvtile, bwtile, cv.CV_BGR2GRAY )

            leye,reye,lcp,rcp = self.fel.locateEyes(bwtile)
            leye = pv.Point(leye)
            reye = pv.Point(reye)

            leye_x = int((float(leye.X())*facerect.width/cvtile.width) + startF.x)
            leye_y = int((float(leye.Y())*facerect.height/cvtile.height) + startF.y)

            reye_x = int((float(reye.X())*facerect.width/cvtile.width) + startF.x)
            reye_y = int((float(reye.Y())*facerect.height/cvtile.height) + startF.y)

            eye_rect = { "startX" : leye_x - 5,
                         "startY" : leye_y - 5,
                         "endX"   : leye_x + 5,
                         "endY"   : leye_y + 5}

            #self.cap.image(self.cap.rect(leye_x - 5, leye_y - 5, 20, 20))

            if not hasattr(self.cap, "leye"):
                self.cap.add( Point("point", "leye", [int(leye_x), int(leye_y)], parent=self.cap, follow=True) )
            else:
                self.cap.add( Point("point", "reye", [int(reye_x), int(reye_y)], parent=self.cap, follow=True) )

            # Shows the face rectangle
            #self.cap.add( Graphic("rect", "Face", ( startF.x, startF.y ), (endF.x, endF.y), parent=self.cap) )


        self.foreheadOrig = None

        return False
Example #35
    def run(self):

        if self.capture:
            webcam_frame = highgui.cvQueryFrame(self.capture)
        else:
            print "Capture failed!"
            return

        if self.inverted_video.get_active():
            highgui.cvConvertImage(webcam_frame, webcam_frame,
                                   highgui.CV_CVTIMG_FLIP)
        highgui.cvConvertImage(webcam_frame, self.display_frame,
                               highgui.CV_CVTIMG_SWAP_RB)

        if False:
            # PROCESS WEBCAM FRAME HERE...
            inputImage = cv.cvCreateImage(cv.cvGetSize(webcam_frame),
                                          cv.IPL_DEPTH_8U, 1)
            cv.cvCvtColor(webcam_frame, inputImage, cv.CV_RGB2GRAY)

            cv.cvThreshold(inputImage, inputImage, 128, 255,
                           cv.CV_THRESH_BINARY)

            mysize = cv.cvGetSize(webcam_frame)
            height = mysize.height
            width = mysize.width

            # Find horizontal first-moment:
            if False:
                mysum = 0
                for i in range(height):
                    mysum += sum(inputImage[i, :])

                print "Sum:", mysum

            cv.cvMerge(inputImage, inputImage, inputImage, None,
                       self.display_frame)

        incoming_pixbuf = gtk.gdk.pixbuf_new_from_data(
            self.display_frame.imageData, gtk.gdk.COLORSPACE_RGB, False, 8,
            self.display_frame.width, self.display_frame.height,
            self.display_frame.widthStep)
        incoming_pixbuf.copy_area(0, 0, self.display_frame.width,
                                  self.display_frame.height,
                                  self.webcam_pixbuf, 0, 0)

        self.video_image.queue_draw()

        return self.video_enabled_button.get_active()
Example #36
    def texture_features(self, block_size=5, filter_size=3):
        """
        Calculates the texture features associated with the image.
        block_size gives the size of the texture neighborhood to be processed
        filter_size gives the size of the Sobel operator used to find gradient information
        """
        #block_size = cv.cvSize(block_size, block_size)

        #convert to grayscale float
        channels = 1
        self.gray_image = cv.cvCreateImage(cv.cvSize(self.im_width, self.im_height),
                                           cv.IPL_DEPTH_8U, #cv.IPL_DEPTH_16U, #cv.IPL_DEPTH_32F,
                                           channels)


        #cv.CV_32FC1, #cv.IPL_DEPTH_32F, #cv.IPL_DEPTH_8U, #cv.IPL_DEPTH_16U, 
        channels = 1
        eig_tex = cv.cvCreateImage(cv.cvSize(self.im_width*6, self.im_height),
                                    cv.IPL_DEPTH_32F, 
                                    channels)


        cv.cvCvtColor(self.image, self.gray_image, cv.CV_BGR2GRAY)

        #cv.cvAdd(const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL );
        
        #highgui.cvConvertImage(self.image, self.gray_image)
        
        cv.cvCornerEigenValsAndVecs(self.gray_image, eig_tex,#CvArr* eigenvv,
                                    block_size, filter_size)

        eig_tex = ut.cv2np(eig_tex)
        eig_tex = np.reshape(eig_tex, [self.im_height, self.im_width, 6])
        #print eig_tex.shape ## [480,640,3]
        ## (l1, l2, x1, y1, x2, y2), where
        ## l1, l2 - eigenvalues of M; not sorted
        ## (x1, y1) - eigenvector corresponding to l1
        ## (x2, y2) - eigenvector corresponding to l2
        tex_feat = np.zeros([3, self.im_height * self.im_width], dtype=np.float32)
        tmp = np.reshape(eig_tex, [self.im_height * self.im_width, 6]).T
        s = tmp[0] > tmp[1]
        tex_feat[1:3, s] = tmp[0, s] * tmp[2:4, s]
        tex_feat[0, s] = tmp[1, s]
        tex_feat[1:3, ~s] = tmp[1, ~s] * tmp[4:6, ~s]  # ~s: pixels where l2 >= l1
        tex_feat[0, ~s] = tmp[0, ~s]
        
        self.tex_feat = tex_feat.T
        self.tex_image = np.reshape(self.tex_feat, [self.im_height, self.im_width, 3])
Example #37
 def __normImage(self, img, length):
     #print "Generating norm image..."
     width = length
     height = length
     gray = cv.cvCreateImage(cv.cvSize(img.width, img.height), 8, 1)
     small_img = cv.cvCreateImage(cv.cvSize(cv.cvRound(width),
                                            cv.cvRound(height)), 8, 1)
 
     # convert color input image to grayscale
     cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
     # scale input image for faster processing
     cv.cvResize(gray, small_img, cv.CV_INTER_LINEAR)
     cv.cvEqualizeHist(small_img, small_img)
     #cvClearMemStorage(self.storage);
     norm_image = small_img # save the 'normalized image'
     return norm_image
Example #38
def detect_faces_on(path):
    faces = []
    image = cvLoadImage(path)
    # convert to grayscale for faster results
    grayscale = cvCreateImage(cvSize(image.width, image.height), 8, 1)
    cvCvtColor(image, grayscale, CV_BGR2GRAY)
    # smooth picture for better results
    cvSmooth(grayscale, grayscale, CV_GAUSSIAN, 3, 3)

    storage = cvCreateMemStorage(0)
    cvClearMemStorage(storage)
    cvEqualizeHist(grayscale, grayscale)

    cascade_files = [
        # ('/usr/share/opencv/haarcascades/haarcascade_eye_tree_eyeglasses.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_lowerbody.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_mouth.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_profileface.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_eye.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_eyepair_big.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_nose.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_righteye_2splits.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt2.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_fullbody.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_eyepair_small.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_righteye.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_upperbody.xml', (50, 50)),
        ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt_tree.xml',
         (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_lefteye_2splits.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_lefteye.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_upperbody.xml', (50, 50)),
        # ('parojos_22_5.1.xml', (22, 5)),
        # ('Mouth.xml', (22, 15)),
    ]

    for cascade_file, cascade_sizes in cascade_files:
        cascade = cvLoadHaarClassifierCascade(os.path.join(cascade_file),
                                              cvSize(1, 1))
        faces += cvHaarDetectObjects(grayscale, cascade, storage, HAAR_SCALE,
                                     HAAR_NEIGHBORS, CV_HAAR_DO_CANNY_PRUNING,
                                     cvSize(*cascade_sizes))

    return [{'x': f.x, 'y': f.y, 'w': f.width, 'h': f.height} for f in faces]
Example #39
	def _get_color_frame(self,color="r",frame=None) :
		if frame is None :
			frame = self._get_cv_frame()

		# extract the color channel (image is in BGR format)
		cv_r = None #cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
		if color == "r" :
			cv_r = self._get_frame_channel(frame,channel=2)
		elif color == "g" :
			cv_r = self._get_frame_channel(frame,channel=1)
		elif color == "b" :
			cv_r = self._get_frame_channel(frame,channel=0)
		else :
			cv_r = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
			cv.cvCvtColor(frame,cv_r,cv.CV_BGR2GRAY)

		return cv_r
Example #40
    def HarrisPoints(self, imgfile):
        self.points = []
        self.drawimg = highgui.cvLoadImage (imgfile)
        c = 1
        try:
            gray = cv.cvCreateImage (cv.cvGetSize (self.drawimg), 8, 1)
            cv.cvCvtColor(self.drawimg, gray, cv.CV_BGR2GRAY)
            eig = cv.cvCreateImage (cv.cvGetSize (self.drawimg), 32, 1)
            tmpimg = cv.cvCreateImage (cv.cvGetSize (self.drawimg), 32, 1)
            p = cv.cvGoodFeaturesToTrack(gray, eig, tmpimg, 100, 0.1, 20, None, 7, 1, 0.04)
            for x in p:
                cv.cvCircle(self.drawimg, x, 3, cv.CV_RGB(0,255,0), 8, 0)
                self.points.append(x)

        except Exception,e:
            print e
            print 'ERROR: problem handling '+ imgfile 
Example #41
def detect_faces(image):
    """Converts an image to grayscale and prints the locations of any
         faces found"""
    grayscale = cvCreateImage(cvSize(image.width, image.height), 8, 1)
    cvCvtColor(image, grayscale, CV_BGR2GRAY)

    storage = cvCreateMemStorage(0)
    cvClearMemStorage(storage)
    cvEqualizeHist(grayscale, grayscale)

    # The default parameters (scale_factor=1.1, min_neighbors=3,
    # flags=0) are tuned for accurate yet slow face detection. For
    # faster face detection on real video images the better settings are
    # (scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING).
    # --- http://www710.univ-lyon1.fr/~bouakaz/OpenCV-0.9.5/docs/ref/OpenCVRef_Experimental.htm#decl_cvHaarDetectObjects
    # The size box is of the *minimum* detectable object size. Smaller box = more processing time. - http://cell.fixstars.com/opencv/index.php/Facedetect
    minsize = (int(MINFACEWIDTH_PERCENT * image.width + 0.5),
               int(MINFACEHEIGHT_PERCENT * image.height + 0.5))
    print >> sys.stderr, "Min size of face: %s" % `minsize`

    faces = []
    for cascadefile in [
            '/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml'
    ]:
        #    for cascadefile in ['/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml', '/usr/share/opencv/haarcascades/haarcascade_profileface.xml']:
        cascade = cvLoadHaarClassifierCascade(cascadefile, cvSize(1, 1))
        #        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize(50,50))
        #        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 3, 0, cvSize(MINFACEWIDTH,MINFACEHEIGHT))
        #        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 3, 0, cvSize(MINFACEWIDTH,MINFACEHEIGHT))
        #        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize(*minsize))
        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1,
                                     4, CV_HAAR_DO_CANNY_PRUNING,
                                     cvSize(*minsize))


        # faces += cvHaarDetectObjects(grayscale, cascade, storage, scale_factor=1.1, min_neighbors=3, flags=0, cvSize(50,50))
    # print dir(faces)
    bboxes = []
    if faces:
        for f in faces:
            print >> sys.stderr, "\tFace at [(%d,%d) -> (%d,%d)]" % (
                f.x, f.y, f.x + f.width, f.y + f.height)
        bboxes = [Face(f.x, f.y, f.x + f.width, f.y + f.height) for f in faces]
    return bboxes
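The tuning trade-off spelled out in the comment above, restated against the modern cv2 API as a sketch (cascade path and input image are assumptions):

import cv2

gray = cv2.imread('photo.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical input
gray = cv2.equalizeHist(gray)
cascade = cv2.CascadeClassifier(
    '/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml')

# accurate but slow: small scale step, no pruning
slow = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3)

# faster for video: coarser scale step, Canny pruning, a minimum size
fast = cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=2,
                                flags=cv2.CASCADE_DO_CANNY_PRUNING,
                                minSize=(50, 50))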
Example #42
def findBWEdges(original, out, threshold1, threshold2):
	"""Identical with findEdges except that this returns white edges
	on a black background. We really don't need colored edges any longer.
	This also makes it easy to to a manual merge of edge and blur picture."""
	if threshold2 == None:
		threshold2 = threshold1 * 3

	gray = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)

	cv.cvCvtColor(original, gray, cv.CV_BGR2GRAY)

	cv.cvSmooth(gray, out, cv.CV_BLUR, 3, 3, 0)

	cv.cvNot(gray, out)

	cv.cvCanny(gray, out, threshold1, threshold2)

	return out
Example #43
    def HarrisPoints(self, imgfile):
        self.points = []
        self.drawimg = highgui.cvLoadImage(imgfile)
        c = 1
        try:
            gray = cv.cvCreateImage(cv.cvGetSize(self.drawimg), 8, 1)
            cv.cvCvtColor(self.drawimg, gray, cv.CV_BGR2GRAY)
            eig = cv.cvCreateImage(cv.cvGetSize(self.drawimg), 32, 1)
            tmpimg = cv.cvCreateImage(cv.cvGetSize(self.drawimg), 32, 1)
            p = cv.cvGoodFeaturesToTrack(gray, eig, tmpimg, 100, 0.1, 20, None,
                                         7, 1, 0.04)
            for x in p:
                cv.cvCircle(self.drawimg, x, 3, cv.CV_RGB(0, 255, 0), 8, 0)
                self.points.append(x)

        except Exception, e:
            print e
            print 'ERROR: problem handling ' + imgfile
Example #44
def detect(image):
    image_size = opencv.cvGetSize(image)
 
    # create grayscale version
    grayscale = opencv.cvCreateImage(image_size, 8, 1)
    opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)
 
    # create storage
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
 
    # equalize histogram
    opencv.cvEqualizeHist(grayscale, grayscale)
 
    # detect objects
    faces = opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(100, 100))
#    eyes = opencv.cvHaarDetectObjects(grayscale, eye_cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(60,60))
    draw_bounding_boxes(faces, image, 127, 255, 0, 3)
Example #45
def threshold_image(image, n=[]):
    """Record the first 5 images to get a background, then diff current frame with the last saved frame.
    """
    if len(n) < 5:
        # n[4] will be our background
        # First capture a few images
        n.append(cv.cvCloneMat(image))
        if len(n) == 5:
            # last time here
            # could do averaging here.
            pass
        return image

    original = n[4]
    differenceImage = cv.cvCloneMat(image)
    cv.cvAbsDiff(image, original, differenceImage)
    """The threshold value determines the amount of "Change" required 
    before something will show up"""
    thresholdValue = 50  # 32
    cv.cvThreshold(differenceImage, differenceImage, thresholdValue, 255,
                   cv.CV_THRESH_BINARY)

    # Convert to one channel
    gray = cv.cvCreateImage(cv.cvGetSize(differenceImage), 8, 1)
    cv.cvCvtColor(differenceImage, gray, cv.CV_BGR2GRAY)

    # Use median filter to remove salt and pepper noise.
    cv.cvSmooth(gray, gray, cv.CV_MEDIAN, 15)

    # Dilate and the threshold image
    # It adds a border to the object.
    #cv.cvDilate(gray,gray, None, 9)

    # Add a bit of Blur to the threshold mask
    cv.cvSmooth(gray, gray, cv.CV_GAUSSIAN, 5)

    result = cv.cvCloneMat(image)
    cv.cvSetZero(result)

    cv.cvAnd(image, image, result, gray)
    return result
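The same diff/threshold/mask pipeline in modern cv2, condensed into a sketch (two same-sized frames are assumed; the threshold of 50 mirrors the code above):

import cv2

background = cv2.imread('frame0.png')  # hypothetical saved background frame
current = cv2.imread('frame1.png')
diff = cv2.absdiff(current, background)
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)
mask = cv2.medianBlur(mask, 15)           # remove salt-and-pepper noise
mask = cv2.GaussianBlur(mask, (5, 5), 0)  # soften the mask edge
result = cv2.bitwise_and(current, current, mask=mask)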
Example #46
    def __init__(self, parent):

        wx.Frame.__init__(self, parent, -1)

        sizer = wx.BoxSizer(wx.VERTICAL)

        self.capture = gui.cvCreateCameraCapture(0)
        frame = gui.cvQueryFrame(self.capture)
        cv.cvCvtColor(frame, frame, cv.CV_BGR2RGB)

        self.SetSize((frame.width + 300, frame.height + 100))

        self.bmp = wx.BitmapFromBuffer(frame.width, frame.height,
                                       frame.imageData)
        self.displayPanel = wx.StaticBitmap(self, -1, bitmap=self.bmp)
        sizer.Add(self.displayPanel, 0, wx.ALL, 10)

        self.shotbutton = wx.Button(self, -1, "Shot")
        sizer.Add(self.shotbutton, -1, wx.GROW)

        self.retrybutton = wx.Button(self, -1, "Retry")
        sizer.Add(self.retrybutton, -1, wx.GROW)
        self.retrybutton.Hide()

        #events
        self.Bind(wx.EVT_BUTTON, self.onShot, self.shotbutton)
        self.Bind(wx.EVT_BUTTON, self.onRetry, self.retrybutton)
        self.Bind(wx.EVT_PAINT, self.onPaint)
        self.Bind(wx.EVT_CLOSE, self.onClose)

        self.playTimer = wx.Timer(self, self.TIMER_PLAY_ID)
        wx.EVT_TIMER(self, self.TIMER_PLAY_ID, self.onNextFrame)

        self.fps = 8
        self.SetSizer(sizer)
        sizer.Layout()
        self.startTimer()
Example #47
 def __findedge(self, filename):
     tmpimg = highgui.cvLoadImage(filename)
     self.img = cv.cvCreateImage(
         cv.cvSize(int(tmpimg.width * self.enlarge),
                   int(tmpimg.height * self.enlarge)), 8, 3)
     cv.cvResize(tmpimg, self.img, cv.CV_INTER_LINEAR)
     if (self.drawimage):
         self.drawimg = cv.cvCloneImage(self.img)
     else:
         self.drawimg = cv.cvCreateImage(cv.cvGetSize(self.img), 8, 3)
     greyimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height),
                                8, 1)
     cv.cvCvtColor(self.img, greyimg, cv.CV_BGR2GRAY)
     self.allcurve = []
     for i in range(80, 200, 20):
         bimg = cv.cvCloneImage(greyimg)
         cv.cvSmooth(bimg, bimg, cv.CV_MEDIAN, 9)
         #            cv.cvSmooth(bimg, bimg, cv.CV_BILATERAL, 9)
         #            cv.cvSmooth(bimg, bimg, cv.CV_BLUR, 9)
         #            cv.cvSmooth(bimg, bimg, cv.CV_BLUR, 9)
         cv.cvThreshold(greyimg, bimg, i, 255, cv.CV_THRESH_BINARY)
         self.__findcurve(bimg)
Example #48
    def detect_face(self, img):
        """ Detect faces within an image, then draw around them.
			The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned 
			for accurate yet slow object detection. For a faster operation on real video 
			images the settings are: 
			scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING, 
			min_size=<minimum possible face size
		"""
        min_size = cv.cvSize(20, 20)
        image_scale = 1.3
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0
        gray = cv.cvCreateImage(cv.cvSize(img.width, img.height), 8, 1)
        small_img = cv.cvCreateImage(
            cv.cvSize(cv.cvRound(img.width / image_scale),
                      cv.cvRound(img.height / image_scale)), 8, 1)
        cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
        cv.cvResize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.cvEqualizeHist(small_img, small_img)
        cv.cvClearMemStorage(self.storage)

        if (self.cascade):
            t = cv.cvGetTickCount()
            faces = cv.cvHaarDetectObjects(small_img, self.cascade,
                                           self.storage, haar_scale,
                                           min_neighbors, haar_flags, min_size)
            t = cv.cvGetTickCount() - t
            #print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.));
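            # Detected rectangles are in small_img coordinates, so scale
            # them back up by image_scale before drawing on the original.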
            if faces:
                for r in faces:
                    pt1 = cv.cvPoint(int(r.x * image_scale),
                                     int(r.y * image_scale))
                    pt2 = cv.cvPoint(int((r.x + r.width) * image_scale),
                                     int((r.y + r.height) * image_scale))
                    cv.cvRectangle(img, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8,
                                   0)
        return img
Example #49
0
    def detect(self, pil_image, cascade_name, recogn_w=50, recogn_h=50):
        # Get cascade:
        cascade = self.get_cascade(cascade_name)

        image = opencv.PIL2Ipl(pil_image)
        image_size = opencv.cvGetSize(image)
        grayscale = image
        if pil_image.mode == "RGB":
            # create grayscale version
            grayscale = opencv.cvCreateImage(image_size, 8, 1)
            # PIL images are RGB, so CV_RGB2GRAY would be the exact match,
            # but for a grayscale conversion the difference is negligible.
            opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)

        # create storage
        storage = opencv.cvCreateMemStorage(0)
        opencv.cvClearMemStorage(storage)

        # equalize histogram
        opencv.cvEqualizeHist(grayscale, grayscale)

        # detect objects
        return opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                          opencv.CV_HAAR_DO_CANNY_PRUNING,
                                          opencv.cvSize(recogn_w, recogn_h))
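
For reference, the same grayscale + equalize + Haar pipeline can be run
without the class wrapper. This standalone sketch assumes the same old SWIG
`opencv` bindings; the cascade file path and the 20x20 native window size
are illustrative assumptions, not from the original source:

import opencv
from PIL import Image

def detect_standalone(path, cascade_file, recogn_w=50, recogn_h=50):
    # Mirror of the detect() method above, minus the class plumbing.
    # Assumes an RGB input image.
    pil_image = Image.open(path)
    image = opencv.PIL2Ipl(pil_image)
    grayscale = opencv.cvCreateImage(opencv.cvGetSize(image), 8, 1)
    opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)
    opencv.cvEqualizeHist(grayscale, grayscale)
    storage = opencv.cvCreateMemStorage(0)
    # 20x20 is the assumed native window of the cascade; depending on the
    # binding version, cvLoad may be required instead.
    cascade = opencv.cvLoadHaarClassifierCascade(cascade_file,
                                                 opencv.cvSize(20, 20))
    return opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                      opencv.CV_HAAR_DO_CANNY_PRUNING,
                                      opencv.cvSize(recogn_w, recogn_h))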
Example #50
0
    cv.cvSet(mask, 1)

    blob_overlay = False

    while True:

        # 1. capture the current image
        frame = highgui.cvQueryFrame(capture)
        if frame is None:
            # no image captured... end the processing
            break

        # mirror the captured image
        cv.cvFlip(frame, None, 1)

        cv.cvCvtColor(frame, my_grayscale, cv.CV_RGB2GRAY)
        cv.cvThreshold(my_grayscale, my_grayscale, 128, 255,
                       cv.CV_THRESH_BINARY)
        if not blob_overlay:
            # Convert black-and-white version back into three-color representation
            cv.cvCvtColor(my_grayscale, frame, cv.CV_GRAY2RGB)

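        # CBlobResult(source, mask, threshold, find_moments) comes from the
        # cvblobslib wrapper and extracts connected components from the
        # binary image (an assumption based on the call signature here).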
        myblobs = CBlobResult(my_grayscale, mask, 100, True)

        myblobs.filter_blobs(10, 10000)
        blob_count = myblobs.GetNumBlobs()

        for i in range(blob_count):

            my_enumerated_blob = myblobs.GetBlob(i)
            #		print "%d: Area = %d" % (i, my_enumerated_blob.Area())
Example #51
0
def main(args):
    global capture
    global hmax, hmin
    highgui.cvNamedWindow('Camera', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Hue', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Saturation', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Value', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvNamedWindow('Laser', highgui.CV_WINDOW_AUTOSIZE)
    highgui.cvMoveWindow('Camera', 0, 10)
    highgui.cvMoveWindow('Hue', 0, 350)
    highgui.cvMoveWindow('Saturation', 360, 10)
    highgui.cvMoveWindow('Value', 360, 350)
    highgui.cvMoveWindow('Laser', 700, 40)

    highgui.cvCreateTrackbar("Brightness Trackbar", "Camera", 0, 255,
                             change_brightness)
    highgui.cvCreateTrackbar("hmin Trackbar", "Hue", hmin, 180, change_hmin)
    highgui.cvCreateTrackbar("hmax Trackbar", "Hue", hmax, 180, change_hmax)
    highgui.cvCreateTrackbar("smin Trackbar", "Satuation", smin, 255,
                             change_smin)
    highgui.cvCreateTrackbar("smax Trackbar", "Satuation", smax, 255,
                             change_smax)
    highgui.cvCreateTrackbar("vmin Trackbar", "Value", vmin, 255, change_vmin)
    highgui.cvCreateTrackbar("vmax Trackbar", "Value", vmax, 255, change_vmax)

    print "grabbing camera"
    capture = highgui.cvCreateCameraCapture(0)
    print "found camera"
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_WIDTH, 320)
    highgui.cvSetCaptureProperty(capture, highgui.CV_CAP_PROP_FRAME_HEIGHT,
                                 240)

    frame = highgui.cvQueryFrame(capture)
    frameSize = cv.cvGetSize(frame)

    hsv = cv.cvCreateImage(frameSize, 8, 3)
    mask = cv.cvCreateImage(frameSize, 8, 1)
    hue = cv.cvCreateImage(frameSize, 8, 1)
    saturation = cv.cvCreateImage(frameSize, 8, 1)
    value = cv.cvCreateImage(frameSize, 8, 1)
    laser = cv.cvCreateImage(frameSize, 8, 1)

    while 1:
        frame = highgui.cvQueryFrame(capture)

        cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)
        #cv.cvInRangeS(hsv,hsv_min,hsv_max,mask)
        cv.cvSplit(hsv, hue, saturation, value, None)

        cv.cvInRangeS(hue, hmin, hmax, hue)
        cv.cvInRangeS(saturation, smin, smax, saturation)
        cv.cvInRangeS(value, vmin, vmax, value)
        #cv.cvInRangeS(hue,0,180,hue)

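        # Note: the saturation mask is computed above but, as written, only
        # the hue and value masks are ANDed into the laser mask.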
        cv.cvAnd(hue, value, laser)
        #cv.cvAnd(laser, value, laser)

        cenX, cenY = averageWhitePoints(laser)
        #print cenX,cenY
        draw_target(frame, cenX, cenY)
        #draw_target(frame,200,1)

        highgui.cvShowImage('Camera', frame)
        highgui.cvShowImage('Hue', hue)
        highgui.cvShowImage('Saturation', saturation)
        highgui.cvShowImage('Value', value)
        highgui.cvShowImage('Laser', laser)

        k = highgui.cvWaitKey(10)
        if k == " ":
            highgui.cvDestroyAllWindows()
            highgui.cvReleaseCapture(capture)
            sys.exit()
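
averageWhitePoints() is referenced above but not included in this excerpt. A
plausible implementation returns the centroid of the white pixels via image
moments; this is a sketch, not the original author's code:

def averageWhitePoints(img):
    # Centroid of nonzero pixels: first-order moments over the zeroth.
    moments = cv.CvMoments()
    cv.cvMoments(img, moments, 1)  # binary=1: every nonzero pixel weighs 1
    m00 = cv.cvGetSpatialMoment(moments, 0, 0)
    if m00 == 0:
        return 0, 0  # no white pixels found
    return (int(cv.cvGetSpatialMoment(moments, 1, 0) / m00),
            int(cv.cvGetSpatialMoment(moments, 0, 1) / m00))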
Example #52
0
def harrisResponse(frame):
    """pyvision/point/DetectorHarris.py
    Runs at 10.5 fps...
    """
    #gray = cv.cvCreateImage( cv.cvGetSize(image), 8, 1 )
    #corners = cv.cvCreateImage( cv.cvGetSize(image), 32, 1 )
    #cv.cvCvtColor( image, gray, cv.CV_BGR2GRAY )

    #cv.cvCornerHarris(gray,corners,15)

    # This could be done in a persistent way
    # create the images we need
    image = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3)
    grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    prev_grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    prev_pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
    eig = cv.cvCreateImage(cv.cvGetSize(frame), cv.IPL_DEPTH_32F, 1)
    temp = cv.cvCreateImage(cv.cvGetSize(frame), cv.IPL_DEPTH_32F, 1)
    points = [[], []]

    # copy the frame, so we can draw on it
    cv.cvCopy(frame, image)

    # create a grey version of the image
    cv.cvCvtColor(image, grey, cv.CV_BGR2GRAY)

    # search the good points
    points[1] = cv.cvGoodFeaturesToTrack(grey, eig, temp, MAX_COUNT, quality,
                                         min_distance, None, 3, 0, 0.04)

    # refine the corner locations
    cv.cvFindCornerSubPix(
        grey, points[1], cv.cvSize(win_size, win_size), cv.cvSize(-1, -1),
        cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))

    if len(points[0]) > 0:
        # we have points, so display them

        # calculate the optical flow
        [points[1], status], something = cv.cvCalcOpticalFlowPyrLK(
            prev_grey, grey, prev_pyramid, pyramid, points[0], len(points[0]),
            (win_size, win_size), 3, len(points[0]), None,
            cv.cvTermCriteria(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20,
                              0.03), flags)

        # initializations
        point_counter = -1
        new_points = []

        for the_point in points[1]:
            # go through all the points

            # increment the counter
            point_counter += 1

            if add_remove_pt:
                # we have a point to add, so see if it is close to
                # another one. If yes, don't use it
                dx = pt.x - the_point.x
                dy = pt.y - the_point.y
                if dx * dx + dy * dy <= 25:
                    # too close
                    add_remove_pt = 0
                    continue

            if not status[point_counter]:
                # we will disable this point
                continue

            # this point is a correct point
            new_points.append(the_point)

            # draw the current point
            cv.cvCircle(image, cv.cvPointFrom32f(the_point), 3,
                        cv.cvScalar(0, 255, 0, 0), -1, 8, 0)

        # set back the points we keep
        points[1] = new_points

    # swapping
    prev_grey, grey = grey, prev_grey
    prev_pyramid, pyramid = pyramid, prev_pyramid
    points[0], points[1] = points[1], points[0]

    return image
Example #53
0
    if len(sys.argv) > 1:
        filename = sys.argv[1]

    # load the image given on the command line
    image = highgui.cvLoadImage(filename)

    if not image:
        print "Error loading image '%s'" % filename
        sys.exit(-1)

    # create the output image
    col_edge = cv.cvCreateImage(cv.cvSize(image.width, image.height), 8, 3)

    # convert to grayscale
    gray = cv.cvCreateImage(cv.cvSize(image.width, image.height), 8, 1)
    edge = cv.cvCreateImage(cv.cvSize(image.width, image.height), 8, 1)
    cv.cvCvtColor(image, gray, cv.CV_BGR2GRAY)

    # create the window
    highgui.cvNamedWindow(win_name, highgui.CV_WINDOW_AUTOSIZE)

    # create the trackbar
    highgui.cvCreateTrackbar(trackbar_name, win_name, 1, 100, on_trackbar)

    # show the image
    on_trackbar(0)

    # wait a key pressed to end
    highgui.cvWaitKey(0)
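
on_trackbar() is defined earlier in the original sample; in the classic
OpenCV edge demo it smooths the grayscale image, runs Canny with the
trackbar position as the threshold, and copies the source image through the
edge mask. It is reproduced here as a sketch using the globals created above:

def on_trackbar(position):
    # Smooth, detect edges, then show the original colors only where
    # edges were found.
    cv.cvSmooth(gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.cvNot(gray, edge)
    cv.cvCanny(gray, edge, position, position * 3, 3)
    cv.cvSetZero(col_edge)
    cv.cvCopy(image, col_edge, edge)
    highgui.cvShowImage(win_name, col_edge)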
Example #54
0
    def timerEvent(self, ev):
        # Fetch a frame from the video camera
        frame = highgui.cvQueryFrame(self.cap)
        img_orig = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                    cv.IPL_DEPTH_8U, frame.nChannels)
        if (frame.origin == cv.IPL_ORIGIN_TL):
            cv.cvCopy(frame, img_orig)
        else:
            cv.cvFlip(frame, img_orig, 0)

        # Create a grey frame to clarify data
        img_grey = cv.cvCreateImage(cv.cvSize(img_orig.width, img_orig.height),
                                    8, 1)
        cv.cvCvtColor(img_orig, img_grey, cv.CV_BGR2GRAY)
        # Detect objects within the frame
        self.faces_storage = cv.cvCreateMemStorage(0)
        faces = self.detect_faces(img_grey)
        self.circles_storage = cv.cvCreateMemStorage(0)
        circles = self.detect_circles(img_grey)
        self.squares_storage = cv.cvCreateMemStorage(0)
        squares = self.detect_squares(img_grey, img_orig)
        self.lines_storage = cv.cvCreateMemStorage(0)
        lines = self.detect_lines(img_grey, img_orig)

        # Draw faces
        if faces:
            for face in faces:
                pt1, pt2 = self.face_points(face)
                cv.cvRectangle(img_orig, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8,
                               0)

        # Draw lines
        if lines:
            for line in lines:
                cv.cvLine(img_orig, line[0], line[1], cv.CV_RGB(255, 255, 0),
                          3, 8)
        # Draw circles
        if circles:
            for circle in circles:
                cv.cvCircle(
                    img_orig,
                    cv.cvPoint(cv.cvRound(circle[0]), cv.cvRound(circle[1])),
                    cv.cvRound(circle[2]), cv.CV_RGB(0, 0, 255), 3, 8, 0)

        # Draw squares
        if squares:
            i = 0
            while i < squares.total:
                pt = []
                # read 4 vertices
                pt.append(squares[i])
                pt.append(squares[i + 1])
                pt.append(squares[i + 2])
                pt.append(squares[i + 3])
                ## draw the square as a closed polyline
                cv.cvPolyLine(img_orig, [pt], 1, cv.CV_RGB(0, 255, 0), 3,
                              cv.CV_AA, 0)
                i += 4

        # Resize the image to display properly within the window:
        #   CV_INTER_NN - nearest-neighbor interpolation
        #   CV_INTER_LINEAR - bilinear interpolation (used by default)
        #   CV_INTER_AREA - resampling using pixel area relation
        #                   (preferred for image decimation)
        #   CV_INTER_CUBIC - bicubic interpolation
        img_display = cv.cvCreateImage(cv.cvSize(self.width(), self.height()),
                                       8, 3)
        cv.cvResize(img_orig, img_display, cv.CV_INTER_NN)
        img_pil = adaptors.Ipl2PIL(img_display)
        s = StringIO()
        img_pil.save(s, "PNG")
        s.seek(0)
        q_img = QImage()
        q_img.loadFromData(s.read())
        bitBlt(self, 0, 0, q_img)
Example #55
0
    def get_eye(self):
        eyes = False

        face = self.cap.get_area(commons.haar_cds['Face'])

        if face:
            cvtile = cv.cvCreateMat(128, 128, cv.CV_8UC3)
            bwtile = cv.cvCreateMat(128, 128, cv.CV_8U)
            areas = [(pt[1].x - pt[0].x) * (pt[1].y - pt[0].y) for pt in face]
            startF = face[areas.index(max(areas))][0]
            endF = face[areas.index(max(areas))][1]
            facerect = self.cap.rect(startF.x, startF.y, endF.x - startF.x,
                                     endF.y - startF.y)

            if not facerect:
                return

            cv.cvResize(facerect, cvtile)

            cv.cvCvtColor(cvtile, bwtile, cv.CV_BGR2GRAY)

            leye, reye, lcp, rcp = self.fel.locateEyes(bwtile)
            leye = pv.Point(leye)
            reye = pv.Point(reye)

            leye_x = int((float(leye.X()) * facerect.width / cvtile.width) +
                         startF.x)
            leye_y = int((float(leye.Y()) * facerect.height / cvtile.height) +
                         startF.y)

            reye_x = int((float(reye.X()) * facerect.width / cvtile.width) +
                         startF.x)
            reye_y = int((float(reye.Y()) * facerect.height / cvtile.height) +
                         startF.y)

            eye_rect = {
                "startX": leye_x - 5,
                "startY": leye_y - 5,
                "endX": leye_x + 5,
                "endY": leye_y + 5
            }

            #self.cap.image(self.cap.rect(leye_x - 5, leye_y - 5, 20, 20))

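            # As written, the first call adds the left-eye point and later
            # calls add the right-eye point; both are never added in one pass.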
            if not hasattr(self.cap, "leye"):
                self.cap.add(
                    Point("point",
                          "leye", [int(leye_x), int(leye_y)],
                          parent=self.cap,
                          follow=True))
            else:
                self.cap.add(
                    Point("point",
                          "reye", [int(reye_x), int(reye_y)],
                          parent=self.cap,
                          follow=True))

            # Shows the face rectangle
            #self.cap.add( Graphic("rect", "Face", ( startF.x, startF.y ), (endF.x, endF.y), parent=self.cap) )

        self.foreheadOrig = None

        return False
Example #56
0
    hist = cv.cvCreateHist([hdims], cv.CV_HIST_ARRAY, hranges, 1)
    obj_hist = cv.cvCreateHist([hdims], cv.CV_HIST_ARRAY, hranges, 1)
    while 1:
        # do forever

        # 1. capture the current image
        frame = highgui.cvQueryFrame(capture)
        if frame is None:
            # no image captured... end the processing
            break

        # mirror the captured image
        #cv.cvFlip (frame, None, 1)

        # compute the hsv version of the image
        cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)

        # compute which pixels are in the wanted range
        cv.cvInRangeS(hsv, hsv_min, hsv_max, mask)

        # extract the hue from the hsv array
        cv.cvSplit(hsv, hue, None, None, None)

        # select the rectangle of interest in the hue/mask arrays
        hue_roi = cv.cvGetSubRect(hue, selection)
        mask_roi = cv.cvGetSubRect(mask, selection)

        # it's time to compute the histogram
        cv.cvCalcHist(hue_roi, hist, 0, mask_roi)

        # extract the min and max value of the histogram