Example #1
    def detect(self, image):
        # image size is needed by underlying opencv lib to allocate memory
        image_size = opencv.cvGetSize(image)

        # the algorithm works with grayscale images
        grayscale = opencv.cvCreateImage(image_size, 8, 1)
        opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)

        # more underlying c lib memory allocation
        storage = opencv.cvCreateMemStorage(0)
        opencv.cvClearMemStorage(storage)

        # equalize histogram
        opencv.cvEqualizeHist(grayscale, grayscale)

        # detect faces using haar cascade, the used file is trained to
        # detect frontal faces
        cascade = opencv.cvLoadHaarClassifierCascade(
            'haarcascade_frontalface_alt.xml', opencv.cvSize(1, 1))
        faces = opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                           opencv.CV_HAAR_DO_CANNY_PRUNING,
                                           opencv.cvSize(100, 100))

        # draw rectangles around faces
        for face in faces:
            opencv.cvRectangle(
                image, opencv.cvPoint(int(face.x), int(face.y)),
                opencv.cvPoint(int(face.x + face.width),
                               int(face.y + face.height)),
                opencv.CV_RGB(127, 255, 0), 2)

        # return faces cast to a list here, otherwise an obscure bug
        # in opencv will make it segfault if the cast happens later
        return image, list(faces)
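
The examples on this page use the legacy SWIG-based opencv bindings, which were removed from OpenCV long ago. As a hedged sketch only, the same frontal-face detection pipeline with the modern cv2 API might look like the following; the cascade path via cv2.data.haarcascades and the BGR rectangle color are assumptions rather than part of the example above.

import cv2

def detect_faces(image):
    # the detector works on an equalized grayscale copy of the frame
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)

    # cv2.data.haarcascades points at the cascades bundled with opencv-python;
    # the "alt" frontal-face model mirrors the example above
    cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + "haarcascade_frontalface_alt.xml")

    # 1.2 / 2 / (100, 100) mirror the arguments passed to cvHaarDetectObjects
    faces = cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=2,
                                     minSize=(100, 100))

    # draw a rectangle around each detection;
    # CV_RGB(127, 255, 0) becomes (0, 255, 127) in BGR order
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 127), 2)

    return image, list(faces)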
Example #2
File: __init__.py Project: aoloe/shoebot
def findcontours(iplimage, threshold=100):
    srcimage = opencv.cvCloneImage(iplimage)    
    # create the storage area and bw image
    grayscale = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 1)
    opencv.cvCvtColor(srcimage, grayscale, opencv.CV_BGR2GRAY)
    #threshold
    opencv.cvThreshold(grayscale, grayscale, threshold, 255, opencv.CV_THRESH_BINARY)
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)   
    # find the contours
    nb_contours, contours = opencv.cvFindContours (grayscale, storage)
    # comment this out if you do not want approximation
    contours = opencv.cvApproxPoly (contours, opencv.sizeof_CvContour, storage, opencv.CV_POLY_APPROX_DP, 3, 1)
    # next line is for ctypes-opencv
    #contours = opencv.cvApproxPoly (contours, opencv.sizeof(opencv.CvContour), storage, opencv.CV_POLY_APPROX_DP, 3, 1)
    conts = []
    for cont in contours.hrange():
        points=[]
        for pt in cont:
            points.append((pt.x,pt.y))                
        conts.append(points)
    opencv.cvReleaseMemStorage(storage)    
    opencv.cvReleaseImage(srcimage)
    opencv.cvReleaseImage(grayscale)
    return (nb_contours, conts)
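
For comparison, a minimal sketch of the same threshold-and-find-contours step with the modern cv2 API, assuming a BGR NumPy image; the RETR_LIST retrieval mode and CHAIN_APPROX_SIMPLE flag are assumptions, while the 3-pixel approximation tolerance matches the cvApproxPoly call above.

import cv2

def find_contours(image, threshold=100):
    # threshold a grayscale copy to get a binary image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)

    # OpenCV 4.x returns (contours, hierarchy); 3.x prepends the image
    contours, _ = cv2.findContours(binary, cv2.RETR_LIST,
                                   cv2.CHAIN_APPROX_SIMPLE)

    conts = []
    for cont in contours:
        # polygonal approximation, keeping vertices within 3 pixels
        approx = cv2.approxPolyDP(cont, 3, True)
        conts.append([(int(p[0][0]), int(p[0][1])) for p in approx])
    return len(conts), conts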
Example #3
    def determine_next_position(self, image):

        self.image = image
        self.im = array2image(image)
        self.ipl_im = opencv.adaptors.PIL2Ipl(self.im)
        self.storage = opencv.cvCreateMemStorage(0)
        opencv.cvClearMemStorage(self.storage)
        self.cascade = opencv.cvLoadHaarClassifierCascade(
            '/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml',
            opencv.cvSize(1, 1))
        self.faces = opencv.cvHaarDetectObjects(
            self.ipl_im, self.cascade, self.storage, 1.2, 2,
            opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(50, 50))

        if self.faces.total < 1:
            return None

        for f in self.faces:
            print "face detected: %s" % f
            #row and column are inverted in Opencv
            self.pos = (f.y, f.x)
            self.coormin_bbox = (self.pos[0], self.pos[1])
            self.coormax_bbox = (self.pos[0] + f.width, self.pos[1] + f.height)
            self.brightpixel = False
            return (self.pos, self.coormin_bbox, self.coormax_bbox,
                    self.brightpixel)
Example #4
	def get_frame(self, face_rec = False):
		
		image = highgui.cvQueryFrame(self.device)
		face_matches = False
		
		if face_rec:
			
			grayscale = cv.cvCreateImage(cv.cvSize(640, 480), 8, 1)
			cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)
			storage = cv.cvCreateMemStorage(0)
			cv.cvClearMemStorage(storage)
			cv.cvEqualizeHist(grayscale, grayscale)
			
			for cascade in self.haarfiles:
				matches = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, 
										  cv.CV_HAAR_DO_CANNY_PRUNING, cv.cvSize(100,100))
			  
				if matches:
					face_matches = True
					for i in matches:
						cv.cvRectangle(image, cv.cvPoint( int(i.x), int(i.y)),
							cv.cvPoint(int(i.x+i.width), int(i.y+i.height)),
							cv.CV_RGB(0,0,255), 2, 5, 0)
			
			image = cv.cvGetMat(image)
			
		return (image, face_matches)
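
As a hedged sketch, the frame-grabbing part of this example maps onto the modern cv2 API roughly as follows; cv2.VideoCapture(0) stands in for the highgui capture device, and the fixed 640x480 grayscale buffer is no longer needed because cvtColor allocates its output.

import cv2

cap = cv2.VideoCapture(0)      # device 0, like cvCreateCameraCapture(0)
ok, frame = cap.read()         # replaces highgui.cvQueryFrame
if ok:
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
cap.release()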
Example #5
def findcontours(iplimage, threshold=100):
    srcimage = opencv.cvCloneImage(iplimage)
    # create the storage area and bw image
    grayscale = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 1)
    opencv.cvCvtColor(srcimage, grayscale, opencv.CV_BGR2GRAY)
    # threshold
    opencv.cvThreshold(grayscale, grayscale, threshold, 255, opencv.CV_THRESH_BINARY)
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
    # find the contours
    nb_contours, contours = opencv.cvFindContours(grayscale, storage)
    # comment this out if you do not want approximation
    contours = opencv.cvApproxPoly(contours, opencv.sizeof_CvContour, storage, opencv.CV_POLY_APPROX_DP, 3, 1)
    # next line is for ctypes-opencv
    # contours = opencv.cvApproxPoly (contours, opencv.sizeof(opencv.CvContour), storage, opencv.CV_POLY_APPROX_DP, 3, 1)
    conts = []
    for cont in contours.hrange():
        points = []
        for pt in cont:
            points.append((pt.x, pt.y))
        conts.append(points)
    opencv.cvReleaseMemStorage(storage)
    opencv.cvReleaseImage(srcimage)
    opencv.cvReleaseImage(grayscale)
    return (nb_contours, conts)
Example #6
File: fr.py Project: alien9/cam
def detect(image):
    # Find out how large the image is, as the underlying C-based code
    # needs to allocate memory in the following steps
    image_size = opencv.cvGetSize(image)

    # create grayscale version - this is also the point where the allegation about
    # facial recognition being racist might be most true: a Caucasian face tends to have
    # more definition in a grayscaled webcam image than an African face.
    # Adding a routine to overlay edge-detection enhancements might help, but it would
    # need to be applied to the training images as well.
    grayscale = opencv.cvCreateImage(image_size, 8, 1)
    opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)

    # create storage (It is C-based so you need to do this sort of thing)
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)

    # equalize histogram
    opencv.cvEqualizeHist(grayscale, grayscale)

    # detect objects - Haar cascade step
    # In this case, the code uses a frontal_face cascade - trained to spot faces that look directly
    # at the camera. In practice, the training set cannot have included many bearded or hairy people,
    # as the detection routine turned out to be beardist as well as a little racist!
    cascade = opencv.cvLoadHaarClassifierCascade('haarcascade_frontalface_alt.xml', opencv.cvSize(1,1))

    faces = opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(50, 50))

    if faces:
        for face in faces:
            # Hmm should I do a min-size check?
            # Draw a Chartreuse rectangle around the face - Chartreuse rocks
            opencv.cvRectangle(image, opencv.cvPoint( int(face.x), int(face.y)),
                         opencv.cvPoint(int(face.x + face.width), int(face.y + face.height)),
                         opencv.CV_RGB(127, 255, 0), 2) # RGB #7FFF00 width=2
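
The comments above suggest overlaying edge-detection enhancements before detection; a minimal sketch of that idea with the modern cv2 API, where the Canny thresholds and blend weights are assumptions.

import cv2

def enhance_with_edges(gray):
    # blend an edge map back into the grayscale image; the 100/200 Canny
    # thresholds and the 0.8/0.2 weights are placeholders, not tuned values
    edges = cv2.Canny(gray, 100, 200)
    return cv2.addWeighted(gray, 0.8, edges, 0.2, 0)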
Example #7
    def detect(self, image):
        # image size is needed by underlying opencv lib to allocate memory
        image_size = opencv.cvGetSize(image)

        # the algorithm works with grayscale images
        grayscale = opencv.cvCreateImage(image_size, 8, 1)
        opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)

        # more underlying c lib memory allocation
        storage = opencv.cvCreateMemStorage(0)
        opencv.cvClearMemStorage(storage)

        # equalize histogram
        opencv.cvEqualizeHist(grayscale, grayscale)

        # detect faces using haar cascade, the used file is trained to
        # detect frontal faces
        cascade = opencv.cvLoadHaarClassifierCascade(
            'haarcascade_frontalface_alt.xml', opencv.cvSize(1, 1))
        faces = opencv.cvHaarDetectObjects(
            grayscale, cascade, storage, 1.2, 2,
            opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(100, 100))

        # draw rectangles around faces
        for face in faces:
            opencv.cvRectangle(
                image, opencv.cvPoint(int(face.x), int(face.y)),
                opencv.cvPoint(int(face.x + face.width),
                               int(face.y + face.height)),
                opencv.CV_RGB(127, 255, 0), 2)

        # return faces cast to a list here, otherwise an obscure bug
        # in opencv will make it segfault if the cast happens later
        return image, list(faces)
Example #8
File: __init__.py Project: aoloe/shoebot
 def detectObject(self, classifier):
     self.grayscale = opencv.cvCreateImage(opencv.cvGetSize(self.iplimage), 8, 1)
     opencv.cvCvtColor(self.iplimage, self.grayscale, opencv.CV_BGR2GRAY)
     self.storage = opencv.cvCreateMemStorage(0)
     opencv.cvClearMemStorage(self.storage)
     opencv.cvEqualizeHist(self.grayscale, self.grayscale)
     
     try:
         self.cascade = opencv.cvLoadHaarClassifierCascade(os.path.join(os.path.dirname(__file__), classifier+".xml"),opencv.cvSize(1,1))
     except:
         raise AttributeError("could not load classifier file")
     
     self.objects = opencv.cvHaarDetectObjects(self.grayscale, self.cascade, self.storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(50,50))
     
     return self.objects        
Example #9
    def detectObject(self, classifier):
        self.grayscale = opencv.cvCreateImage(opencv.cvGetSize(self.iplimage), 8, 1)
        opencv.cvCvtColor(self.iplimage, self.grayscale, opencv.CV_BGR2GRAY)
        self.storage = opencv.cvCreateMemStorage(0)
        opencv.cvClearMemStorage(self.storage)
        opencv.cvEqualizeHist(self.grayscale, self.grayscale)

        try:
            self.cascade = opencv.cvLoadHaarClassifierCascade(os.path.join(os.path.dirname(__file__), classifier+".xml"),opencv.cvSize(1, 1))
        except:
            raise AttributeError("could not load classifier file")

        self.objects = opencv.cvHaarDetectObjects(self.grayscale, self.cascade, self.storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(50, 50))

        return self.objects
Example #10
    def detectFaces(self):
        self._faces = []
        frame = self._camera.getFrameAsIpl()
        storage = opencv.cvCreateMemStorage(0)
        opencv.cvClearMemStorage(storage)
        cascade = opencv.cvLoadHaarClassifierCascade(self._trainedHaar,
                                                     opencv.cvSize(1, 1))

        mugsht = opencv.cvHaarDetectObjects(frame, cascade, storage, 1.2, 2,
                                            opencv.CV_HAAR_DO_CANNY_PRUNING,
                                            opencv.cvSize(75, 75))
        if mugsht:
            for mug in mugsht:
                face = [0, 0, 0, 0]
                face[0], face[1], face[2], face[3] = mug.x, mug.y, mug.width, mug.height
                self._faces.append(face)
Example #11
def detectHaar(iplimage, classifier):
    srcimage = opencv.cvCloneImage(iplimage)
    grayscale = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 1)
    opencv.cvCvtColor(srcimage, grayscale, opencv.CV_BGR2GRAY)
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
    opencv.cvEqualizeHist(grayscale, grayscale)
    try:
        cascade = opencv.cvLoadHaarClassifierCascade(os.path.join(os.path.dirname(__file__), classifier + ".xml"), opencv.cvSize(1, 1))
    except:
        raise AttributeError("could not load classifier file")
    objs = opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(50, 50))
    objects = []
    for obj in objs:
        objects.append(Haarobj(obj))
    opencv.cvReleaseImage(srcimage)
    opencv.cvReleaseImage(grayscale)
    opencv.cvReleaseMemStorage(storage)
    return objects
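
With the modern cv2 API, the try/except around cvLoadHaarClassifierCascade is usually replaced by an explicit empty() check, because cv2.CascadeClassifier does not raise on a missing file. A small hedged sketch follows; looking the XML file up under cv2.data.haarcascades is an assumption rather than part of the example above.

import cv2

def load_cascade(classifier):
    # look the file up among the cascades bundled with opencv-python
    path = cv2.data.haarcascades + classifier + ".xml"
    cascade = cv2.CascadeClassifier(path)
    if cascade.empty():
        raise AttributeError("could not load classifier file: " + path)
    return cascade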
Example #12
File: dev_foucam.py Project: FOULAB/FOUCAM
    def detectFaces( self ):
        self._faces = []
        frame = self._camera.getFrameAsIpl()
        storage = opencv.cvCreateMemStorage( 0 )
        opencv.cvClearMemStorage( storage )
        cascade = opencv.cvLoadHaarClassifierCascade( self._trainedHaar, opencv.cvSize( 1, 1 ) )

        mugsht = opencv.cvHaarDetectObjects( frame,
                                             cascade,
                                             storage,
                                             1.2,
                                             2,
                                             opencv.CV_HAAR_DO_CANNY_PRUNING,
                                             opencv.cvSize( 75, 75 ) )
        if mugsht:
            for mug in mugsht:
                face = [ 0, 0, 0, 0 ]
                face[0], face[1], face[2], face[3] = mug.x, mug.y, mug.width, mug.height 
                self._faces.append( face )
Example #13
File: __init__.py Project: aoloe/shoebot
def detectHaar(iplimage, classifier):
    srcimage = opencv.cvCloneImage(iplimage)
    grayscale = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 1)
    opencv.cvCvtColor(srcimage, grayscale, opencv.CV_BGR2GRAY)
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
    opencv.cvEqualizeHist(grayscale, grayscale)    
    try:
        cascade = opencv.cvLoadHaarClassifierCascade(os.path.join(os.path.dirname(__file__), classifier+".xml"),opencv.cvSize(1,1))
    except:
        raise AttributeError("could not load classifier file")
    objs = opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(50,50))    
    objects = []    
    for obj in objs:
        objects.append(Haarobj(obj))        
    opencv.cvReleaseImage(srcimage)
    opencv.cvReleaseImage(grayscale)
    opencv.cvReleaseMemStorage(storage)    
    return objects 
Example #14
    def determine_next_position(self,image):

        self.image=image
        self.im=array2image(image)
        self.ipl_im = opencv.adaptors.PIL2Ipl(self.im)
        self.storage = opencv.cvCreateMemStorage(0)
        opencv.cvClearMemStorage(self.storage)
        self.cascade = opencv.cvLoadHaarClassifierCascade('/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml',opencv.cvSize(1,1))
        self.faces = opencv.cvHaarDetectObjects(self.ipl_im, self.cascade, self.storage, 1.2, 2,opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(50,50))

        if self.faces.total < 1:
            return None

        for f in self.faces:
            print "face detected: %s" %f
            #row and column are inverted in Opencv
            self.pos=(f.y,f.x)
            self.coormin_bbox=(self.pos[0],self.pos[1])
            self.coormax_bbox=(self.pos[0]+f.width,self.pos[1]+f.height)
            self.brightpixel=False
            return (self.pos,self.coormin_bbox,self.coormax_bbox,self.brightpixel)
Example #15
  def detect(self, pil_image, cascade_name, recogn_w = 50, recogn_h = 50):
    # Get cascade:
    cascade = self.get_cascade(cascade_name)

    image = opencv.PIL2Ipl(pil_image) 
    image_size = opencv.cvGetSize(image)
    grayscale = image
    if pil_image.mode == "RGB": 
      # create grayscale version
      grayscale = opencv.cvCreateImage(image_size, 8, 1)
      # Change to RGB2Gray - I don't think it'll affect the conversion
      opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)
 
    # create storage
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
 
    # equalize histogram
    opencv.cvEqualizeHist(grayscale, grayscale)
 
    # detect objects
    return opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(recogn_w, recogn_h))
Example #16
    def detect(self, pil_image, cascade_name, recogn_w=50, recogn_h=50):
        # Get cascade:
        cascade = self.get_cascade(cascade_name)

        image = opencv.PIL2Ipl(pil_image)
        image_size = opencv.cvGetSize(image)
        grayscale = image
        if pil_image.mode == "RGB":
            # create grayscale version
            grayscale = opencv.cvCreateImage(image_size, 8, 1)
            # Change to RGB2Gray - I don't think it'll affect the conversion
            opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)

        # create storage
        storage = opencv.cvCreateMemStorage(0)
        opencv.cvClearMemStorage(storage)

        # equalize histogram
        opencv.cvEqualizeHist(grayscale, grayscale)

        # detect objects
        return opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                          opencv.CV_HAAR_DO_CANNY_PRUNING,
                                          opencv.cvSize(recogn_w, recogn_h))
Example #17
 def __init__(self,haarcascade="haarcascade_frontalface_alt.xml"):
   self.cascade = opencv.cvLoadHaarClassifierCascade(haarcascade,opencv.CvSize())
   self.storage = opencv.cvCreateMemStorage(0)
   opencv.cvClearMemStorage(self.storage)
Example #18
import Image
import ImageChops
import ImageOps
import ImageDraw

import freenect
import numpy

# the rest of this snippet also needs the legacy opencv bindings and pygame;
# these import lines are assumed, as they were not part of the original excerpt
import opencv
from opencv import cv, highgui
import pygame


cascadeFile = "/usr/share/doc/opencv-doc/examples/haarcascades/haarcascades/haarcascade_frontalface_default.xml.gz"
cascadeFile = "/usr/share/doc/opencv-doc/examples/haarcascades/haarcascades/haarcascade_frontalface_alt.xml.gz"
cascadeFile = "/usr/share/doc/opencv-doc/examples/haarcascades/haarcascades/haarcascade_frontalface_alt2.xml.gz"
cascadeFile = "/usr/share/doc/opencv-doc/examples/haarcascades/haarcascades/haarcascade_eye_tree_eyeglasses.xml.gz"
#cascade = opencv.cv.cvLoad(cascadeFile)
cascade = opencv.cv.cvLoadHaarClassifierCascade(cascadeFile, opencv.cv.cvSize(1,1))
camera = highgui.cvCreateCameraCapture(0)
storage = opencv.cvCreateMemStorage(0)

fps = 10.0
size = (640,480)
pygame.init()
window = pygame.display.set_mode(size)
pygame.display.set_caption("WebCam Demo")
screen = pygame.display.get_surface()
black = Image.new('RGB', size)
red = Image.new('RGB', size, (255,0,0))

def movementDetector(prev, current):
	# measure the difference
	step = ImageChops.difference(prev, current)
	# join channels
	step = step.convert("L")
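
# The original movementDetector above stops after converting the difference
# image to luminance. A minimal, assumed continuation (the threshold value of
# 32 and the use of getbbox() are not part of the original) could look like:
def movementDetectorSketch(prev, current, threshold=32):
    # per-pixel difference between consecutive frames
    step = ImageChops.difference(prev, current)
    # collapse the channels into a single luminance image
    step = step.convert("L")
    # keep only pixels that changed by more than threshold
    step = step.point(lambda value: 255 if value > threshold else 0)
    # getbbox() is None when nothing moved, else the bounding box of the change
    return step.getbbox()
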
def houghTransform(image, bcFilterConstants, parenttags=None):
    """ Runs the hough circle detection against the image
    
    Keyword Arguments:
    image -- Image instance
    bcFilterConstants -- The configuration under which to run bcFilter detection
    parenttags -- tag string of the calling function
    
    Returns:
    a list of CirclesFilter_.Circle objects
    """

    if bcFilterConstants is None:
        constants = BCFilterConstants
    else:
        constants = bcFilterConstants
        
    cvImage = opencv.PIL2Ipl(image)
    
    # smoothen the Image
    # opencv.cvSmooth( cvImage, cvImage, opencv.CV_GAUSSIAN, BCFilterConstants.masksize, BCFilterConstants.masksize);
    
    storage = opencv.cvCreateMemStorage(0)

    # print the settings that were used to detect circles
    log.info('BCFilterConstants dp:{0}, '\
                                'minimum distance:{1}, '\
                                'high threshold:{2}, '\
                                'accumulator threshold:{3}, '\
                                'minimum radius:{4}, '\
                                'maximum radius:{5}'.format(constants.dp,
                                                      constants.minimumDistance,
                                                      constants.highThreshold,
                                                      constants.accumulatorThreshold,
                                                      constants.minimumRadius,
                                                      constants.maximumRadius)
                                , extra=parenttags)
    
    circles = opencv.cvHoughCircles(cvImage, 
                                    storage,
                                    opencv.CV_HOUGH_GRADIENT,
                                    constants.dp, 
                                    constants.minimumDistance,
                                    constants.highThreshold, 
                                    constants.accumulatorThreshold,
                                    constants.minimumRadius, 
                                    constants.maximumRadius)

    # unpack the circle into a generic tuple
    # !!something wrong with circle.__getitem__ (don't use "tuple(circle)")
    if constants.maximumRadius != 0:
        # neither minimumRadius nor maximumRadius seems to be an absolute limit
        circles = [(float(circle[0]), float(circle[1]), float(circle[2]))
                        for circle in circles
                        if constants.minimumRadius <= circle[2] <= constants.maximumRadius]
    else:
        circles = [(float(circle[0]), float(circle[1]), float(circle[2]))
                        for circle in circles]

    log.debug('Found circles: %s', circles, extra=parenttags)

    return circles
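
For reference, a hedged sketch of the same circle detection with the modern cv2 API; it assumes an 8-bit grayscale NumPy image, and the Gaussian blur stands in for the commented-out cvSmooth call above, so its kernel size is an assumption.

import cv2

def hough_circles(gray, dp=1.0, min_dist=20, high_threshold=100,
                  accumulator_threshold=30, min_radius=0, max_radius=0):
    # a light blur reduces spurious detections in the gradient stage
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    circles = cv2.HoughCircles(blurred, cv2.HOUGH_GRADIENT, dp, min_dist,
                               param1=high_threshold,
                               param2=accumulator_threshold,
                               minRadius=min_radius, maxRadius=max_radius)
    if circles is None:
        return []
    # result has shape (1, N, 3): one (x, y, radius) triple per detection
    return [(float(x), float(y), float(r)) for x, y, r in circles[0]]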