Example #1
File: face.py Project: smokey42/PhotoBlog
def face_detect(file, closeafter=True):
    """Converts an image to grayscale and prints the locations of any faces found"""

    if hasattr(file, 'read'):
        _, filename = tempfile.mkstemp()
        tmphandle = open(filename, 'wb')  # binary mode: the source is raw image data
        shutil.copyfileobj(file, tmphandle)
        tmphandle.close()
        if closeafter:
            file.close()
        deleteafter = True
    else:
        filename = file
        deleteafter = False

    image = cvLoadImage(filename)
    grayscale = cvCreateImage(cvSize(image.width, image.height), 8, 1)
    cvCvtColor(image, grayscale, CV_BGR2GRAY)

    storage = cvCreateMemStorage(0)
    cvClearMemStorage(storage)
    cvEqualizeHist(grayscale, grayscale)

    cascade = cvLoadHaarClassifierCascade(
        '/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml',
        cvSize(1,1))

    faces = cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                CV_HAAR_DO_CANNY_PRUNING, cvSize(50,50))
    if deleteafter:
        os.unlink(filename)

    return (image.width, image.height), faces
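For comparison with the legacy cv bindings used in these examples, here is a minimal sketch of the same grayscale / equalize / Haar-detect pipeline against the modern cv2 API. It is not part of the original project; the function name face_detect_cv2 is made up, and the cascade path via cv2.data.haarcascades assumes a recent opencv-python install.

import cv2

def face_detect_cv2(filename):
    # Load, convert to grayscale and equalize, mirroring the legacy calls above.
    image = cv2.imread(filename)
    gray = cv2.equalizeHist(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
    cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    # scaleFactor / minNeighbors / minSize mirror the 1.2 / 2 / (50, 50) used above.
    faces = cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=2,
                                     minSize=(50, 50))
    height, width = image.shape[:2]
    return (width, height), faces  # faces is a sequence of (x, y, w, h) boxes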
Example #2
def detectObject(image):
  grayscale = cv.cvCreateImage(size, 8, 1)
  cv.cvFlip(image, None, 1)
  cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)
  storage = cv.cvCreateMemStorage(0)
  cv.cvClearMemStorage(storage)
  cv.cvEqualizeHist(grayscale, grayscale)
  cascade = cv.cvLoadHaarClassifierCascade(haar_file, cv.cvSize(1,1))
  objects = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, 
                                   cv.CV_HAAR_DO_CANNY_PRUNING,
                                   cv.cvSize(100,100))

  # Draw dots where hands are
  if objects:
    for i in objects:
      #cv.cvRectangle(image, cv.cvPoint( int(i.x), int(i.y)),
      #               cv.cvPoint(int(i.x+i.width), int(i.y+i.height)),
      #               cv.CV_RGB(0,255,0), 3, 8, 0)
      center = cv.cvPoint(int(i.x+i.width/2), int(i.y+i.height/2))
      cv.cvCircle(image, center, 10, cv.CV_RGB(0,0,0), 5,8, 0)
      # Left side check
      if (center.x > box_forward_left[0].x and center.x < box_backwards_left[1].x and
          center.y > box_forward_left[0].y and center.y < box_backwards_left[1].y):
        set_speed('left', center)
      # Right side check
      if (center.x > box_forward_right[0].x and center.x < box_backwards_right[1].x and
          center.y > box_forward_right[0].y and center.y < box_backwards_right[1].y):
        set_speed('right', center)
Example #3
def detectObject(image):
    grayscale = cv.cvCreateImage(size, 8, 1)
    cv.cvFlip(image, None, 1)
    cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)
    storage = cv.cvCreateMemStorage(0)
    cv.cvClearMemStorage(storage)
    cv.cvEqualizeHist(grayscale, grayscale)
    cascade = cv.cvLoadHaarClassifierCascade(haar_file, cv.cvSize(1, 1))
    objects = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                     cv.CV_HAAR_DO_CANNY_PRUNING,
                                     cv.cvSize(100, 100))

    # Draw dots where hands are
    if objects:
        for i in objects:
            #cv.cvRectangle(image, cv.cvPoint( int(i.x), int(i.y)),
            #               cv.cvPoint(int(i.x+i.width), int(i.y+i.height)),
            #               cv.CV_RGB(0,255,0), 3, 8, 0)
            center = cv.cvPoint(int(i.x + i.width / 2),
                                int(i.y + i.height / 2))
            cv.cvCircle(image, center, 10, cv.CV_RGB(0, 0, 0), 5, 8, 0)
            # Left side check
            if (center.x > box_forward_left[0].x and
                    center.x < box_backwards_left[1].x and
                    center.y > box_forward_left[0].y and
                    center.y < box_backwards_left[1].y):
                set_speed('left', center)
            # Right side check
            if (center.x > box_forward_right[0].x and
                    center.x < box_backwards_right[1].x and
                    center.y > box_forward_right[0].y and
                    center.y < box_backwards_right[1].y):
                set_speed('right', center)
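The two region checks above are easier to read as a point-in-rectangle helper. A minimal sketch, reusing the box_forward_* / box_backwards_* corner convention from the example; the helper name point_in_box is made up.

def point_in_box(pt, top_left, bottom_right):
  # True when pt lies strictly inside the axis-aligned box.
  return (top_left.x < pt.x < bottom_right.x and
          top_left.y < pt.y < bottom_right.y)

# Usage, mirroring the checks in detectObject:
#   if point_in_box(center, box_forward_left[0], box_backwards_left[1]):
#       set_speed('left', center)
#   if point_in_box(center, box_forward_right[0], box_backwards_right[1]):
#       set_speed('right', center)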
Example #4
    def detect_faces(self, img_grey):
        """ Detect faces within an image, then draw around them.
			The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned 
			for accurate yet slow object detection. For a faster operation on real video 
			images the settings are: 
			scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING, 
			min_size=<minimum possible face size>
		"""
        min_size = cv.cvSize(20, 20)
        self.image_scale = 1.3
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0

        # Create a small image for better performance
        small_size = cv.cvSize(cv.cvRound(img_grey.width / self.image_scale),
                               cv.cvRound(img_grey.height / self.image_scale))
        small_img = cv.cvCreateImage(small_size, 8, 1)
        cv.cvResize(img_grey, small_img, cv.CV_INTER_LINEAR)
        cv.cvEqualizeHist(small_img, small_img)
        cv.cvClearMemStorage(self.faces_storage)

        if (self.cascade):
            t = cv.cvGetTickCount()
            faces = cv.cvHaarDetectObjects(small_img, self.cascade,
                                           self.faces_storage, haar_scale,
                                           min_neighbors, haar_flags, min_size)
            t = cv.cvGetTickCount() - t
            cv.cvReleaseImage(small_img)
            #print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.));
            return faces
Example #5
File: face.py Project: 1stvamp/stalkertux
def detect(image, cascade_file='haarcascade_data/haarcascade_frontalface_alt.xml'):
    image_size = cv.cvGetSize(image)

    # create grayscale version
    grayscale = cv.cvCreateImage(image_size, 8, 1)
    cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)

    # create storage
    storage = cv.cvCreateMemStorage(0)
    cv.cvClearMemStorage(storage)

    # equalize histogram
    cv.cvEqualizeHist(grayscale, grayscale)

    # detect objects
    cascade = cv.cvLoadHaarClassifierCascade(cascade_file, cv.cvSize(1,1))
    faces = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, cv.cvSize(50, 50))

    positions = []
    if faces:
        for i in faces:
            positions.append({'x': i.x, 'y': i.y,
                              'width': i.width, 'height': i.height})
            cv.cvRectangle(image, cv.cvPoint(int(i.x), int(i.y)),
                           cv.cvPoint(int(i.x + i.width), int(i.y + i.height)),
                           cv.CV_RGB(0, 255, 0), 3, 8, 0)
    return positions
Example #6
	def detect_faces(self, img_grey):
		""" Detect faces within an image, then draw around them.
			The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned 
			for accurate yet slow object detection. For a faster operation on real video 
			images the settings are: 
			scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING, 
			min_size=<minimum possible face size>
		"""
		min_size								= cv.cvSize(20,20)
		self.image_scale						= 1.3
		haar_scale								= 1.2
		min_neighbors							= 2
		haar_flags								= 0

		# Create a small image for better performance
		small_size								= cv.cvSize(cv.cvRound(img_grey.width/self.image_scale),cv.cvRound(img_grey.height/self.image_scale))
		small_img								= cv.cvCreateImage(small_size, 8, 1)
		cv.cvResize(img_grey, small_img, cv.CV_INTER_LINEAR)
		cv.cvEqualizeHist(small_img, small_img)
		cv.cvClearMemStorage(self.faces_storage)

		if(self.cascade):
			t									= cv.cvGetTickCount();
			faces								= cv.cvHaarDetectObjects(small_img,
																		self.cascade,
																		self.faces_storage,
																		haar_scale,
																		min_neighbors,
																		haar_flags,
																		min_size)
			t									= cv.cvGetTickCount() - t
			cv.cvReleaseImage(small_img)
			#print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.));
			return faces
Example #7
File: facedetect.py Project: agrimk/faces
def _detect(image):
    """ Detects faces on `image`
    Parameters:
        @image: image file path

    Returns:
        [((x1, y1), (x2, y2)), ...] List of coordinates for the top-left
                                    and bottom-right corners
    """
    # The OpenCV API says this function is obsolete, but we can't cast the
    # output of cvLoad to a HaarClassifierCascade, so we use it anyway; the
    # size parameter is ignored.
    capture = cvCreateFileCapture(image)

    if not capture:
        return []

    frame = cvQueryFrame(capture)
    if not frame:
        return []

    img = cvCreateImage(cvSize(frame.width, frame.height),
                        IPL_DEPTH_8U, frame.nChannels)
    cvCopy(frame, img)

    # allocate temporary images
    gray          = cvCreateImage((img.width, img.height),
                                  COPY_DEPTH, COPY_CHANNELS)
    width, height = (cvRound(img.width / IMAGE_SCALE),
                     cvRound(img.height / IMAGE_SCALE))
    small_img     = cvCreateImage((width, height), COPY_DEPTH, COPY_CHANNELS)

    # convert color input image to grayscale
    cvCvtColor(img, gray, CV_BGR2GRAY)

    # scale input image for faster processing
    cvResize(gray, small_img, CV_INTER_LINEAR)
    cvEqualizeHist(small_img, small_img)
    cvClearMemStorage(STORAGE)

    coords = []
    for haar_file in CASCADES:
        cascade = cvLoadHaarClassifierCascade(haar_file, cvSize(1, 1))
        if cascade:
            faces = cvHaarDetectObjects(small_img, cascade, STORAGE, HAAR_SCALE,
                                        MIN_NEIGHBORS, HAAR_FLAGS, MIN_SIZE) or []
            for face_rect in faces:
                # the input to cvHaarDetectObjects was resized, so scale the 
                # bounding box of each face and convert it to two CvPoints
                x, y = face_rect.x, face_rect.y
                pt1 = (int(x * IMAGE_SCALE), int(y * IMAGE_SCALE))
                pt2 = (int((x + face_rect.width) * IMAGE_SCALE),
                       int((y + face_rect.height) * IMAGE_SCALE))
                coords.append((pt1, pt2))
    return coords
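What this example adds over the earlier ones is detecting on a downscaled copy for speed and then multiplying the resulting rectangles back up by IMAGE_SCALE. A minimal sketch of that step against the modern cv2 API; detect_scaled and the detection parameters are illustrative, and only IMAGE_SCALE plays the same role as in the example.

import cv2

IMAGE_SCALE = 1.3  # illustrative value; the example takes it from module configuration

def detect_scaled(gray, cascade):
    # Detect on a smaller, equalized copy, then map the boxes back to full resolution.
    small = cv2.resize(gray, (int(gray.shape[1] / IMAGE_SCALE),
                              int(gray.shape[0] / IMAGE_SCALE)),
                       interpolation=cv2.INTER_LINEAR)
    small = cv2.equalizeHist(small)
    faces = cascade.detectMultiScale(small, scaleFactor=1.2, minNeighbors=2)
    return [((int(x * IMAGE_SCALE), int(y * IMAGE_SCALE)),
             (int((x + w) * IMAGE_SCALE), int((y + h) * IMAGE_SCALE)))
            for (x, y, w, h) in faces]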
Example #8
def _detect(image):
    """ Detects faces on `image`
    Parameters:
        @image: image file path

    Returns:
        [((x1, y1), (x2, y2)), ...] List of coordinates for the top-left
                                    and bottom-right corners
    """
    # The OpenCV API says this function is obsolete, but we can't cast the
    # output of cvLoad to a HaarClassifierCascade, so we use it anyway; the
    # size parameter is ignored.
    capture = cvCreateFileCapture(image)

    if not capture:
        return []

    frame = cvQueryFrame(capture)
    if not frame:
        return []

    img = cvCreateImage(cvSize(frame.width, frame.height), IPL_DEPTH_8U,
                        frame.nChannels)
    cvCopy(frame, img)

    # allocate temporary images
    gray = cvCreateImage((img.width, img.height), COPY_DEPTH, COPY_CHANNELS)
    width, height = (cvRound(img.width / IMAGE_SCALE),
                     cvRound(img.height / IMAGE_SCALE))
    small_img = cvCreateImage((width, height), COPY_DEPTH, COPY_CHANNELS)

    # convert color input image to grayscale
    cvCvtColor(img, gray, CV_BGR2GRAY)

    # scale input image for faster processing
    cvResize(gray, small_img, CV_INTER_LINEAR)
    cvEqualizeHist(small_img, small_img)
    cvClearMemStorage(STORAGE)

    coords = []
    for haar_file in CASCADES:
        cascade = cvLoadHaarClassifierCascade(haar_file, cvSize(1, 1))
        if cascade:
            faces = cvHaarDetectObjects(small_img, cascade, STORAGE,
                                        HAAR_SCALE, MIN_NEIGHBORS, HAAR_FLAGS,
                                        MIN_SIZE) or []
            for face_rect in faces:
                # the input to cvHaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                x, y = face_rect.x, face_rect.y
                pt1 = (int(x * IMAGE_SCALE), int(y * IMAGE_SCALE))
                pt2 = (int((x + face_rect.width) * IMAGE_SCALE),
                       int((y + face_rect.height) * IMAGE_SCALE))
                coords.append((pt1, pt2))
    return coords
Example #9
File: CVFaces.py Project: lamielle/aether
	def read(self):
		'''Run OpenCV's Haar detection on the current frame, returning a list of the bounding boxes of the faces'''

		#Read the frame to run face detection on
		frame=self.input.read()

		#Detect the faces
		#cv_faces is cv.CvSeq of cv.CvRect objects that have x,y,width,height members
		cv_faces=cv.cvHaarDetectObjects(frame,self.cascade,self.storage,1.1,2,cv.CV_HAAR_DO_CANNY_PRUNING,cv.cvSize(10,10))

		# Corners in order: top-left, top-right, bottom-right, bottom-left
		return [((face.x, face.y),
		         (face.x + face.width, face.y),
		         (face.x + face.width, face.y + face.height),
		         (face.x, face.y + face.height)) for face in cv_faces]
Example #10
def detect_faces_on(path):
    faces = []
    image = cvLoadImage(path)
    # convert to grayscale for faster results
    grayscale = cvCreateImage(cvSize(image.width, image.height), 8, 1)
    cvCvtColor(image, grayscale, CV_BGR2GRAY)
    # smooth picture for better results
    cvSmooth(grayscale, grayscale, CV_GAUSSIAN, 3, 3)

    storage = cvCreateMemStorage(0)
    cvClearMemStorage(storage)
    cvEqualizeHist(grayscale, grayscale)

    cascade_files = [
        # ('/usr/share/opencv/haarcascades/haarcascade_eye_tree_eyeglasses.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_lowerbody.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_mouth.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_profileface.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_eye.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_eyepair_big.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_nose.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_righteye_2splits.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt2.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_fullbody.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_eyepair_small.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_righteye.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_upperbody.xml', (50, 50)),
        ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt_tree.xml',
         (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_lefteye_2splits.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_lefteye.xml', (50, 50)),
        # ('/usr/share/opencv/haarcascades/haarcascade_mcs_upperbody.xml', (50, 50)),
        # ('parojos_22_5.1.xml', (22, 5)),
        # ('Mouth.xml', (22, 15)),
    ]

    for cascade_file, cascade_sizes in cascade_files:
        cascade = cvLoadHaarClassifierCascade(os.path.join(cascade_file),
                                              cvSize(1, 1))
        faces += cvHaarDetectObjects(grayscale, cascade, storage, HAAR_SCALE,
                                     HAAR_NEIGHBORS, CV_HAAR_DO_CANNY_PRUNING,
                                     cvSize(*cascade_sizes))

    return [{'x': f.x, 'y': f.y, 'w': f.width, 'h': f.height} for f in faces]
Example #11
def detect_faces(image):
    """Converts an image to grayscale and prints the locations of any
         faces found"""
    grayscale = cvCreateImage(cvSize(image.width, image.height), 8, 1)
    cvCvtColor(image, grayscale, CV_BGR2GRAY)

    storage = cvCreateMemStorage(0)
    cvClearMemStorage(storage)
    cvEqualizeHist(grayscale, grayscale)

    # The default parameters (scale_factor=1.1, min_neighbors=3,
    # flags=0) are tuned for accurate yet slow face detection. For
    # faster face detection on real video images the better settings are
    # (scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING).
    # --- http://www710.univ-lyon1.fr/~bouakaz/OpenCV-0.9.5/docs/ref/OpenCVRef_Experimental.htm#decl_cvHaarDetectObjects
    # The size box is of the *minimum* detectable object size. Smaller box = more processing time. - http://cell.fixstars.com/opencv/index.php/Facedetect
    minsize = (int(MINFACEWIDTH_PERCENT * image.width + 0.5),
               int(MINFACEHEIGHT_PERCENT * image.height))
    print >> sys.stderr, "Min size of face: %s" % ` minsize `

    faces = []
    for cascadefile in [
            '/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml'
    ]:
        #    for cascadefile in ['/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml', '/usr/share/opencv/haarcascades/haarcascade_profileface.xml']:
        cascade = cvLoadHaarClassifierCascade(cascadefile, cvSize(1, 1))
        #        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize(50,50))
        #        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 3, 0, cvSize(MINFACEWIDTH,MINFACEHEIGHT))
        #        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 3, 0, cvSize(MINFACEWIDTH,MINFACEHEIGHT))
        #        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize(*minsize))
        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1,
                                     4, CV_HAAR_DO_CANNY_PRUNING,
                                     cvSize(*minsize))


#        faces += cvHaarDetectObjects(grayscale, cascade, storage, scale_factor=1.1, min_neighbors=3, flags=0, cvSize(50,50))

#    print dir(faces)
    bboxes = []
    if faces:
        for f in faces:
            print >> sys.stderr, "\tFace at [(%d,%d) -> (%d,%d)]" % (
                f.x, f.y, f.x + f.width, f.y + f.height)
        bboxes = [Face(f.x, f.y, f.x + f.width, f.y + f.height) for f in faces]
    return bboxes
Example #12
def detectObjects(image):
    """Converts an image to grayscale and prints the locations of any
    faces found"""
    size = cv.cvSize(image.width, image.height)
    grayscale = cv.cvCreateImage(size, 8, 1)
    cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)

    storage = cv.cvCreateMemStorage(0)
    cv.cvClearMemStorage(storage)
    cv.cvEqualizeHist(grayscale, grayscale)
    cascade = cv.cvLoadHaarClassifierCascade(PATH, cv.cvSize(1, 1))
    faces = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                   cv.CV_HAAR_DO_CANNY_PRUNING, cv.cvSize(30, 30))

    if faces:
        for f in faces:
            print("[(%d,%d) -> (%d,%d)]" % (f.x, f.y, f.x+f.width, f.y+f.height))
Example #13
def detect(image):
    image_size = opencv.cvGetSize(image)
 
    # create grayscale version
    grayscale = opencv.cvCreateImage(image_size, 8, 1)
    opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)
 
    # create storage
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
 
    # equalize histogram
    opencv.cvEqualizeHist(grayscale, grayscale)
 
    # detect objects
    faces = opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(100, 100))
#    eyes = opencv.cvHaarDetectObjects(grayscale, eye_cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(60,60))
    draw_bounding_boxes(faces, image, 127,255,0, 3)
Example #14
def detect_faces_on(path):
    faces = []
    image = cvLoadImage(path)
    # convert to grayscale for faster results
    grayscale = cvCreateImage(cvSize(image.width, image.height), 8, 1)
    cvCvtColor(image, grayscale, CV_BGR2GRAY)
    # smooth picture for better results
    cvSmooth(grayscale, grayscale, CV_GAUSSIAN, 3, 3)

    storage = cvCreateMemStorage(0)
    cvClearMemStorage(storage)
    cvEqualizeHist(grayscale, grayscale)

    cascade_files = [
                     # ('/usr/share/opencv/haarcascades/haarcascade_eye_tree_eyeglasses.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_lowerbody.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_mcs_mouth.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_profileface.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_eye.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_mcs_eyepair_big.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_mcs_nose.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_righteye_2splits.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt2.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_fullbody.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_mcs_eyepair_small.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_mcs_righteye.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_upperbody.xml', (50, 50)),
                     ('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt_tree.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_lefteye_2splits.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_mcs_lefteye.xml', (50, 50)),
                     # ('/usr/share/opencv/haarcascades/haarcascade_mcs_upperbody.xml', (50, 50)),
                     # ('parojos_22_5.1.xml', (22, 5)),
                     # ('Mouth.xml', (22, 15)),
                    ]

    for cascade_file, cascade_sizes in cascade_files:
        cascade = cvLoadHaarClassifierCascade(os.path.join(cascade_file), cvSize(1, 1))
        faces += cvHaarDetectObjects(grayscale, cascade, storage, HAAR_SCALE, HAAR_NEIGHBORS, CV_HAAR_DO_CANNY_PRUNING, cvSize(*cascade_sizes))

    return [{'x': f.x, 'y': f.y, 'w': f.width, 'h': f.height} for f in faces]
Example #15
  def detect(self, pil_image, cascade_name, recogn_w = 50, recogn_h = 50):
    # Get cascade:
    cascade = self.get_cascade(cascade_name)

    image = opencv.PIL2Ipl(pil_image) 
    image_size = opencv.cvGetSize(image)
    grayscale = image
    if pil_image.mode == "RGB": 
      # create grayscale version
      grayscale = opencv.cvCreateImage(image_size, 8, 1)
      # Change to RGB2Gray - I don't think it'll affect the conversion
      opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)
 
    # create storage
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
 
    # equalize histogram
    opencv.cvEqualizeHist(grayscale, grayscale)
 
    # detect objects
    return opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(recogn_w, recogn_h))
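With the modern bindings, the opencv.PIL2Ipl conversion above reduces to a NumPy array conversion. A minimal sketch, assuming an 'RGB' or 8-bit grayscale PIL image as in the example; pil_to_gray is a made-up helper, not part of the original class.

import numpy as np
import cv2

def pil_to_gray(pil_image):
    # PIL -> NumPy; cv2 functions accept NumPy arrays directly.
    arr = np.array(pil_image)
    if pil_image.mode == "RGB":
        arr = cv2.cvtColor(arr, cv2.COLOR_RGB2GRAY)
    return cv2.equalizeHist(arr)  # assumes an 8-bit single-channel result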
Example #16
	def detect_face(self, img):
		""" Detect faces within an image, then draw around them.
			The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned 
			for accurate yet slow object detection. For a faster operation on real video 
			images the settings are: 
			scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING, 
			min_size=<minimum possible face size>
		"""
		min_size								= cv.cvSize(20,20)
		image_scale								= 1.3
		haar_scale								= 1.2
		min_neighbors							= 2
		haar_flags								= 0
		gray									= cv.cvCreateImage(cv.cvSize(img.width,img.height), 8, 1)
		small_img								= cv.cvCreateImage(cv.cvSize(cv.cvRound(img.width/image_scale),
												  cv.cvRound(img.height/image_scale)), 8, 1)
		cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
		cv.cvResize(gray, small_img, cv.CV_INTER_LINEAR)
		cv.cvEqualizeHist(small_img, small_img)
		cv.cvClearMemStorage(self.storage)

		if(self.cascade):
			t									= cv.cvGetTickCount();
			faces								= cv.cvHaarDetectObjects(small_img,
																		self.cascade,
																		self.storage,
																		haar_scale,
																		min_neighbors,
																		haar_flags,
																		min_size)
			t									= cv.cvGetTickCount() - t
			#print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.));
			if faces:
				for r in faces:
					pt1							= cv.cvPoint(int(r.x*image_scale), int(r.y*image_scale))
					pt2							= cv.cvPoint(int((r.x+r.width)*image_scale),
																int((r.y+r.height)*image_scale))
					cv.cvRectangle(img, pt1, pt2, cv.CV_RGB(255,0,0), 3, 8, 0)
		return img
Example #17
File: face.py Project: icosolera/smartcut
def detect(image):
	image_size = cv.cvGetSize(image)
 
	# create grayscale version
	grayscale = cv.cvCreateImage(image_size, 8, 1)
	cv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)
 
	# create storage
	storage = cv.cvCreateMemStorage(0)
	cv.cvClearMemStorage(storage)
 
	# equalize histogram
	cv.cvEqualizeHist(grayscale, grayscale)
 
	# detect objects
	cascade = cv.cvLoadHaarClassifierCascade('haarcascade_frontalface_alt.xml', cv.cvSize(1,1))
	faces = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, cv.cvSize(100, 100))
 
	if faces:
		for i in faces:
			r = image[int(i.y):int(i.y+i.height),int(i.x):int(i.x+i.width)]
			cv.cvSmooth(r,r,cv.CV_BLUR,51,51)
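This example blurs each detected face region in place rather than drawing boxes. A minimal sketch of the same effect against the modern cv2 API; blur_faces is a made-up name, and the 51x51 kernel mirrors the cvSmooth call above.

import cv2

def blur_faces(image, cascade):
    gray = cv2.equalizeHist(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
    for (x, y, w, h) in cascade.detectMultiScale(gray, scaleFactor=1.2,
                                                 minNeighbors=2, minSize=(100, 100)):
        # Replace the face region with a blurred copy of itself.
        image[y:y + h, x:x + w] = cv2.blur(image[y:y + h, x:x + w], (51, 51))
    return image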
Example #18
    def detect_face(self, img):
        """ Detect faces within an image, then draw around them.
			The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned 
			for accurate yet slow object detection. For a faster operation on real video 
			images the settings are: 
			scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING, 
			min_size=<minimum possible face size>
		"""
        min_size = cv.cvSize(20, 20)
        image_scale = 1.3
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0
        gray = cv.cvCreateImage(cv.cvSize(img.width, img.height), 8, 1)
        small_img = cv.cvCreateImage(
            cv.cvSize(cv.cvRound(img.width / image_scale),
                      cv.cvRound(img.height / image_scale)), 8, 1)
        cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
        cv.cvResize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.cvEqualizeHist(small_img, small_img)
        cv.cvClearMemStorage(self.storage)

        if (self.cascade):
            t = cv.cvGetTickCount()
            faces = cv.cvHaarDetectObjects(small_img, self.cascade,
                                           self.storage, haar_scale,
                                           min_neighbors, haar_flags, min_size)
            t = cv.cvGetTickCount() - t
            #print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.));
            if faces:
                for r in faces:
                    pt1 = cv.cvPoint(int(r.x * image_scale),
                                     int(r.y * image_scale))
                    pt2 = cv.cvPoint(int((r.x + r.width) * image_scale),
                                     int((r.y + r.height) * image_scale))
                    cv.cvRectangle(img, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8,
                                   0)
        return img
Example #19
def detect_faces(image):
    """Converts an image to grayscale and prints the locations of any
         faces found"""
    grayscale = cvCreateImage(cvSize(image.width, image.height), 8, 1)
    cvCvtColor(image, grayscale, CV_BGR2GRAY)

    storage = cvCreateMemStorage(0)
    cvClearMemStorage(storage)
    cvEqualizeHist(grayscale, grayscale)

    # The default parameters (scale_factor=1.1, min_neighbors=3,
    # flags=0) are tuned for accurate yet slow face detection. For
    # faster face detection on real video images the better settings are
    # (scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING).
    # --- http://www710.univ-lyon1.fr/~bouakaz/OpenCV-0.9.5/docs/ref/OpenCVRef_Experimental.htm#decl_cvHaarDetectObjects
    # The size box is of the *minimum* detectable object size. Smaller box = more processing time. - http://cell.fixstars.com/opencv/index.php/Facedetect
    minsize = (int(MINFACEWIDTH_PERCENT*image.width+0.5),int(MINFACEHEIGHT_PERCENT*image.height))
    print >> sys.stderr, "Min size of face: %s" % `minsize`

    faces = []
    for cascadefile in ['/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml']:
#    for cascadefile in ['/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml', '/usr/share/opencv/haarcascades/haarcascade_profileface.xml']:
        cascade = cvLoadHaarClassifierCascade(cascadefile, cvSize(1,1))
#        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize(50,50))
#        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 3, 0, cvSize(MINFACEWIDTH,MINFACEHEIGHT))
#        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 3, 0, cvSize(MINFACEWIDTH,MINFACEHEIGHT))
#        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize(*minsize))
        faces += cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 4, CV_HAAR_DO_CANNY_PRUNING, cvSize(*minsize))
#        faces += cvHaarDetectObjects(grayscale, cascade, storage, scale_factor=1.1, min_neighbors=3, flags=0, cvSize(50,50))

#    print dir(faces)
    bboxes = []
    if faces:
        for f in faces:
            print >> sys.stderr, "\tFace at [(%d,%d) -> (%d,%d)]" % (f.x, f.y, f.x+f.width, f.y+f.height)
        bboxes = [Face(f.x, f.y, f.x+f.width, f.y+f.height) for f in faces]
    return bboxes
Example #20
    def detect(self, pil_image, cascade_name, recogn_w=50, recogn_h=50):
        # Get cascade:
        cascade = self.get_cascade(cascade_name)

        image = opencv.PIL2Ipl(pil_image)
        image_size = opencv.cvGetSize(image)
        grayscale = image
        if pil_image.mode == "RGB":
            # create grayscale version
            grayscale = opencv.cvCreateImage(image_size, 8, 1)
            # Change to RGB2Gray - I don't think it'll affect the conversion
            opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)

        # create storage
        storage = opencv.cvCreateMemStorage(0)
        opencv.cvClearMemStorage(storage)

        # equalize histogram
        opencv.cvEqualizeHist(grayscale, grayscale)

        # detect objects
        return opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                          opencv.CV_HAAR_DO_CANNY_PRUNING,
                                          opencv.cvSize(recogn_w, recogn_h))