Example #1
	def loadDistMatrixFromFile(self,filename):
		#print mnemosyne_dir+filename
		try:
			#self.VectDist = cv.CreateMat(5,1,cv.CV_32FC1)
			#tmpVectDist = cv.Load(mnemosyne_dir+filename, cv.CreateMemStorage(), name="distortion_coefficients")
			self.VectDist = cv.Load(mnemosyne_dir+filename, cv.CreateMemStorage(), name="distortion_coefficients")
			#cv.Convert(tmpVectDist, self.VectDist)
		except:
			print "Could not load file:",mnemosyne_dir+filename
Example #2
	def loadCamMatrixFromFile(self,filename):
		#print mnemosyne_dir+filename
		try:
			#self.MatCam = cv.CreateMat(3,3,cv.CV_32FC1)
			#tmpMatCam = cv.Load(mnemosyne_dir+filename, cv.CreateMemStorage(), name="camera_matrix")
			self.MatCam = cv.Load(mnemosyne_dir+filename, cv.CreateMemStorage(), name="camera_matrix")
			#cv.Convert(tmpMatCam, self.MatCam)
		except:
			print "Could not load file:",mnemosyne_dir+filename
Example #3
def faces_from_pil_image(pil_image):
    "Return a list of (x,y,h,w) tuples for faces detected in the PIL image"
    storage = cv.CreateMemStorage(0)
    facial_features = cv.Load('haarcascade_frontalface_alt.xml', storage=storage)
    cv_im = cv.CreateImageHeader(pil_image.size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cv_im, pil_image.tostring())
    faces = cv.HaarDetectObjects(cv_im, facial_features, storage)
    # faces includes a `neighbors` field that we aren't going to use here
    return [f[0] for f in faces]
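A minimal usage sketch for the helper above, assuming the legacy cv (OpenCV 2.x) bindings and PIL are installed and that haarcascade_frontalface_alt.xml sits in the working directory; the input file name photo.jpg is only an illustration:

from PIL import Image

# "photo.jpg" is a placeholder input; any RGB photo should work.
pil_image = Image.open("photo.jpg").convert("RGB")
for rect in faces_from_pil_image(pil_image):
    print "face rectangle (x, y, w, h):", rect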
Example #4
    def red_eye(self):
        self.load_cascade_file()
        faces = [
            face for face in self.context.request.focal_points
            if face.origin == 'Face Detection'
        ]
        if faces:
            engine = self.context.modules.engine
            mode, data = engine.image_data_as_rgb()
            mode = mode.lower()
            sz = engine.size
            image = cv.CreateImageHeader(sz, cv.IPL_DEPTH_8U, 3)
            cv.SetData(image, data)

            for face in faces:
                face_x = int(face.x - face.width / 2)
                face_y = int(face.y - face.height / 2)

                face_roi = (int(face_x), int(face_y), int(face.width),
                            int(face.height))

                cv.SetImageROI(image, face_roi)

                eyes = cv.HaarDetectObjects(image, self.cascade,
                                            cv.CreateMemStorage(0), HAAR_SCALE,
                                            MIN_NEIGHBORS, HAAR_FLAGS,
                                            MIN_SIZE)

                for (x, y, w, h), other in self.filter_eyes(eyes):
                    # Set the image Region of interest to be the eye area [this reduces processing time]
                    cv.SetImageROI(image, (face_x + x, face_y + y, w, h))

                    if self.context.request.debug:
                        cv.Rectangle(image, (0, 0), (w, h),
                                     cv.RGB(255, 255, 255), 2, 8, 0)

                    for pixel in self.get_pixels(image, w, h, mode):
                        green_blue_avg = (pixel['g'] + pixel['b']) / 2

                        if not green_blue_avg:
                            red_intensity = RED_THRESHOLD
                        else:
                            # Calculate the intensity compared to blue and green average
                            red_intensity = pixel['r'] / green_blue_avg

                        # If the red intensity is greater than 2.0, lower the value
                        if red_intensity >= RED_THRESHOLD:
                            new_red_value = (pixel['g'] + pixel['b']) / 2
                            # Insert the new red value for the pixel to the image
                            cv.Set2D(
                                image, pixel['y'], pixel['x'],
                                cv.RGB(new_red_value, pixel['g'], pixel['b']))

                    # Reset the image region of interest back to full image
                    cv.ResetImageROI(image)

            self.context.modules.engine.set_image_data(image.tostring())
Example #5
    def __init__(self):
        self.active = False
        #self.depth8 = cv.CreateImage((640,480), 8, 1)
        self.depth8 = ProcessContours.DEPTH8

        self.sweep_images = [
            cv.CreateImage((640, 480), 8, 1) for i in range(64)
        ]
        self.storage = cv.CreateMemStorage(0)
Example #6
    def get_candidates(self, m_d):
        '''
        Get candidates for this corner from new image
        @param m_d: marker_detector
        '''
        # if this corner is wider than MAX_CORNER_ANGLE, we probably won't
        # find it anyway. Instead, let's find narrow corners and calculate its
        # position
        if self.angle > MAX_CORNER_ANGLE: return []
        cr = self.get_rectangle(m_d)
        cr = correct_rectangle(cr, m_d.size)
        if cr is None: return []
        m_d.set_ROI(cr)
        tmp_img = m_d.tmp_img
        gray_img = m_d.gray_img
        bw_img = m_d.bw_img
        canny = m_d.canny_img
        cv.Copy(gray_img, tmp_img)
        cv.Threshold(gray_img, bw_img, 125, 255, cv.CV_THRESH_OTSU)
        if self.black_inside > 0:
            cv.Not(bw_img, bw_img)
        cv.Canny(gray_img, canny, 300, 500)
        cv.Or(bw_img, canny, bw_img)
        tmpim = m_d.canny_img
        cv.Copy(bw_img, tmpim)
        cv.Set2D(tmpim, 1, 1, 255)
        conts = cv.FindContours(tmpim, cv.CreateMemStorage(),
                                cv.CV_RETR_EXTERNAL)
        cv.Zero(tmpim)
        m_d.set_ROI()
        cv.SetImageROI(tmpim, cr)
        result = []
        while conts:
            aconts = cv.ApproxPoly(conts, cv.CreateMemStorage(),
                                   cv.CV_POLY_APPROX_DP, 2)
            nconts = list(aconts)
            cv.PolyLine(tmpim, [nconts], True, (255, 255, 255))
            self._append_candidates_from_conts(cr, result, nconts, m_d)
            conts = conts.h_next()


#        print result
#        db.show([tmpim,m_d.draw_img], 'tmpim', 0, 0, 0)
        return result
Example #7
def calibrate(cam):
    print "\n\nCamera calibration " + str(cam)

    intrinsicPath = "/var/www/MuseumVisitors/calibration/calibIntrCam" + str(
        cam) + "-R1280x800.yml"
    extrinsicPath = "/var/www/MuseumVisitors/calibration/calibExtr_" + str(
        cam) + ".yaml"

    print "Rotation matrix:"
    VectRot = cv2.cv.Load(extrinsicPath,
                          cv.CreateMemStorage(),
                          name="Rotation")
    MatRot = cv.CreateMat(3, 3, cv.CV_64FC1)
    cv.Rodrigues2(VectRot, MatRot)
    MatRot = np.matrix(MatRot)
    print MatRot

    print "Translation vector:"
    VectTrans = cv2.cv.Load(extrinsicPath,
                            cv.CreateMemStorage(),
                            name="Translation")
    VectTrans = np.matrix(VectTrans)
    print VectTrans

    print "Camera matrix:"
    MatCam = cv2.cv.Load(intrinsicPath,
                         cv.CreateMemStorage(),
                         name="camera_matrix")
    MatCam = np.matrix(MatCam)
    print MatCam

    RotTrans = np.concatenate((MatRot[0:3, 0:2], VectTrans), axis=1)
    print(RotTrans)

    Hw = MatCam * RotTrans
    print(Hw)
    HT = (Hw.T).I
    HI2W = Hw.I
    HW2I = (HI2W.I).T
    print "HI2W:"
    print(HI2W)

    print "HW2I:"
    print(HW2I)
Example #8
def init_images():
    """ Creates all the images we'll need. Separate from init_globals
        since we need to know what size the images are before we can make
        them.
    """
    # get D so that we can change values in it
    global D

    # Find the size of the image
    # We set D.image right before calling this function
    D.size = cv.GetSize(D.image)

    # Create images for each color channel
    D.red = cv.CreateImage(D.size, 8, 1)
    D.blue = cv.CreateImage(D.size, 8, 1)
    D.green = cv.CreateImage(D.size, 8, 1)
    D.hue = cv.CreateImage(D.size, 8, 1)
    D.sat = cv.CreateImage(D.size, 8, 1)
    D.val = cv.CreateImage(D.size, 8, 1)

    # Create images to save the thresholded images to
    D.red_threshed = cv.CreateImage(D.size, 8, 1)
    D.green_threshed = cv.CreateImage(D.size, 8, 1)
    D.blue_threshed = cv.CreateImage(D.size, 8, 1)
    D.hue_threshed = cv.CreateImage(D.size, 8, 1)
    D.sat_threshed = cv.CreateImage(D.size, 8, 1)
    D.val_threshed = cv.CreateImage(D.size, 8, 1)

    # The final thresholded result
    D.threshed_image = cv.CreateImage(D.size, 8, 1)

    # Create an hsv image and a copy for contour-finding
    D.hsv = cv.CreateImage(D.size, 8, 3)
    D.copy = cv.CreateImage(D.size, 8, 1)
    D.storage = cv.CreateMemStorage(0)  # Create memory storage for contours

    # bunch of keypress values
    # So we know what to show, depending on which key is pressed
    D.key_dictionary = {
        ord('w'): D.threshed_image,
        ord('u'): D.red,
        ord('i'): D.green,
        ord('o'): D.blue,
        ord('j'): D.red_threshed,
        ord('k'): D.green_threshed,
        ord('l'): D.blue_threshed,
        ord('a'): D.hue,
        ord('s'): D.sat,
        ord('d'): D.val,
        ord('z'): D.hue_threshed,
        ord('x'): D.sat_threshed,
        ord('c'): D.val_threshed,
    }

    # set the default image for the second window
    D.current_threshold = D.threshed_image
Example #9
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    window = cv.CreateImage((cv.Round(img.width), cv.Round(img.height)), 8, 3)
    if (cascade):
        t = cv.GetTickCount()
        faces = local_haar_detect(small_img, cascade, cv.CreateMemStorage(0),
                                  haar_scale, min_neighbors, haar_flags,
                                  min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        channels = None
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (cv.Round(
                    (x + w * .2) * image_scale), cv.Round(y * image_scale))
                pt2 = (cv.Round(
                    (x + w * .8) * image_scale), cv.Round(
                        (y + h) * image_scale))

                window = cv.CreateImage((cv.Round(w * .6) * image_scale,
                                         cv.Round(h) * image_scale), 8, 3)
                cv.Smooth(window, window, cv.CV_GAUSSIAN)
                channels = [
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1),
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1),
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1)
                ]
                cv.GetRectSubPix(img, window, (cv.Round(
                    (pt1[0] + pt2[0]) / 2.0), cv.Round(
                        (pt1[1] + pt2[1]) / 2.0)))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                cv.Split(window, channels[0], channels[1], channels[2], None)
                result.append([
                    cv.Avg(channels[0])[0],
                    cv.Avg(channels[1])[0],
                    cv.Avg(channels[2])[0]
                ])

    cv.ShowImage("result", img)
Example #10
 def _get_approx(self, conts):
     '''
     Returns contour approximation
     @param conts: contours
     '''
     per = cv.ArcLength(conts)
     conts = cv.ApproxPoly(conts, cv.CreateMemStorage(),
                           cv.CV_POLY_APPROX_DP, per * 0.05)
     #cv.DrawContours(self.img, conts, (0,0,255), (0,255,0), 4)
     return list(conts)
Example #11
 def _get_face_rectangles(self):
     cvim = self._pil_to_opencv()
     return cv.HaarDetectObjects(
         cvim,
         self._get_face_classifier(),
         cv.CreateMemStorage(0),
         1.3,  # Scale factor
         4,  # Minimum neighbors
         0,  # HAAR Flags
         (20, 20))
Example #12
    def __init__(self, BW):
        # Constructor. BW is a binary image in the form of a numpy array
        self.BW = BW
        cs = cv.FindContours(cv.fromarray(self.BW.astype(np.uint8)),
                             cv.CreateMemStorage(),
                             mode=cv.CV_RETR_EXTERNAL)  #Finds the contours
        counter = 0

        centroid = list()
        cHull = list()
        contours = list()
        cHullArea = list()
        contourArea = list()
        while cs:  #Iterate through the CvSeq, cs.
            if abs(cv.ContourArea(cs)) > 2000:
                contourArea.append(cv.ContourArea(cs))
                m = cv.Moments(cs)
                try:
                    m10 = int(cv.GetSpatialMoment(m, 1,
                                                  0))  #Spatial moment m10
                    m00 = int(cv.GetSpatialMoment(m, 0,
                                                  0))  #Spatial moment m00
                    m01 = int(cv.GetSpatialMoment(m, 0,
                                                  1))  #Spatial moment m01
                    centroid.append((int(m10 / m00), int(m01 / m00)))
                    convexHull = cv.ConvexHull2(cs,
                                                cv.CreateMemStorage(),
                                                return_points=True)
                    cHullArea.append(cv.ContourArea(convexHull))
                    cHull.append(list(convexHull))
                    contours.append(list(cs))
                    counter += 1
                except:
                    pass
            cs = cs.h_next()

        self.centroid = centroid
        self.counter = counter
        self.cHull = cHull
        self.contours = contours
        self.cHullArea = cHullArea
        self.contourArea = contourArea
Example #13
def extracteur_de_sourires(nom, src):
	# "extracteur de sourires" = smile extractor: crop the mouth region and run every smile cascade on it
	img = cv.GetSubRect(src, (src.width*1/7, src.height*2/3, src.width*5/7, src.height/3))
	cpt = 0
	for s in all_s:
		res = cv.HaarDetectObjects(img, s, cv.CreateMemStorage())
		if len(res) == 1:
			cpt = cpt + 1
		if cpt == len(all_s):
			print "\tsmile seen in " + nom
			cv.SaveImage(result + str(cpt) + "s_" + nom, img)
Example #14
def find_convex_hull(cvseq):
    """ @param cvseq cvseq: an input cvseq from cv.FindContours
        @return cvseq hull: convex hull from ConvexHull2
    """
    storage = cv.CreateMemStorage(0)
    try:
        hull = cv.ConvexHull2(cvseq, storage, cv.CV_CLOCKWISE, 1)
    except TypeError:
        print "Find convex hull failed"
        return None
    return hull
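A rough usage sketch for find_convex_hull, assuming the legacy cv bindings; bin_img stands in for an 8-bit, single-channel binary image you already have, and a clone is passed because cv.FindContours modifies its input:

import cv

# `bin_img` is a placeholder for an existing 8-bit, single-channel binary image.
storage = cv.CreateMemStorage(0)
contours = cv.FindContours(cv.CloneImage(bin_img), storage,
                           cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)
hull = find_convex_hull(contours)
if hull is not None:
    print "hull points:", list(hull)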
Example #15
def show_faces():
    image = video_to_bgr(freenect.sync_get_video()[0])
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0

    gray = cv.CreateImage((image.width, image.height), 8, 1)
    small_image = cv.CreateImage((cv.Round(
        image.width / image_scale), cv.Round(image.height / image_scale)), 8,
                                 1)
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)
    cv.Resize(gray, small_image, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_image, small_image)

    faces = cv.HaarDetectObjects(small_image, face_cascade,
                                 cv.CreateMemStorage(0), haar_scale,
                                 min_neighbors, haar_flags, min_size)

    if faces:
        for ((x, y, w, h), n) in faces:
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
            cv.SetImageROI(
                image,
                (pt1[0], pt1[1], pt2[0] - pt1[0], int(
                    (pt2[1] - pt1[1]) * 0.7)))

        eyes = cv.HaarDetectObjects(image, eye_cascade, cv.CreateMemStorage(0),
                                    haar_scale, min_neighbors, haar_flags,
                                    (15, 15))

        if eyes:
            for eye in eyes:
                cv.Rectangle(image, (eye[0][0], eye[0][1]),
                             (eye[0][0] + eye[0][2], eye[0][1] + eye[0][3]),
                             cv.RGB(255, 0, 0), 1, 8, 0)

    cv.ResetImageROI(image)

    return image
Example #16
    def search_face(self, face_params):
        """
        Search the picture for a face and populate the faces list.
        This function is the only one containing scaling information.

        Sets several Guy attributes, such as the face size and the virtual center of the image.

        :param face_params: The type of file to be used to train the classifier.
        :type face_params: string

        Once faces have been found, they are listed and ordered.
        """

        # Load the input image
        in_image = self.load_image()

        # Allocate the temporary images
        gray = cv.CreateImage((self.in_x, self.in_y), cv.IPL_DEPTH_8U, 1)
        smallImage = cv.CreateImage((cv.Round(self.in_x / face_params.image_scale),
                                     cv.Round(self.in_y / face_params.image_scale)),
                                    cv.IPL_DEPTH_8U, 1)

        # Converts color input image to grayscale
        cv.CvtColor(in_image, gray, cv.CV_BGR2GRAY)
        # Scales input image for faster processing
        cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)
        # Equalizes the histogram
        cv.EqualizeHist(smallImage, smallImage)

        # Detect the faces
        small_faces = cv.HaarDetectObjects(smallImage,
                                     face_params.face_cascade,
                                     cv.CreateMemStorage(0),
                                     face_params.haar_scale,
                                     face_params.min_neighbors,
                                     face_params.haar_flags,
                                     face_params.min_size)

        # Resizing faces to full_scale
        for face in small_faces:
            if len(face): # if faces have been found
                ((x, y, w, h), n) = face
                big_face = ((int(x * face_params.image_scale),
                             int(y * face_params.image_scale),
                             int(w * face_params.image_scale),
                             int(h * face_params.image_scale)), n)
                self.faces.append(big_face)

        # sorting faces to keep only the most probable one
        self.sort_faces()
        self.update_center() # finds center of face in image
Example #17
def track(img, threshold=100):
    '''Accepts BGR image and optional object threshold between 0 and 255 (default = 100).
       Returns: (x,y) coordinates of centroid if found
                (-1,-1) if no centroid was found
                None if user hit ESC
    '''
    cascade = cv.Load("haarcascade_frontalface_alt_tree.xml")
    #cascade = cv.Load("haarcascade_frontalface_default.xml")
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    center = (-1, -1, -1)
    if (cascade):
        t = cv.GetTickCount()
        # HaarDetectObjects takes 0.02s
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        if faces:
            faces.sort()
            ((x, y, w, h), n) = faces[-1]
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
            #cv.Rectangle(img, (x,y), (x+w,y+h), 255)
            # get the xy corner co-ords, calc the center location
            x1 = pt1[0]
            x2 = pt2[0]
            y1 = pt1[1]
            y2 = pt2[1]
            centerx = x1 + ((x2 - x1) / 2)
            centery = y1 + ((y2 - y1) / 2)
            center = (centerx, centery, h * 3)

    if cmp(center, (-1, -1, -1)) == 0:
        center = None

    cv.NamedWindow(WINDOW_NAME, 1)
    cv.ShowImage(WINDOW_NAME, img)

    if cv.WaitKey(5) == 27:
        center = None
    return center
Example #18
    def add_features(self, cv_image, face):
        """ Look for any new features around the current feature cloud """
        """ Create the ROI mask"""
        roi = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
        """ Begin with all black pixels """
        cv.Zero(roi)
        """ Get the coordinates and dimensions of the current track box """
        try:
            ((x, y), (w, h), a) = face.track_box
        except:
            logger.info("Track box has shrunk to zero...")
            return
        """ Expand the track box to look for new features """
        w = int(face.expand_roi * w)
        h = int(face.expand_roi * h)

        roi_box = ((x, y), (w, h), a)
        """ Create a filled white ellipse within the track_box to define the ROI. """
        cv.EllipseBox(roi, roi_box, cv.CV_RGB(255, 255, 255), cv.CV_FILLED)
        """ Create the temporary scratchpad images """
        eig = cv.CreateImage(cv.GetSize(self.grey), 32, 1)
        temp = cv.CreateImage(cv.GetSize(self.grey), 32, 1)

        if self.feature_type == 0:
            """ Get the new features using Good Features to Track """
            features = cv.GoodFeaturesToTrack(self.grey,
                                              eig,
                                              temp,
                                              self.max_count,
                                              self.quality,
                                              self.good_feature_distance,
                                              mask=roi,
                                              blockSize=3,
                                              useHarris=0,
                                              k=0.04)

        elif self.feature_type == 1:
            """ Get the new features using SURF """
            features = []
            (surf_features, descriptors) = cv.ExtractSURF(
                self.grey, roi, cv.CreateMemStorage(0),
                (0, self.surf_hessian_quality, 3, 1))
            for feature in surf_features:
                features.append(feature[0])
        """ Append new features to the current list if they are not too
            far from the current cluster """
        for new_feature in features:
            try:
                distance = self.distance_to_cluster(new_feature, face.features)
                if distance > self.add_feature_distance:
                    face.features.append(new_feature)
            except:
                pass
        """ Remove duplicate features """
        face.features = list(set(face.features))
Example #19
    def _computeConvexHulls(self):
        hulls = []
        seq = self._contours
        while seq is not None and len(seq) != 0:
            cvxHull = cv.ConvexHull2(seq,
                                     cv.CreateMemStorage(),
                                     return_points=True)
            hulls.append(cvxHull)
            seq = seq.h_next()

        self._convexHulls = hulls
Example #20
def find_squares_from_binary(gray):
    """
    use contour search to find squares in binary image
    returns list of numpy arrays containing 4 points
    """
    squares = []
    storage = cv.CreateMemStorage(0)
    contours = cv.FindContours(gray, storage, cv.CV_RETR_TREE,
                               cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
    storage = cv.CreateMemStorage(0)
    while contours:
        #approximate contour with accuracy proportional to the contour perimeter
        arclength = cv.ArcLength(contours)
        polygon = cv.ApproxPoly(contours, storage, cv.CV_POLY_APPROX_DP,
                                arclength * 0.02, 0)
        if is_square(polygon):
            squares.append(polygon[0:4])
        contours = contours.h_next()

    return squares
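A possible way to drive find_squares_from_binary, assuming the legacy cv bindings and the is_square helper from the same module; the file name scan.png and the threshold of 128 are illustrative only:

import cv

# "scan.png" is a placeholder; load it as grayscale and binarize it before the contour search.
gray = cv.LoadImage("scan.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
binary = cv.CreateImage(cv.GetSize(gray), 8, 1)
cv.Threshold(gray, binary, 128, 255, cv.CV_THRESH_BINARY)

for square in find_squares_from_binary(binary):
    print "square corners:", list(square)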
Example #21
def main():
    hash=hashlib.sha1()
    buff=StringIO.StringIO()
    buff.write(sys.stdin.read()) #STDIN to buffer
    hash.update(buff.getvalue())
    buff.seek(0)
    pil_im=Image.open(buff)
    cv_im = cv.CreateImageHeader(pil_im.size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cv_im, pil_im.tostring())
    cascade = cv.Load("../src/main/resources/haarcascade_frontalface_default.xml")
    print hash.hexdigest()+":"+str(cv.HaarDetectObjects(cv_im, cascade, cv.CreateMemStorage(0), 1.2, 2, 0, (50, 50)))
Example #22
def DetectEye(image):

    faceCascade = cv.Load("frontalEyes35x16.xml")

    min_size = (20, 20)
    image_scale = 1
    haar_scale = 1.1
    min_neighbors = 3
    haar_flags = 0

    # Allocate the temporary images

    grayscale = cv.CreateImage((image.width, image.height), 8, 1)
    SmallImage = cv.CreateImage((cv.Round(
        image.width / image_scale), cv.Round(image.height / image_scale)), 8,
                                1)

    # Convert input color image to grayscale
    cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)

    # Scale input image for faster processing

    cv.Resize(grayscale, SmallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram

    cv.EqualizeHist(SmallImage, SmallImage)

    # Detect the faces

    faces = cv.HaarDetectObjects(SmallImage, faceCascade,
                                 cv.CreateMemStorage(0), haar_scale,
                                 min_neighbors, haar_flags, min_size)

    # If faces are found

    eye = []

    if faces:

        for ((x, y, w, h), n) in faces:

            # The input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints

            pt1 = (int(x * image_scale), int((y + h / 2) * image_scale))

            pt2 = (int((x + w) * image_scale), int((y + h / 2) * image_scale))

            eye.append(pt1)
            eye.append(pt2)

    return eye
Example #23
def detect_image_faces(image_file, cascade):
    # Detect all faces in image:
    try:
        image = cv.LoadImageM(image_file, cv.CV_LOAD_IMAGE_GRAYSCALE)
        faces = cv.HaarDetectObjects( \
          image, cascade, cv.CreateMemStorage(0), scale_factor = 1.2, \
          min_neighbors = 2, flags = 0, min_size = (20, 20))
    except:
        faces = []

    return faces
Example #24
    def detect(self, cv_im):
        import cv
        HAAR_CASCADE_PATH = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml"
        cascade = cv.Load(HAAR_CASCADE_PATH)
        storage = cv.CreateMemStorage()

        faces = []
        detected = cv.HaarDetectObjects(cv_im, cascade, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, (15,15))
        if detected:
            for (x,y,w,h),n in detected:
                faces.append((x,y,w,h))
        return faces
Example #25
def find_characters(grey, bw):
    """Find character contours in a 1-bit image."""
    # detect contours
    storage = cv.CreateMemStorage()
    contour_iter = cv.FindContours(bw, storage, cv.CV_RETR_CCOMP,
                                   cv.CV_CHAIN_APPROX_NONE)
    # filter the detected contours
    while contour_iter:
        contour = Contour(contour_iter, grey)
        if contour.valid:
            yield contour
        contour_iter = contour_iter.h_next()
Example #26
def getBotCoord():
    image = cv.QueryFrame(capture)
    imageTreshold = thresholded_image(image, botTreshold[0], botTreshold[1])
    current_contour = cv.FindContours(cv.CloneImage(imageTreshold),
                                      cv.CreateMemStorage(), cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)
    mypos = (0, 0)
    if len(current_contour) != 0:
        mypos = contourCenter(largestContour(current_contour))
    imagehsv = hsv_image(image)
    s = cv.Get2D(imagehsv, mypos[0], mypos[1])
    return (mypos, s)
Example #27
def detectFace(image):
    storage = cv.CreateMemStorage()
    haar = cv.Load(
        '/opt/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml'
    )
    detected = cv.HaarDetectObjects(image, haar, storage, 1.2, 2,
                                    cv.CV_HAAR_DO_CANNY_PRUNING, (100, 100))
    if detected:
        for face in detected:
            return face
    else:
        return False
Example #28
    def __setstate__(self, state):
        ''' Function required to save and load the state from pickle. '''
        # Modeled after SVM pickling

        for key, value in state.iteritems():
            self.__dict__[key] = value

        filename = tempfile.mktemp()
        open(filename, 'w').write(self.cascade_data)
        self.cascade = cv.Load(filename)
        self.storage = cv.CreateMemStorage(0)
        os.remove(filename)
Example #29
def findmouth(img):

    # INITIALIZE: loading the classifiers
    haarFace = cv.Load(path_to_file + '/haarcascade_frontalface_default.xml')
    haarMouth = cv.Load(path_to_file + '/haarcascade_mouth.xml')
    # running the classifiers
    storage = cv.CreateMemStorage()
    detectedFace = cv.HaarDetectObjects(img, haarFace, storage)
    detectedMouth = cv.HaarDetectObjects(img, haarMouth, storage)

    # FACE: find the largest detected face as detected face
    maxFaceSize = 0
    maxFace = 0
    if detectedFace:
        # face: [0][0]: x; [0][1]: y; [0][2]: width; [0][3]: height
        for face in detectedFace:
            if face[0][3] * face[0][2] > maxFaceSize:
                maxFaceSize = face[0][3] * face[0][2]
                maxFace = face

    if maxFace == 0:  # did not detect face
        return 2

    def mouth_in_lower_face(mouth, face):
        # if the mouth is in the lower 2/5 of the face
        # and the lower edge of mouth is above that of the face
        # and the horizontal center of the mouth is the center of the face
        if (mouth[0][1] > face[0][1] + face[0][3] * 3 / float(5)
                and mouth[0][1] + mouth[0][3] < face[0][1] + face[0][3]
                and abs((mouth[0][0] + mouth[0][2] / float(2)) -
                        (face[0][0] + face[0][2] / float(2))) <
                face[0][2] / float(10)):
            return True
        else:
            return False

    # FILTER MOUTH
    filteredMouth = []
    if detectedMouth:
        for mouth in detectedMouth:
            if mouth_in_lower_face(mouth, maxFace):
                filteredMouth.append(mouth)

    maxMouthSize = 0
    for mouth in filteredMouth:
        if mouth[0][3] * mouth[0][2] > maxMouthSize:
            maxMouthSize = mouth[0][3] * mouth[0][2]
            maxMouth = mouth

    try:
        return maxMouth
    except UnboundLocalError:
        return 2
Example #30
def get_blobs(bin_arr):
    '''
    Find all contiguous nonzero blobs in the image, and return a list of Blob objects.
    '''
    bin_img = cv.fromarray(bin_arr.copy())
    storage = cv.CreateMemStorage(0)
    contours = cv.FindContours(bin_img, storage, cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_NONE)
    blobs = []
    while contours:
        blobs.append(Blob(contours))
        contours = contours.h_next()
    return sorted(blobs, key=Blob.get_area, reverse=True)
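A short sketch of how get_blobs might be called, assuming the Blob class referenced above is defined in the same module; the mask below is a synthetic example:

import numpy as np

# Synthetic binary mask: one white rectangle on a black background.
bin_arr = np.zeros((480, 640), dtype=np.uint8)
bin_arr[100:200, 150:300] = 255

for blob in get_blobs(bin_arr):
    print "blob area:", blob.get_area()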