def update(self, image):
    """Update the tracked facial features from a BGR or grayscale frame.

    Detects the face, then searches sub-regions of each face rect for the
    eyes, nose and mouth, storing results as Face objects in self._faces.
    """
    self._faces = []

    # Detection runs on an equalized grayscale copy for robustness to
    # lighting variation.
    if utils.isGray(image):
        image = cv2.equalizeHist(image)
    else:
        # FIX: cv2.cv.CV_BGR2GRAY was removed in OpenCV 3+; use the
        # portable COLOR_BGR2GRAY constant.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # FIX: the color branch previously skipped equalization, unlike
        # the grayscale branch.
        image = cv2.equalizeHist(image)

    # Ignore faces smaller than 1/8 of the frame in each dimension.
    minSize = utils.widthHeightDividedBy(image, 8)
    faceRects = self._faceClassifier.detectMultiScale(
        image, self.scaleFactor, self.minNeighbors, self.flags, minSize)

    if faceRects is not None:
        for faceRect in faceRects:
            face = Face()
            face.faceRect = faceRect
            x, y, w, h = faceRect

            # FIX: use integer division — under Python 3, w / 7 is a
            # float and search rects must hold ints.
            # Seek an eye in the upper-left part of the face.
            searchRect = (x + w // 7, y, w * 2 // 7, h // 2)
            face.leftEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)

            # Seek an eye in the upper-right part of the face.
            searchRect = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
            face.rightEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)

            # Seek a nose in the middle part of the face.
            searchRect = (x + w // 4, y + h // 4, w // 2, h // 2)
            face.noseRect = self._detectOneObject(
                self._noseClassifier, image, searchRect, 32)

            # Seek a mouth in the lower-middle part of the face.
            searchRect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
            face.mouthRect = self._detectOneObject(
                self._mouthClassifier, image, searchRect, 16)

            self._faces.append(face)
def drawDebugRects(self, image):
    """Draw rectangles around the tracked facial features.

    Grayscale frames use white (255) for every feature; color frames use
    a distinct BGR color per feature.
    """
    if utils.isGray(image):
        faceColor = 255
        leftEyeColor = 255
        rightEyeColor = 255
        noseColor = 255
        mouthColor = 255
        smileColor = 255
    else:
        faceColor = (255, 255, 255)    # white
        leftEyeColor = (0, 0, 255)     # red
        rightEyeColor = (0, 255, 255)  # yellow
        noseColor = (0, 255, 0)        # green
        mouthColor = (255, 0, 0)       # blue
        smileColor = (255, 255, 255)   # white
    # NOTE: the dead `eyepair_smallColor` assignment (set only in the
    # grayscale branch, never used) was removed.
    for face in self.faces:
        rects.outlineRect(image, face.faceRect, faceColor)
        rects.outlineRect(image, face.leftEyeRect, leftEyeColor)
        rects.outlineRect(image, face.rightEyeRect, rightEyeColor)
        rects.outlineRect(image, face.noseRect, noseColor)
        rects.outlineRect(image, face.mouthRect, mouthColor)
        rects.outlineRect(image, face.smileRect, smileColor)
def copyRect(src, dst, srcRect, dstRect, mask=None,
             interpolation=cv2.INTER_LINEAR):
    """Copy part of the source to part of the destination.

    The source sub-rectangle is resized to the destination sub-rectangle's
    size; when a mask is given, only the mask's nonzero pixels are copied.
    """
    srcX, srcY, srcW, srcH = srcRect
    dstX, dstY, dstW, dstH = dstRect

    # Source sub-rectangle, resized to the destination size.
    resized = cv2.resize(src[srcY:srcY + srcH, srcX:srcX + srcW],
                         (dstW, dstH), interpolation=interpolation)

    if mask is None:
        dst[dstY:dstY + dstH, dstX:dstX + dstW] = resized
    else:
        if not utils.isGray(src):
            # Expand the single-channel mask to 3 channels, like the image.
            mask = mask.repeat(3).reshape(srcH, srcW, 3)
        # Nearest-neighbor keeps the resized mask binary-ish; copy only
        # where it is nonzero.
        resizedMask = cv2.resize(mask, (dstW, dstH),
                                 interpolation=cv2.INTER_NEAREST)
        dst[dstY:dstY + dstH, dstX:dstX + dstW] = numpy.where(
            resizedMask, resized, dst[dstY:dstY + dstH, dstX:dstX + dstW])
def drawDebugRects(self, image):
    """Draw labelled rectangles around the tracked facial features.

    :param image: full frame to draw on
    :return: None
    """
    if utils.isGray(image):
        # Single-channel frame: draw every feature in white.
        colors = {key: 255 for key in
                  ('face', 'leftEye', 'rightEye', 'nose', 'mouth')}
    else:
        colors = {
            'face': (255, 255, 255),    # white
            'leftEye': (0, 0, 255),     # red
            'rightEye': (0, 255, 255),  # yellow
            'nose': (0, 255, 0),        # green
            'mouth': (255, 0, 0),       # blue
        }
    for face in self.faces:
        rects.outlineRectWithTitle(image, face.faceRect,
                                   colors['face'], 'Face')
        rects.outlineRectWithTitle(image, face.leftEyeRect,
                                   colors['leftEye'], 'Left Eye')
        rects.outlineRectWithTitle(image, face.rightEyeRect,
                                   colors['rightEye'], 'Right Eye')
        rects.outlineRectWithTitle(image, face.noseRect,
                                   colors['nose'], 'Nose')
        rects.outlineRectWithTitle(image, face.mouthRect,
                                   colors['mouth'], 'Mouth')
def copyRect(src, dst, srcRect, dstRect, mask = None,
             interpolation = cv2.INTER_LINEAR):
    """Copy part of the source to part of the destination."""
    x0, y0, w0, h0 = srcRect
    x1, y1, w1, h1 = dstRect

    def scaledSrc():
        # The source sub-rectangle resized to the destination size.
        return cv2.resize(src[y0:y0 + h0, x0:x0 + w0], (w1, h1),
                          interpolation=interpolation)

    if mask is None:
        # Unmasked: overwrite the whole destination sub-rectangle.
        dst[y1:y1 + h1, x1:x1 + w1] = scaledSrc()
        return

    if not utils.isGray(src):
        # Give the mask 3 channels so it matches a color image.
        mask = mask.repeat(3).reshape(h0, w0, 3)
    # Copy only where the resized mask is nonzero; elsewhere keep dst.
    scaledMask = cv2.resize(mask, (w1, h1),
                            interpolation=cv2.INTER_NEAREST)
    dst[y1:y1 + h1, x1:x1 + w1] = numpy.where(
        scaledMask, scaledSrc(), dst[y1:y1 + h1, x1:x1 + w1])
def update(self, image):
    """Detect faces and their features in the given frame.

    Raw face rects are appended to self.swapfaces; populated Face
    containers are appended to self.faces.
    """
    self.swapfaces = []
    if utils.isGray(image):
        cv2.equalizeHist(image, image)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(image, image)
    # Ignore faces smaller than 1/8 of the frame in each dimension.
    minscale = utils.divideHeightWidth(image, 8)
    facerects = self.facedetector.detectMultiScale(
        image, self.scale, self.neighbour, self.flag, minscale)
    self.swapfaces.append(facerects)
    if len(facerects) > 0:
        for face in facerects:
            # BUG FIX: the Face container was previously created once
            # outside the loop, so every entry of self.faces referenced
            # the same object and only kept the last face's features.
            facearea = Face()
            facearea.face = face
            x, y, w, h = face
            # Integer division: float rects break detection on Python 3.
            # Left eye: upper-left region of the face.
            searcharea = (x + w // 7, y, w * 2 // 7, h // 2)
            facearea.lefteye = FaceDetect.detectObject(
                self, self.lefteyedetector, image, searcharea, 64)
            # Right eye: upper-right region.
            searcharea = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
            facearea.righteye = FaceDetect.detectObject(
                self, self.righteyedetector, image, searcharea, 64)
            # Smile: lower-middle region (nose detection is disabled in
            # this variant).
            searcharea = (x + w // 3, y + h * 2 // 3, w * 2 // 3, h // 3)
            facearea.smile = FaceDetect.detectObject(
                self, self.smiledetector, image, searcharea, 16)
            self.faces.append(facearea)
def drawLinesFromCenter(self, image):
    """Draw a line from the center of the image to the center of each
    tracked face."""
    # White works for both single-channel and BGR frames.
    lineColor = 255 if utils.isGray(image) else (255, 255, 255)
    for trackedFace in self.faces:
        lines.drawLine(image, trackedFace.faceRect, lineColor)
def update(self, image):
    """Update the tracked facial features.

    Detects faces on an equalized grayscale copy of the frame, then
    searches fixed sub-regions of each face for eyes, nose, mouth and
    smile, storing the results as Face objects in self._faces.
    """
    self._faces = []
    if utils.isGray(image):
        image = cv2.equalizeHist(image)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(image, image)
    minSize = utils.widthHeightDividedBy(image, 8)
    faceRects = self._faceClassifier.detectMultiScale(
        image, self.scaleFactor, self.minNeighbors, self.flags, minSize)
    if faceRects is None:
        return
    for faceRect in faceRects:
        face = Face()
        face.faceRect = faceRect
        x, y, w, h = faceRect
        # (attribute, classifier, search region, size ratio) per feature.
        featureSpecs = (
            # Eyes: upper-left and upper-right parts of the face.
            ('leftEyeRect', self._eyeClassifier,
             (int(x + w / 7), y, int(w * 2 / 7), int(h / 2)), 64),
            ('rightEyeRect', self._eyeClassifier,
             (int(x + w * 4 / 7), y, int(w * 2 / 7), int(h / 2)), 64),
            # Nose: middle part of the face.
            ('noseRect', self._noseClassifier,
             (int(x + w / 4), int(y + h / 4), int(w / 2), int(h / 2)), 32),
            # Mouth and smile share the lower-middle part of the face.
            ('mouthRect', self._mouthClassifier,
             (int(x + w / 6), int(y + h * 2 / 3), int(w * 2 / 3),
              int(h / 3)), 16),
            ('smileRect', self._smileClassifier,
             (int(x + w / 6), int(y + h * 2 / 3), int(w * 2 / 3),
              int(h / 3)), 16),
        )
        for attr, classifier, searchRect, ratio in featureSpecs:
            setattr(face, attr, self._detectOneObject(
                classifier, image, searchRect, ratio))
        self._faces.append(face)
def update(self, image):
    """Update the tracked facial features."""
    # Reset the features list.
    self._faces = []
    # Equalize the histogram for robustness to lighting variation, and
    # work on a grayscale copy of the image for performance.
    if utils.isGray(image):
        image = cv2.equalizeHist(image)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(image, image)
    # Classify the image.
    minSize = utils.widthHeightDividedBy(image, 8)
    faceRects = self._faceClassifier.detectMultiScale(
        image, self.scaleFactor, self.minNeighbours, self.flags, minSize)
    # If we find viable matches, place them in the faces list.
    if faceRects is not None:
        for faceRect in faceRects:
            face = Face()
            face.faceRect = faceRect
            # Decompose the target feature.
            x, y, w, h = faceRect
            # Integer division keeps search rects int-valued on Python 3.
            # Seek an eye in the upper LHS of the face.
            searchRect = (x + w // 7, y, w * 2 // 7, h // 2)
            face.leftEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek an eye in the upper RHS of the face.
            searchRect = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
            face.rightEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek a nose in the middle of the face.
            searchRect = (x + w // 4, y + h // 4, w // 2, h // 2)
            face.noseRect = self._detectOneObject(
                self._noseClassifier, image, searchRect, 32)
            # Seek a mouth in the lower third of the face.
            # BUG FIX: the mouth search previously reused the eye
            # classifier (copy-paste error).
            # TODO(review): confirm self._mouthClassifier is initialized
            # on this class.
            searchRect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
            face.mouthRect = self._detectOneObject(
                self._mouthClassifier, image, searchRect, 16)
            self._faces.append(face)
def update(self, image):
    """Update the tracked facial features.

    Faces are kept only when at least one facial organ was detected
    (face.detectOrganCount >= 1).
    """
    self._faces = []
    if utils.isGray(image):
        image = cv2.equalizeHist(image)
    else:
        # FIX: cv2.cv.CV_BGR2GRAY was removed in OpenCV 3+; use the
        # portable COLOR_BGR2GRAY constant.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(image, image)
    minSize = utils.widthHeightDividedBy(image, 8)
    faceRects = self._faceClassifier.detectMultiScale(
        image, self.scaleFactor, self.minNeighbors, self.flags, minSize)
    if faceRects is not None:
        for faceRect in faceRects:
            face = Face()
            face.faceRect = faceRect
            face.logging_faceZone(image)
            x, y, w, h = faceRect
            # FIX: integer division — floats break rect handling on
            # Python 3.
            # Seek an eye in the upper-left part of the face.
            searchRect = (x + w // 7, y, w * 2 // 7, h // 2)
            face.leftEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek an eye in the upper-right part of the face.
            searchRect = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
            face.rightEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek a nose in the middle part of the face.
            searchRect = (x + w // 4, y + h // 4, w // 2, h // 2)
            face.noseRect = self._detectOneObject(
                self._noseClassifier, image, searchRect, 32)
            # Seek a mouth in the lower-middle part of the face.
            searchRect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
            face.mouthRect = self._detectOneObject(
                self._mouthClassifier, image, searchRect, 16)
            # Keep the face only when at least one organ was found.
            if face.detectOrganCount >= 1:
                self._faces.append(face)
def extractSiftFeatures(image):
    """Detect SIFT interest points and return their descriptors.

    :param image: BGR or grayscale image.
    :return: numpy array of shape [num_keypoints x 128], one row per
        keypoint observation (or None if no keypoints were found).
    """
    if not isGray(image):
        # Convert a color (BGR) image to grayscale.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # FIX: cv2.SIFT() was removed in OpenCV 3+; prefer SIFT_create()
    # when available and fall back to the legacy constructor otherwise.
    if hasattr(cv2, 'SIFT_create'):
        sift = cv2.SIFT_create()
    else:
        sift = cv2.SIFT()
    # kps: a list of keypoints; des: descriptor array, one row each.
    kps, des = sift.detectAndCompute(image, None)
    return des
def drawDebugRects(self, image):
    """Draw rectangles around the tracked face and eyes."""
    if utils.isGray(image):
        # Everything white on a single-channel image.
        faceColor = leftEyeColor = rightEyeColor = 255
    else:
        faceColor = (255, 255, 255)    # white
        leftEyeColor = (0, 0, 255)     # red
        rightEyeColor = (0, 255, 255)  # yellow
    for face in self.faces:
        for rect, color in ((face.faceRect, faceColor),
                            (face.leftEyeRect, leftEyeColor),
                            (face.rightEyeRect, rightEyeColor)):
            rects.outlineRect(image, rect, color)
def update(self, image):
    """Update the tracked facial features.

    NOTE: leftover debug print() calls for minSize and faceRects were
    removed.
    """
    self._faces = []
    if utils.isGray(image):
        image = cv2.equalizeHist(image)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(image, image)
    minSize = utils.widthHeightDividedBy(image, 8)
    faceRects = self._faceClassifier.detectMultiScale(
        image, self.scaleFactor, self.minNeighbors, self.flags, minSize)
    if faceRects is not None:
        for faceRect in faceRects:
            face = Face()
            face.faceRect = faceRect
            x, y, w, h = faceRect
            # FIX: integer division — w / 7 yields a float on Python 3
            # and search rects must hold ints.
            # Seek an eye in the upper-left part of the face.
            searchRect = (x + w // 7, y, w * 2 // 7, h // 2)
            face.leftEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek an eye in the upper-right part of the face.
            searchRect = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
            face.rightEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek a nose in the middle part of the face.
            searchRect = (x + w // 4, y + h // 4, w // 2, h // 2)
            face.noseRect = self._detectOneObject(
                self._noseClassifier, image, searchRect, 32)
            # Seek a mouth in the lower-middle part of the face.
            searchRect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
            face.mouthRect = self._detectOneObject(
                self._mouthClassifier, image, searchRect, 16)
            self._faces.append(face)
def drawRects(self, image):
    """Draw rectangles around the tracked face and its eyes."""
    # White rectangles; a grayscale image needs a scalar color.
    faceColor = 255 if utils.isGray(image) else (255, 255, 255)
    eyeColor = (0, 255, 0)  # green
    for face in self._faces:
        x, y, w, h = face.faceRect
        cv2.rectangle(image, (x, y), (x + w, y + h), faceColor, 2)
        # Eye rects are relative to the face rect's origin.
        for ex, ey, ew, eh in face.eyeRects:
            cv2.rectangle(image, (x + ex, y + ey),
                          (x + ex + ew, y + ey + eh), eyeColor, 2)
def update(self, image):
    """Update the tracked facial features (face, eyes, nose, mouth)."""
    self._faces = []
    if utils.isGray(image):
        image = cv2.equalizeHist(image)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(image, image)
    minSize = utils.widthHeightDividedBy(image, 8)
    minSize = (int(minSize[0]), int(minSize[1]))
    faceRects = self._faceClassifier.detectMultiScale(
        image, self.scaleFactor, self.minNeighbors, self.flags, minSize)
    if faceRects is not None:
        for faceRect in faceRects:
            face = Face()
            face.faceRect = faceRect
            x, y, w, h = faceRect
            # FIX: minSize was int-cast but the search rects were not —
            # integer division keeps them int-valued on Python 3.
            # Seek the left eye in the upper-left part of the face.
            searchRect = (x + w // 7, y, w * 2 // 7, h // 2)
            face.leftEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek the right eye in the upper-right part of the face.
            searchRect = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
            face.rightEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek the nose in the middle part of the face.
            searchRect = (x + w // 4, y + h // 4, w // 2, h // 2)
            face.noseRect = self._detectOneObject(
                self._noseClassifier, image, searchRect, 32)
            # Seek the mouth in the lower-middle part of the face.
            searchRect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
            face.mouthRect = self._detectOneObject(
                self._mouthClassifier, image, searchRect, 16)
            self._faces.append(face)
def copyRect(src, dst, srcRect, dstRect, mask=None,
             interpolation=cv2.INTER_LINEAR):
    """Resize srcRect of src into dstRect of dst, optionally masked."""
    sx, sy, sw, sh = srcRect
    dx, dy, dw, dh = dstRect
    # Source patch scaled to the destination rectangle's size.
    patch = cv2.resize(src[sy:sy + sh, sx:sx + sw], (dw, dh),
                       interpolation=interpolation)
    if mask is None:
        dst[dy:dy + dh, dx:dx + dw] = patch
        return
    if not utils.isGray(src):
        # Match the mask's channel count to the color image.
        mask = mask.repeat(3).reshape(sh, sw, 3)
    maskPatch = cv2.resize(mask, (dw, dh),
                           interpolation=cv2.INTER_NEAREST)
    # Copy the patch only where the mask is nonzero.
    dst[dy:dy + dh, dx:dx + dw] = numpy.where(
        maskPatch, patch, dst[dy:dy + dh, dx:dx + dw])
def drawRect(self, image):
    """Outline each detected face, its eyes and its smile, then clear
    the face list so stale detections are not redrawn next frame."""
    if utils.isGray(image):
        faceColor = leftEyeColor = rightEyeColor = mouthColor = 255
    else:
        faceColor = (255, 255, 255)    # white
        leftEyeColor = (0, 0, 255)     # red
        rightEyeColor = (0, 255, 255)  # yellow
        mouthColor = (255, 0, 0)       # blue
    for face in self.faces:
        # NOTE: this project's rects.outlineRect takes (image, color,
        # rect) — color before rect. Nose drawing is disabled here.
        rects.outlineRect(image, faceColor, face.face)
        rects.outlineRect(image, leftEyeColor, face.lefteye)
        rects.outlineRect(image, rightEyeColor, face.righteye)
        rects.outlineRect(image, mouthColor, face.smile)
    # Consume the detections.
    self.faces = []
def update(self, image):
    """Re-detect faces in the given frame and store them in self._faces."""
    self._faces = []
    # Detect on an equalized grayscale copy of the frame.
    if not utils.isGray(image):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = cv2.equalizeHist(image)
    # Ignore faces smaller than 1/8 of the frame in each dimension.
    minFaceSize = utils.sizeDividedBy(image, 8)
    faceRects = self._faceClassifier.detectMultiScale(
        image, self.scaleFactor, self.minNeighbors, self.flags,
        minFaceSize)
    if faceRects is None:
        return
    for faceRect in faceRects:
        face = Face()
        face.faceRect = faceRect
        self._faces.append(face)
def show(self, frame):
    """Display a BGR or grayscale frame in the Pygame window."""
    # Find the frame's dimensions in (w, h) format.
    frameSize = frame.shape[1::-1]
    # Convert the frame to RGB, which Pygame requires.
    if utils.isGray(frame):
        conversionType = cv2.COLOR_GRAY2RGB
    else:
        conversionType = cv2.COLOR_BGR2RGB
    rgbFrame = cv2.cvtColor(frame, conversionType)
    # Convert the frame to Pygame's Surface type.
    # FIX: ndarray.tostring() was deprecated and later removed in NumPy;
    # tobytes() returns the same raw buffer.
    pygameFrame = pygame.image.frombuffer(
        rgbFrame.tobytes(), frameSize, 'RGB')
    # Resize the window to match the frame.
    displaySurface = pygame.display.set_mode(frameSize)
    # Blit and display the frame.
    displaySurface.blit(pygameFrame, (0, 0))
    pygame.display.flip()
def drawDebugRects(self, image):
    """Draw rectangles around the tracked facial features."""
    gray = utils.isGray(image)
    # (feature attribute, BGR color for color frames); grayscale frames
    # use white (255) for everything.
    palette = (
        ('faceRect', (255, 255, 255)),    # white
        ('leftEyeRect', (0, 0, 255)),     # red
        ('rightEyeRect', (0, 255, 255)),  # yellow
        ('noseRect', (0, 255, 0)),        # green
        ('mouthRect', (255, 0, 0)),       # blue
    )
    for face in self.faces:
        for attr, color in palette:
            rects.outlineRect(image, getattr(face, attr),
                              255 if gray else color)
def drawDebugRects(self, image):
    """Outline the tracked face, eyes, nose and mouth on the image."""
    if utils.isGray(image):
        # Single-channel frame: every feature drawn in white.
        faceColor = leftEyeColor = rightEyeColor = 255
        noseColor = mouthColor = 255
    else:
        faceColor = (255, 255, 255)    # white
        leftEyeColor = (0, 0, 255)     # red
        rightEyeColor = (0, 255, 255)  # yellow
        noseColor = (0, 255, 0)        # green
        mouthColor = (255, 0, 0)       # blue
    for trackedFace in self.faces:
        rects.outlineRect(image, trackedFace.faceRect, faceColor)
        rects.outlineRect(image, trackedFace.leftEyeRect, leftEyeColor)
        rects.outlineRect(image, trackedFace.rightEyeRect, rightEyeColor)
        rects.outlineRect(image, trackedFace.noseRect, noseColor)
        rects.outlineRect(image, trackedFace.mouthRect, mouthColor)
def drawDebugRects(self, image):
    """Draw rectangles around the tracked facial features."""
    isGrayImage = utils.isGray(image)

    def pick(bgr):
        # Grayscale frames get white; color frames get the given BGR.
        return 255 if isGrayImage else bgr

    for face in self.faces:
        rects.outlineRect(image, face.faceRect, pick((255, 255, 255)))
        rects.outlineRect(image, face.leftEyeRect, pick((0, 0, 255)))
        rects.outlineRect(image, face.rightEyeRect, pick((0, 255, 255)))
        rects.outlineRect(image, face.noseRect, pick((0, 255, 0)))
        rects.outlineRect(image, face.mouthRect, pick((255, 0, 0)))
def drawDebugRects(self, image):
    """Draw rectangles around the tracked facial features."""
    if utils.isGray(image):
        # NOTE(review): this variant draws in black (0) on grayscale
        # frames, unlike sibling implementations that use white (255).
        # Preserved as-is — confirm it is intentional.
        featureColors = [0] * 5
    else:
        featureColors = [
            (255, 255, 255),  # face: white
            (0, 0, 255),      # left eye: red
            (0, 255, 255),    # right eye: yellow
            (0, 255, 0),      # nose: green
            (255, 0, 0),      # mouth: blue
        ]
    for face in self.faces:
        featureRects = (face.faceRect, face.leftEyeRect,
                        face.rightEyeRect, face.noseRect, face.mouthRect)
        for rect, color in zip(featureRects, featureColors):
            rects.outlineRect(image, rect, color)
def drawDebugRects(self, image):
    """Draw feature rectangles, mark each feature's center with a small
    white box, connect the centers with lines, and print distance-ratio
    descriptors between the landmarks.
    """
    if utils.isGray(image):
        faceColor = 255
        leftEyeColor = 255
        rightEyeColor = 255
        noseColor = 255
        mouthColor = 255
        # BUG FIX: `white` was only defined in the color branch, so
        # grayscale frames raised NameError when drawing the markers.
        white = 255
    else:
        faceColor = (255, 255, 255)
        leftEyeColor = (0, 0, 255)
        rightEyeColor = (0, 255, 255)
        noseColor = (0, 255, 0)
        mouthColor = (255, 0, 0)
        white = (255, 255, 255)
    for face in self.faces:
        rects.outlineRect(image, face.faceRect, faceColor)
        rects.outlineRect(image, face.leftEyeRect, leftEyeColor)
        rects.outlineRect(image, face.rightEyeRect, rightEyeColor)
        rects.outlineRect(image, face.noseRect, noseColor)
        rects.outlineRect(image, face.mouthRect, mouthColor)
        k = 5  # half-size of the marker box drawn around each center

        def markCenter(rect):
            # Return rect's center and outline a small white box there;
            # a missing (None) rect yields None.
            if rect is None:
                return None
            cx = rect[0] + int(rect[2] / 2)
            cy = rect[1] + int(rect[3] / 2)
            rects.outlineRect(image, (cx - k, cy - k, 2 * k, 2 * k),
                              white)
            return (cx, cy)

        leftEye = markCenter(face.leftEyeRect)
        rightEye = markCenter(face.rightEyeRect)
        mouth = markCenter(face.mouthRect)
        nose = markCenter(face.noseRect)

        # Draw the landmark mesh and descriptors only when every
        # landmark was found. (The old test `a and b and c and
        # d is not None` happened to work for tuples/None but read as an
        # operator-precedence bug; made explicit here.)
        if all(p is not None for p in (leftEye, rightEye, nose, mouth)):
            for a, b in ((leftEye, rightEye), (leftEye, mouth),
                         (mouth, rightEye), (nose, rightEye),
                         (nose, leftEye), (nose, mouth)):
                cv2.line(image, a, b, white, 1)
            # Descriptors: distances between landmark centers.
            distEyes = utils.dist(leftEye, rightEye)
            distLeftEyeToNose = utils.dist(leftEye, nose)
            distRightEyeToNose = utils.dist(rightEye, nose)
            distLeftEyeToMouth = utils.dist(leftEye, mouth)
            distRightEyeToMouth = utils.dist(rightEye, mouth)
            # Retained from the original, currently unused in the print.
            distNoseToMouth = utils.dist(nose, mouth)
            # Print scale-invariant ratios as facial descriptors.
            print(distEyes / distLeftEyeToNose,
                  distEyes / distRightEyeToNose,
                  distEyes / distLeftEyeToMouth,
                  distEyes / distRightEyeToMouth,
                  distLeftEyeToNose / distLeftEyeToMouth,
                  distRightEyeToNose / distRightEyeToMouth)
def update(self, image):
    """Update the tracked facial features.

    :param image: frame to detect in (BGR or grayscale)
    :return: None
    """
    self._faces = []
    if utils.isGray(image):
        # Normalize brightness and boost contrast for robust detection.
        image = cv2.equalizeHist(image)
    else:
        # Convert to grayscale first.
        # FIX: cv2.cv.CV_BGR2GRAY was removed in OpenCV 3+; use the
        # portable COLOR_BGR2GRAY constant.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.equalizeHist(image)

    # Detect the face first; ignore faces smaller than 1/8 of the frame
    # in each dimension.
    minSize = utils.widthHeightDividedBy(image, 8)  # => (w, h)
    # detectMultiScale scans the image at multiple scales (scaleFactor
    # controls the per-step shrink, minNeighbors the number of
    # overlapping candidates required) and returns (x, y, w, h) rects.
    faceRects = self._faceClassifier.detectMultiScale(
        image, self.scaleFactor, self.minNeighbors, self.flags, minSize)

    if faceRects is not None:
        for faceRect in faceRects:
            face = Face()
            face.faceRect = faceRect
            x, y, w, h = faceRect

            # FIX: integer division — under Python 3, w / 7 yields a
            # float and search rects must hold ints.
            # Left eye: upper-left region of the face; ignore objects
            # smaller than 1/64 of the image.
            searchRect = (x + w // 7, y, w * 2 // 7, h // 2)
            face.leftEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)

            # Right eye: upper-right region; ignore objects < 1/64.
            searchRect = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
            face.rightEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)

            # Nose: central region; ignore objects < 1/32.
            searchRect = (x + w // 4, y + h // 4, w // 2, h // 2)
            face.noseRect = self._detectOneObject(
                self._noseClassifier, image, searchRect, 32)

            # Mouth: lower-middle region; ignore objects < 1/16.
            searchRect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
            face.mouthRect = self._detectOneObject(
                self._mouthClassifier, image, searchRect, 16)

            self._faces.append(face)