def _detectOneObject(self, classifier, image, rect, imageSizeToMinSizeRatio):
    """Search for a single facial feature (eye, nose, mouth) inside a region.

    :param classifier: the cascade classifier to run
    :type classifier: cv2.CascadeClassifier
    :param image: the full image being analyzed
    :param rect: (x, y, w, h) region of interest to search within
    :param imageSizeToMinSizeRatio: divisor applied to the whole image's
        dimensions to derive the minimum detection size (smaller hits
        are ignored)
    :return: (x, y, w, h) of the first detection in full-image
        coordinates, or None when nothing is found
    :rtype : tuple
    """
    rectX, rectY, rectW, rectH = rect
    minSize = utils.widthHeightDividedBy(image, imageSizeToMinSizeRatio)
    # Restrict the cascade to the region of interest only.
    roi = image[rectY:rectY + rectH, rectX:rectX + rectW]
    """:type : numpy.ndarray"""
    detections = classifier.detectMultiScale(
        roi, self.scaleFactor, self.minNeighbors, self.flags, minSize)
    """:type : list[tuple]"""
    if len(detections) == 0:
        return None
    # Translate the first hit from ROI-local back to image coordinates.
    hitX, hitY, hitW, hitH = detections[0]
    return (rectX + hitX, rectY + hitY, hitW, hitH)
def update(self, image):
    """Detect faces and their features in `image`, refreshing self._faces.

    :param image: BGR or grayscale image to analyze
    """
    self._faces = []
    if utils.isGray(image):
        image = cv2.equalizeHist(image)
    else:
        # cv2.cv.CV_BGR2GRAY no longer exists (the cv2.cv submodule was
        # removed in OpenCV 3); cv2.COLOR_BGR2GRAY is the equivalent
        # constant available in both OpenCV 2 and 3+.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Faces smaller than 1/8 of the image in each dimension are ignored.
    minSize = utils.widthHeightDividedBy(image, 8)
    faceRects = self._faceClassifier.detectMultiScale(
        image, self.scaleFactor, self.minNeighbors, self.flags, minSize)
    if faceRects is not None:
        for faceRect in faceRects:
            face = Face()
            face.faceRect = faceRect
            x, y, w, h = faceRect
            # Floor division (//) keeps the search-rect components
            # integral under Python 3; true division would produce
            # floats that break array slicing downstream.
            # Seek an eye in the upper-left part of the face.
            searchRect = (x + w // 7, y, w * 2 // 7, h // 2)
            face.leftEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek an eye in the upper-right part of the face.
            searchRect = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
            face.rightEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek a nose in the middle part of the face.
            searchRect = (x + w // 4, y + h // 4, w // 2, h // 2)
            face.noseRect = self._detectOneObject(
                self._noseClassifier, image, searchRect, 32)
            # Seek a mouth in the lower-middle part of the face.
            searchRect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
            face.mouthRect = self._detectOneObject(
                self._mouthClassifier, image, searchRect, 16)
            self._faces.append(face)
def _detectOneObject(self, classifier, image, rect, imageSizeToMinSizeRatio):
    """Run `classifier` inside `rect` and return the first detection.

    :param classifier: the cascade classifier to run
    :param image: the full image being analyzed
    :param rect: (x, y, w, h) region of interest to search within
    :param imageSizeToMinSizeRatio: divisor for the minimum detection size
    :return: (x, y, w, h) in full-image coordinates, or None
    """
    # Coerce the components to int up front: callers may pass floats
    # (e.g. from true division) and array slicing requires integers.
    # This also replaces the four separate int() assignments and the
    # commented-out debug prints that were left in the original.
    x, y, w, h = (int(v) for v in rect)
    minSize = utils.widthHeightDividedBy(image, imageSizeToMinSizeRatio)
    subImage = image[y:y + h, x:x + w]
    subRects = classifier.detectMultiScale(
        subImage, self.scaleFactor, self.minNeighbors, self.flags, minSize)
    if len(subRects) == 0:
        return None
    # Translate the hit from ROI-local back to image coordinates.
    subX, subY, subW, subH = subRects[0]
    return (x + subX, y + subY, subW, subH)
def _detect_one_object(self, classifier, image, search_rect, image_size_to_min_size_ratio):
    """Detect a single facial feature inside `search_rect`.

    Detections smaller than (image size / image_size_to_min_size_ratio)
    in either dimension are ignored.

    :param classifier: haar-cascade classifier to run
    :param image: full image being analyzed
    :param search_rect: (x, y, w, h) region to search
    :param image_size_to_min_size_ratio: divisor for the minimum size
    :return: (x, y, w, h) in full-image coordinates, or None
    """
    x, y, w, h = search_rect
    min_size = utils.widthHeightDividedBy(
        image, image_size_to_min_size_ratio)
    sub_image = image[y:y+h, x:x+w]
    sub_rects = []
    try:
        sub_rects = classifier.detectMultiScale(
            sub_image, self.scale_factor, self.min_neighbors,
            self.flags, min_size)
    except Exception as e:
        # NOTE(review): deliberately best-effort — a failing cascade is
        # reported and treated as "no detection" rather than crashing
        # the tracking loop. Consider narrowing this to cv2.error.
        print("classifier", classifier)  # fixed typo: was "classifer"
        print(e)
    if len(sub_rects) == 0:
        return None
    # Translate the ROI-local hit back to full-image coordinates.
    sub_x, sub_y, sub_w, sub_h = sub_rects[0]
    return (x + sub_x, y + sub_y, sub_w, sub_h)
def update(self, image):
    """Update the tracked facial features.

    :param image: BGR or grayscale image to analyze
    """
    self._faces = []
    if utils.isGray(image):
        image = cv2.equalizeHist(image)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Equalize in place to improve detection under uneven lighting.
        cv2.equalizeHist(image, image)
    # Faces smaller than 1/8 of the image in each dimension are ignored.
    minSize = utils.widthHeightDividedBy(image, 8)
    faceRects = self._faceClassifier.detectMultiScale(
        image, self.scaleFactor, self.minNeighbors, self.flags, minSize)
    if faceRects is not None:
        for faceRect in faceRects:
            face = Face()
            face.faceRect = faceRect
            x, y, w, h = faceRect
            # Floor division (//) yields ints directly, replacing the
            # int(...) wrappers around true division.
            # Seek an eye in the upper-left part of the face.
            searchRect = (x + w // 7, y, w * 2 // 7, h // 2)
            face.leftEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek an eye in the upper-right part of the face.
            searchRect = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
            face.rightEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek a nose in the middle part of the face.
            searchRect = (x + w // 4, y + h // 4, w // 2, h // 2)
            face.noseRect = self._detectOneObject(
                self._noseClassifier, image, searchRect, 32)
            # Seek a mouth in the lower-middle part of the face.
            searchRect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
            face.mouthRect = self._detectOneObject(
                self._mouthClassifier, image, searchRect, 16)
            # Seek a smile in the lower-middle part of the face.
            searchRect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
            face.smileRect = self._detectOneObject(
                self._smileClassifier, image, searchRect, 16)
            self._faces.append(face)
def _detectOneObject(self, classifier, image, rect, imageSizeToMinSizeRatio):
    """Return the first detection of `classifier` inside `rect`, in
    full-image coordinates, or None when nothing matches.
    """
    x, y, w, h = rect
    minSize = utils.widthHeightDividedBy(image, imageSizeToMinSizeRatio)
    # Crop the search window before running the cascade.
    regionOfInterest = image[y:y + h, x:x + w]
    hits = classifier.detectMultiScale(regionOfInterest,
                                       self.scaleFactor,
                                       self.minNeighbors,
                                       self.flags,
                                       minSize)
    if len(hits) == 0:
        return None
    # Offset the ROI-local hit by the region's origin.
    hitX, hitY, hitW, hitH = hits[0]
    return (x + hitX, y + hitY, hitW, hitH)
def _detectOneObject(self, classifier, image, rect, imageSizeToMinSizeRatio):
    """Search `rect` with `classifier` and translate the first match
    back to whole-image coordinates (None when there is no match).
    """
    left, top, width, height = rect
    minSize = utils.widthHeightDividedBy(image, imageSizeToMinSizeRatio)
    cropped = image[top:top + height, left:left + width]
    found = classifier.detectMultiScale(
        cropped, self.scaleFactor, self.minNeighbors, self.flags, minSize)
    if len(found) > 0:
        # The detector reports coordinates relative to the crop.
        innerX, innerY, innerW, innerH = found[0]
        return (left + innerX, top + innerY, innerW, innerH)
    return None
def update(self, image):
    """Update the tracked facial features.

    :param image: BGR or grayscale image to analyze
    """
    # Reset the features list
    self._faces = []
    # Equalize the histogram to be robust to lighting variation, and
    # convert to grayscale (if not already) to improve performance.
    if utils.isGray(image):
        image = cv2.equalizeHist(image)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(image, image)
    # Faces smaller than 1/8 of the image in each dimension are ignored.
    minSize = utils.widthHeightDividedBy(image, 8)
    faceRects = self._faceClassifier.detectMultiScale(
        image,
        self.scaleFactor,
        self.minNeighbours,
        self.flags,
        minSize
    )
    # If we find viable matches, place them in the faces list
    if faceRects is not None:
        for faceRect in faceRects:
            face = Face()
            face.faceRect = faceRect
            # Decompose the target feature. Floor division (//) keeps
            # the sub-rect components integral under Python 3.
            x, y, w, h = faceRect
            # Seek an eye in the upper LHS of the face
            searchRect = (x + w // 7, y, w * 2 // 7, h // 2)
            face.leftEyeRect = self._detectOneObject(
                self._eyeClassifier,
                image,
                searchRect,
                64
            )
            # Seek an eye in the upper RHS of the face
            searchRect = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
            face.rightEyeRect = self._detectOneObject(
                self._eyeClassifier,
                image,
                searchRect,
                64
            )
            # Seek a nose in the middle of the face
            searchRect = (x + w // 4, y + h // 4, w // 2, h // 2)
            face.noseRect = self._detectOneObject(
                self._noseClassifier,
                image,
                searchRect,
                32
            )
            # Seek a mouth in the lower third of the face.
            # BUG FIX: this previously ran the EYE classifier on the
            # mouth region; it must use the mouth classifier.
            searchRect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
            face.mouthRect = self._detectOneObject(
                self._mouthClassifier,
                image,
                searchRect,
                16
            )
            self._faces.append(face)
def update(self, image):
    """Update the tracked facial features.

    Only faces with at least one detected organ are kept.

    :param image: BGR or grayscale image to analyze
    """
    self._faces = []
    if utils.isGray(image):
        image = cv2.equalizeHist(image)
    else:
        # cv2.cv.CV_BGR2GRAY no longer exists (the cv2.cv submodule was
        # removed in OpenCV 3); cv2.COLOR_BGR2GRAY is the equivalent
        # constant available in both OpenCV 2 and 3+.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(image, image)
    # Faces smaller than 1/8 of the image in each dimension are ignored.
    minSize = utils.widthHeightDividedBy(image, 8)
    faceRects = self._faceClassifier.detectMultiScale(
        image, self.scaleFactor, self.minNeighbors, self.flags, minSize)
    if faceRects is not None:
        for faceRect in faceRects:
            face = Face()
            face.faceRect = faceRect
            face.logging_faceZone(image)
            x, y, w, h = faceRect
            # Floor division (//) keeps the search-rect components
            # integral under Python 3.
            # Seek an eye in the upper-left part of the face.
            searchRect = (x + w // 7, y, w * 2 // 7, h // 2)
            face.leftEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek an eye in the upper-right part of the face.
            searchRect = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
            face.rightEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek a nose in the middle part of the face.
            searchRect = (x + w // 4, y + h // 4, w // 2, h // 2)
            face.noseRect = self._detectOneObject(
                self._noseClassifier, image, searchRect, 32)
            # Seek a mouth in the lower-middle part of the face.
            searchRect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
            face.mouthRect = self._detectOneObject(
                self._mouthClassifier, image, searchRect, 16)
            # Discard candidate faces with no detected organs.
            if face.detectOrganCount >= 1:
                self._faces.append(face)
def update(self, image):
    """Update the tracked facial features.

    :param image: BGR or grayscale image to analyze
    """
    self._faces = []
    if utils.is_gray(image):
        image = cv2.equalizeHist(image)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.equalizeHist(image)
    # Faces smaller than 1/8 of the image in each dimension are ignored.
    min_size = utils.widthHeightDividedBy(image, 8)
    face_rects = self._face_classifier.detectMultiScale(
        image, self.scale_factor, self.min_neighbors, self.flags, min_size)
    if face_rects is not None:
        for face_rect in face_rects:
            face = Face()
            face.face_rect = face_rect
            x, y, w, h = face_rect
            # look for an eye in the upper-left part of the face
            search_rect = (x + w//7, y, w*2//7, h//2)
            face.left_eye_rect = self._detect_one_object(
                self._eye_classifier, image, search_rect, 64
            )
            # look for an eye in the upper-right part of the face
            search_rect = (x + (w*4)//7, y, w*2//7, h//2)
            face.right_eye_rect = self._detect_one_object(
                self._eye_classifier, image, search_rect, 64
            )
            # look for a nose in the middle part of the face.
            # BUG FIX: the width was w*2 (twice the whole face width),
            # which searched far outside the face; it must be w//2,
            # matching the half-face nose region used elsewhere.
            search_rect = (x + w//4, y + h//4, w//2, h//2)
            face.nose_rect = self._detect_one_object(
                self._nose_classifier, image, search_rect, 32
            )
            # look for a mouth in the lower-middle part of the face
            search_rect = (x + w//6, y + (h*2)//3, w*2//3, h//3)
            face.mouth_rect = self._detect_one_object(
                self._mouth_classifier, image, search_rect, 16
            )
            self._faces.append(face)
def update(self, image):
    """Update the tracked facial features.

    :param image: BGR or grayscale image to analyze
    """
    self._faces = []
    if utils.isGray(image):
        image = cv2.equalizeHist(image)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(image, image)
    # Faces smaller than 1/8 of the image in each dimension are ignored.
    # (Leftover debug print() calls on minSize and faceRects removed.)
    minSize = utils.widthHeightDividedBy(image, 8)
    faceRects = self._faceClassifier.detectMultiScale(
        image, self.scaleFactor, self.minNeighbors, self.flags, minSize)
    if faceRects is not None:
        for faceRect in faceRects:
            face = Face()
            face.faceRect = faceRect
            x, y, w, h = faceRect
            # Floor division (//) keeps the search-rect components
            # integral under Python 3.
            # Seek an eye in the upper-left part of the face.
            searchRect = (x + w // 7, y, w * 2 // 7, h // 2)
            face.leftEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek an eye in the upper-right part of the face.
            searchRect = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
            face.rightEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek a nose in the middle part of the face.
            searchRect = (x + w // 4, y + h // 4, w // 2, h // 2)
            face.noseRect = self._detectOneObject(self._noseClassifier,
                                                  image, searchRect, 32)
            # Seek a mouth in the lower-middle part of the face.
            searchRect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
            face.mouthRect = self._detectOneObject(self._mouthClassifier,
                                                   image, searchRect, 16)
            self._faces.append(face)
def update(self, image):
    """Update the tracked facial features.

    :param image: BGR or grayscale image to analyze
    """
    self._faces = []
    if utils.isGray(image):
        image = cv2.equalizeHist(image)
    else:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(image, image)
    # Faces smaller than 1/8 of the image in each dimension are ignored;
    # minSize must be an int pair for detectMultiScale.
    minSize = utils.widthHeightDividedBy(image, 8)
    minSize = (int(minSize[0]), int(minSize[1]))
    faceRects = self._faceClassifier.detectMultiScale(
        image, self.scaleFactor, self.minNeighbors, self.flags, minSize)
    if faceRects is not None:
        for faceRect in faceRects:
            face = Face()
            face.faceRect = faceRect
            x, y, w, h = faceRect
            # Floor division (//) keeps the search-rect components
            # integral under Python 3.
            # Seek an eye in the upper-left part of the face.
            searchRect = (x + w // 7, y, w * 2 // 7, h // 2)
            face.leftEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek an eye in the upper-right part of the face.
            searchRect = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
            face.rightEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Seek a nose in the middle part of the face.
            searchRect = (x + w // 4, y + h // 4, w // 2, h // 2)
            face.noseRect = self._detectOneObject(self._noseClassifier,
                                                  image, searchRect, 32)
            # Seek a mouth in the lower-middle part of the face.
            searchRect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
            face.mouthRect = self._detectOneObject(self._mouthClassifier,
                                                   image, searchRect, 16)
            self._faces.append(face)
def update(self, image):
    """Update the tracked facial features.

    Detects faces in `image`, then searches each face for both eyes,
    a nose, and a mouth, and stores the results in self._faces.

    :param image: BGR or grayscale image to analyze
    """
    self._faces = []
    if utils.isGray(image):
        # Normalize brightness and strengthen contrast.
        image = cv2.equalizeHist(image)
    else:
        # Convert to a grayscale image first.
        # cv2.cv.CV_BGR2GRAY no longer exists (the cv2.cv submodule was
        # removed in OpenCV 3); cv2.COLOR_BGR2GRAY is the equivalent
        # constant available in both OpenCV 2 and 3+.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.equalizeHist(image)
    # Detect faces first. Faces smaller than 1/8 * 1/8 = 1/64 of the
    # image are not recognized.
    minSize = utils.widthHeightDividedBy(image, 8)  # => (w, h)
    # detectMultiScale(image, scaleFactor, minNeighbors, flags, minSize):
    #   image        - CV_8U matrix in which objects are detected
    #   scaleFactor  - image-pyramid shrink step per scale
    #   minNeighbors - minimum neighboring rectangles a candidate needs
    #   flags        - unused by new cascades (cvHaarDetectObjects
    #                  semantics for old ones)
    #   minSize      - smallest object size; anything smaller is ignored
    # Returns the detections as a list of (x, y, w, h) rectangles.
    faceRects = self._faceClassifier.detectMultiScale(
        image, self.scaleFactor, self.minNeighbors, self.flags, minSize)
    """:type : list[tuple]"""
    # If any faces were detected...
    if faceRects is not None:
        # For each face...
        for faceRect in faceRects:
            # Build a Face object for it.
            face = Face()
            face.faceRect = faceRect
            x, y, w, h = faceRect
            # Floor division (//) keeps the search-rect components
            # integral under Python 3.
            # Search for the left eye:
            # .##....
            # .##....
            # .......
            # .......
            searchRect = (x + w // 7, y, w * 2 // 7, h // 2)
            # Objects smaller than 1/64 * 1/64 are ignored.
            face.leftEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Search for the right eye:
            # ....##.
            # ....##.
            # .......
            # .......
            searchRect = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
            face.rightEyeRect = self._detectOneObject(
                self._eyeClassifier, image, searchRect, 64)
            # Search for the nose:
            # .......
            # ..###..
            # ..###..
            # .......
            # Objects smaller than 1/32 * 1/32 are ignored.
            searchRect = (x + w // 4, y + h // 4, w // 2, h // 2)
            face.noseRect = self._detectOneObject(self._noseClassifier,
                                                  image, searchRect, 32)
            # Search for the mouth:
            # .......
            # .......
            # .#####.
            # .#####.
            # Objects smaller than 1/16 * 1/16 are ignored.
            searchRect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
            face.mouthRect = self._detectOneObject(self._mouthClassifier,
                                                   image, searchRect, 16)
            self._faces.append(face)