Example #1
    def __init__(self):
        """
        Class constructor.
        """

        self._faceDet = FaceDetector()
        '''
        The instance of the face detector.
        '''

        self._bank = GaborBank()
        '''
        The instance of the bank of Gabor filters.
        '''

        self._emotionsDet = EmotionsDetector()
        '''
        The instance of the emotions detector.
        '''

        self._face = FaceData()
        '''
        Data of the last face detected.
        '''

        self._emotions = OrderedDict()
        '''
        Data of the last emotions detected.
        '''
Example #2
    def __dataloader(self, train):
        # Initialize the data transforms
        transform = transforms.Compose([
            transforms.ToTensor(),
            # transforms.Normalize((0.1307,), (0.3081,))
        ])
        # dataset = CustomDataset(transform=transform)
        dataset = FaceData(root='dataset', train=train, transform=transform)

        if not train:
            device = torch.device(
                "cuda:0" if torch.cuda.is_available() else "cpu")
            self.support_set = torch.from_numpy(
                dataset.support_set).float().to(device)

        # When using multi-node training (DDP) we need to add the DistributedSampler
        train_sampler = None
        batch_size = self.hparams.batch_size

        if self.use_ddp:
            train_sampler = DistributedSampler(dataset)

        should_shuffle = train_sampler is None
        loader = DataLoader(dataset=dataset,
                            batch_size=batch_size,
                            shuffle=should_shuffle,
                            sampler=train_sampler,
                            num_workers=0,
                            drop_last=True)

        return loader
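For reference, a minimal sketch (an assumption, not shown in this example) of how a private builder like __dataloader is typically exposed through the PyTorch Lightning hooks; only the train_dataloader and val_dataloader hook names come from the framework:

    def train_dataloader(self):
        # Build the loader for the training split
        return self.__dataloader(train=True)

    def val_dataloader(self):
        # Build the loader for the evaluation split (this branch also moves
        # the support set to the GPU, as shown above)
        return self.__dataloader(train=False)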
Example #3
    def detect(self, image, downSampleRatio=None):
        if FaceDetector._detector is None or FaceDetector._predictor is None:
            FaceDetector._detector = dlib.get_frontal_face_detector()

            faceModel = os.path.abspath(
                'D:\\workspace\\emotional-analysis\\models\\face_model.dat')
            FaceDetector._predictor = dlib.shape_predictor(faceModel)

        if downSampleRatio is not None:
            detImage = cv2.resize(image, (0, 0),
                                  fx=1.0 / downSampleRatio,
                                  fy=1.0 / downSampleRatio)
        else:
            detImage = image

        # Detect faces in the image
        detectedFaces = FaceDetector._detector(detImage, 1)
        if len(detectedFaces) == 0:
            return False, None

        # Even if many faces are found, consider only the first one
        region = detectedFaces[0]

        # If downscaling was used, scale the detected region back to full
        # resolution (dlib.rectangle expects integer coordinates)
        if downSampleRatio is not None:
            region = dlib.rectangle(int(region.left() * downSampleRatio),
                                    int(region.top() * downSampleRatio),
                                    int(region.right() * downSampleRatio),
                                    int(region.bottom() * downSampleRatio))

        # Fit the shape model over the face region to predict the positions
        # of its facial landmarks
        faceShape = FaceDetector._predictor(image, region)
        face = FaceData()

        face.landmarks = np.array([[p.x, p.y] for p in faceShape.parts()])

        margin = 10
        x, y, w, h = cv2.boundingRect(face.landmarks)
        face.region = (max(x - margin,
                           0), max(y - margin,
                                   0), min(x + w + margin, image.shape[1] - 1),
                       min(y + h + margin, image.shape[0] - 1))

        return True, face
Example #4
    def __init__(self):

        # Instance of the face detector
        self._faceDet = FaceDetector()

        # Instance of the bank of Gabor filters
        self._bank = GaborBank()

        # Instance of the emotions detector
        self._emotionsDet = EmotionsDetector()

        # Data of the last face detected
        self._face = FaceData()

        # Data of the last emotions detected
        self._emotions = OrderedDict()
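For context, a hedged sketch of how these components could be combined per video frame. Only FaceDetector.detect() is shown elsewhere on this page; the GaborBank.filter() and EmotionsDetector.detect() calls, and the process_frame name, are assumptions made for illustration:

    def process_frame(self, frame):
        # Detect a face, downscaling 4x for speed; keep the previous results
        # if no face is found in this frame (detect() is assumed to behave as
        # in the other examples on this page)
        ret, face = self._faceDet.detect(frame, 4)
        if not ret:
            return self._emotions
        self._face = face

        # Assumed API: filter the image with the bank of Gabor filters and
        # feed the responses to the emotions detector
        responses = self._bank.filter(frame)
        self._emotions = self._emotionsDet.detect(face, responses)
        return self._emotions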
Example #5
    def detect(self, image, downSampleRatio = None):
        """
        Tries to automatically detect a face in the given image.

        This method uses the face detector/predictor from the dlib package (with
        its default face model) to detect a face region and 68 facial landmarks.
        Even though dlib is able to detect more than one face in the image, for
        the current purposes of the fsdk project only a single face is needed.
        Hence, only the first face detected is considered.

        Parameters
        ----------
        image: numpy.ndarray
            Image data where to search for the face.
        downSampleRatio: float
            Ratio by which the image is downscaled before the initial face
            detection, in order to improve performance. The detected region
            is scaled back up so the landmarks are predicted on the image in
            full resolution. The default is None (no downscaling).

        Returns
        -------
        result: bool
            Indication on the success or failure of the facial detection.
        face: FaceData
            Instance of the FaceData class with the region and landmarks of the
            detected face, or None if no face was detected.
        """

        #####################
        # Setup the detector
        #####################

        # Initialize the static detector and predictor on first use
        if FaceDetector._detector is None or FaceDetector._predictor is None:
            FaceDetector._detector = dlib.get_frontal_face_detector()

            faceModel = os.path.abspath('{}/models/face_model.dat' \
                            .format(os.path.dirname(__file__)))
            FaceDetector._predictor = dlib.shape_predictor(faceModel)

        #####################
        # Performance cues
        #####################

        # If requested, scale down the original image in order to improve
        # performance in the initial face detection
        if downSampleRatio is not None:
            detImage = cv2.resize(image, (0, 0), fx=1.0 / downSampleRatio,
                                                 fy=1.0 / downSampleRatio)
        else:
            detImage = image

        #####################
        # Face detection
        #####################

        # Detect faces in the image
        detectedFaces = FaceDetector._detector(detImage, 1)
        if len(detectedFaces) == 0:
            return False, None

        # No matter how many faces have been found, consider only the first one
        region = detectedFaces[0]

        # If downscaling was requested, scale the detected region back up so
        # the landmarks can be properly located on the image in full
        # resolution (dlib.rectangle expects integer coordinates)
        if downSampleRatio is not None:
            region = dlib.rectangle(int(region.left() * downSampleRatio),
                                    int(region.top() * downSampleRatio),
                                    int(region.right() * downSampleRatio),
                                    int(region.bottom() * downSampleRatio))

        # Fit the shape model over the face region to predict the positions of
        # its facial landmarks
        faceShape = FaceDetector._predictor(image, region)

        #####################
        # Return data
        #####################

        face = FaceData()

        # Update the object data with the predicted landmark positions and
        # their bounding box (with a small margin of 10 pixels)
        face.landmarks = np.array([[p.x, p.y] for p in faceShape.parts()])

        margin = 10
        x, y, w, h = cv2.boundingRect(face.landmarks)
        face.region = (
                       max(x - margin, 0),
                       max(y - margin, 0),
                       min(x + w + margin, image.shape[1] - 1),
                       min(y + h + margin, image.shape[0] - 1)
                      )

        return True, face
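A short usage sketch of the method above ('photo.jpg' and the drawing steps are placeholders, not from the source):

import cv2

detector = FaceDetector()
image = cv2.imread('photo.jpg')

# Detect at a quarter of the resolution; the landmarks are still predicted
# on the full-resolution image
ret, face = detector.detect(image, downSampleRatio=4)
if ret:
    # face.region is (left, top, right, bottom) with a 10-pixel margin
    x1, y1, x2, y2 = face.region
    cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
    # face.landmarks holds the 68 (x, y) landmark positions
    for px, py in face.landmarks:
        cv2.circle(image, (int(px), int(py)), 2, (0, 0, 255), -1)
    cv2.imwrite('detected.jpg', image)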