Code example #1
File: processarVideo.py  Project: fleuryz/Focus
class VideoData:
    """
    Helper class to present the detected face region, landmarks and emotions.
    """

    #-----------------------------------------
    def __init__(self):
        """
        Class constructor.
        """

        self._faceDet = FaceDetector()
        '''
        The instance of the face detector.
        '''

        self._bank = GaborBank()
        '''
        The instance of the bank of Gabor filters.
        '''

        self._emotionsDet = EmotionsDetector()
        '''
        The instance of the emotions detector.
        '''

        self._face = FaceData()
        '''
        Data of the last face detected.
        '''

        self._emotions = OrderedDict()
        '''
        Data of the last emotions detected.
        '''

    #-----------------------------------------
    def detect(self, frame):
        """
        Detects a face and the prototypic emotions on the given frame image.

        Parameters
        ----------
        frame: numpy.ndarray
            Image in which to perform the detections.

        Returns
        -------
        ret: bool
            Indication of success or failure.
        """

        ret, face = self._faceDet.detect(frame)
        if ret:
            self._face = face

            # Crop just the face region
            frame, face = face.crop(frame)

            # Filter it with the Gabor bank
            responses = self._bank.filter(frame)

            # Detect the prototypic emotions based on the filter responses
            self._emotions = self._emotionsDet.detect(face, responses)

            return True
        else:
            self._face = None
            return False

    #---------------------------------------------
    def imprimir_tempo(self, tempo, frame, lab, val, fps):
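        """
        Prints the timestamp of the given frame together with an emotion
        label and its value.

        The base timestamp `tempo` (a string formatted as
        'year/month/day/hour/minute/second/nanosecond') is advanced by the
        time elapsed after `frame` frames at `fps` frames per second, and
        the result is printed as 'y/m/d/h/m/s/nano-lab-val'.
        """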

        [ano, mes, dia, hora, minuto, segundo, nano] = tempo.split('/')

        nano = int(nano)
        segundo = int(segundo)
        minuto = int(minuto)
        hora = int(hora)
        dia = int(dia)
        mes = int(mes)
        ano = int(ano)

        tempoPassado = frame * int(1000000000 / fps)

        nano += tempoPassado % 1000000000

        tempoPassado = int(tempoPassado / 1000000000)
        if nano >= 1000000000:
            nano -= 1000000000
            tempoPassado += 1

        segundo += tempoPassado % 60
        tempoPassado = int(tempoPassado / 60)
        if segundo >= 60:
            segundo -= 60
            tempoPassado += 1

        minuto += tempoPassado % 60
        tempoPassado = int(tempoPassado / 60)
        if minuto >= 60:
            minuto -= 60
            tempoPassado += 1

        hora += tempoPassado % 24
        tempoPassado = int(tempoPassado / 24)
        if hora >= 24:
            hora -= 24
            tempoPassado += 1

        # Whatever carry remains is whole days (rollover past the month length is not handled)
        dia += tempoPassado

        saida = '{}/{}/{}/{}/{}/{}/{}-{}-{}'.format(
            ano, mes, dia, hora, minuto, segundo, nano, lab, val)

        print(saida)

    #-----------------------------------------
    def draw(self, frame, tempo, frameNum, fps):
        """
        Draws the detected data of the given frame image.

        Parameters
        ----------
        frame: numpy.ndarray
            Image where to draw the information to.
        """

        empty = True

        try:
            face = self._face
            empty = face.isEmpty()
            face.draw(frame)
        except:
            pass

        # Plot the emotion probabilities
        try:
            emotions = self._emotions
            if empty:
                labels = []
                values = []
            else:

                labels = list(emotions.keys())
                values = list(emotions.values())
                bigger = labels[values.index(max(values))]

            for l, v in zip(labels, values):
                lab = '{}'.format(l)
                val = '{:.2f}'.format(v)

                self.imprimir_tempo(tempo, frameNum, lab, val, fps)

        except Exception as e:
            print(e)
            pass
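
For orientation, a minimal driver sketch for this class, assuming the helper modules used above (FaceDetector, GaborBank, EmotionsDetector, FaceData) are importable from the project; the file name and the start timestamp are placeholders.

import cv2

video = cv2.VideoCapture('video.mp4')      # hypothetical input file
fps = video.get(cv2.CAP_PROP_FPS)

data = VideoData()
inicio = '2024/01/01/00/00/00/0'           # assumed start time: year/month/day/hour/minute/second/nano

frameNum = 0
while True:
    ret, frame = video.read()
    if not ret:
        break
    data.detect(frame)                       # detect face and emotions in this frame
    data.draw(frame, inicio, frameNum, fps)  # draws the face and prints 'timestamp-label-value' lines
    frameNum += 1

video.release()
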
Code example #2
class VideoData:
    """
    Helper class to present the detected face region, landmarks and emotions.
    """

    #-----------------------------------------
    def __init__(self):
        """
        Class constructor.
        """

        self._faceDet = FaceDetector()
        '''
        The instance of the face detector.
        '''

        self._bank = GaborBank()
        '''
        The instance of the bank of Gabor filters.
        '''

        self._emotionsDet = EmotionsDetector()
        '''
        The instance of the emotions detector.
        '''

        self._face = FaceData()
        '''
        Data of the last face detected.
        '''

        self._emotions = OrderedDict()
        '''
        Data of the last emotions detected.
        '''

    #-----------------------------------------
    def detect(self, frame):
        """
        Detects a face and the prototypic emotions on the given frame image.

        Parameters
        ----------
        frame: numpy.ndarray
            Image in which to perform the detections.

        Returns
        -------
        emotions: OrderedDict or bool
            The detected emotions and their probabilities, or False if no
            face was detected.
        """

        ret, face = self._faceDet.detect(frame)
        if ret:
            self._face = face

            # Crop just the face region
            frame, face = face.crop(frame)

            # Filter it with the Gabor bank
            responses = self._bank.filter(frame)

            # Detect the prototypic emotions based on the filter responses
            self._emotions = self._emotionsDet.detect(face, responses)

            return self._emotions
        else:
            self._face = None
            return False

    #-----------------------------------------
    def draw(self, frame):
        """
        Draws the detected data of the given frame image.

        Parameters
        ----------
        frame: numpy.ndarray
            Image on which to draw the information.
        """
        # Font settings
        font = cv2.FONT_HERSHEY_SIMPLEX
        scale = 0.5
        thick = 1
        glow = 3 * thick

        # Color settings
        black = (0, 0, 0)
        white = (255, 255, 255)
        yellow = (0, 255, 255)
        red = (0, 0, 255)

        empty = True

        # Plot the face landmarks and face distance
        x = 5
        y = 0
        w = int(frame.shape[1] * 0.2)

        try:
            face = self._face
            empty = face.isEmpty()
            #face.draw(frame)
        except:
            traceback.print_exc()
            pass

        # Plot the emotion probabilities
        try:
            emotions = self._emotions
            if empty:
                labels = []
                values = []
            else:
                labels = list(emotions.keys())
                values = list(emotions.values())
                bigger = labels[values.index(max(values))]

                # Draw the header
                text = 'emotions'
                size, _ = cv2.getTextSize(text, font, scale, thick)
                y += size[1] + 20

                cv2.putText(frame, text, (x, y), font, scale, black, glow)
                cv2.putText(frame, text, (x, y), font, scale, yellow, thick)

                y += 5
                cv2.line(frame, (x, y), (x + w, y), black, 1)

            size, _ = cv2.getTextSize('happiness', font, scale, thick)
            t = size[0] + 20
            w = 150
            h = size[1]
            for l, v in zip(labels, values):
                lab = '{}:'.format(l)
                val = '{:.2f}'.format(v)
                size, _ = cv2.getTextSize(l, font, scale, thick)

                # Set a red color for the emotion with bigger probability
                color = red if l == bigger else yellow

                y += size[1] + 15

                p1 = (x + t, y - size[1] - 5)
                p2 = (x + t + w, y - size[1] + h + 5)
                cv2.rectangle(frame, p1, p2, black, 1)

                # Draw the filled rectangle proportional to the probability
                p2 = (p1[0] + int((p2[0] - p1[0]) * v), p2[1])
                cv2.rectangle(frame, p1, p2, color, -1)
                cv2.rectangle(frame, p1, p2, black, 1)

                # Draw the emotion label
                cv2.putText(frame, lab, (x, y), font, scale, black, glow)
                cv2.putText(frame, lab, (x, y), font, scale, color, thick)

                # Draw the value of the emotion probability
                cv2.putText(frame, val, (x + t + 5, y), font, scale, black,
                            glow)
                cv2.putText(frame, val, (x + t + 5, y), font, scale, white,
                            thick)
        except Exception as e:
            print(e)
            pass
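
A similar sketch for this variant, assuming a webcam as input: here detect() returns the OrderedDict of emotion probabilities (or False), and draw() overlays the probability bars. The camera index, window name and exit key are arbitrary choices.

import cv2

cap = cv2.VideoCapture(0)          # default webcam; the index is an assumption
data = VideoData()

while True:
    ok, frame = cap.read()
    if not ok:
        break
    emotions = data.detect(frame)  # OrderedDict of emotion probabilities, or False
    data.draw(frame)               # overlay the emotion bars onto the frame
    cv2.imshow('emotions', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
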
Code example #3
    if args.fov:
        fov = args.fov
        f = None
    elif args.f:
        fov = None
        f = args.f
    else:
        print("Please, specify FOV or focal length!")
        parser.print_help()
        exit(2)

    step = args.step

    if args.detect:
        print("Will use face detector")
        detector = FaceDetector()
        faces = detector.find_faces(src)
        masks = get_mask_from_face_detect(src, faces)
    elif args.maskrcnn:
        print("Will use Mask R-CNN")
        from segmentation import PersonSegmentation
        segm = PersonSegmentation()
        segm.detect(src)
        masks = segm.person_masks
    elif args.mask:
        print(f"Will use user-defined mask {args.mask}")
        masks = get_mask_from_input(args.mask)
    else:
        print("Please, specify way of detection!")
        parser.print_help()
        exit(3)
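
This fragment presupposes an argparse parser created earlier in the script. A setup consistent with the options it reads might look roughly as follows (types, defaults and help texts are assumptions):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--fov', type=float, help='horizontal field of view in degrees')
parser.add_argument('--f', type=float, help='focal length')
parser.add_argument('--step', type=int, default=1, help='processing step')
parser.add_argument('--detect', action='store_true', help='build the mask with the face detector')
parser.add_argument('--maskrcnn', action='store_true', help='build the mask with Mask R-CNN person segmentation')
parser.add_argument('--mask', help='path to a user-defined mask')
args = parser.parse_args()
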
Code example #4
import os
import sys

sys.path.append('../godofeye/lib/yoloface')

from blueeyes.face_recognition import FaceDetector, FaceRecognition
from blueeyes.utils import Camera

cap = Camera(
    source='rtsp://*****:*****@10.10.46.224:554/Streaming/Channels/101',
    frameskip=5)
cap.start()

# Configurations
FRAME_COUNT_TO_DECIDE = 5

HOME = os.environ['HOME']
# detector = FaceDetector('mtcnn', min_face_size=60)
detector = FaceDetector('yolo', model_img_size=(128, 128))
# detector = YOLO()
recog = FaceRecognition(
    # model_dir='/home/huy/code/godofeye/models',
    model_dir=f'{HOME}/Downloads',
    dataset=f'{HOME}/output',
    vggface=False,
    use_knn=True
    # retrain=False
)


def process_id(result_id):
    print(result_id)
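
The recognition loop itself is not part of this excerpt, so how FRAME_COUNT_TO_DECIDE is used is not shown; one plausible reading is that an identity is only reported after it wins a vote over that many recent frames. A purely illustrative sketch of such a vote (the per-frame predicted_id would come from the detector/recogniser configured above):

from collections import Counter, deque

recent_ids = deque(maxlen=FRAME_COUNT_TO_DECIDE)

def maybe_decide(predicted_id):
    # Report an identity only once it fills the whole voting window (illustrative only).
    recent_ids.append(predicted_id)
    if len(recent_ids) == FRAME_COUNT_TO_DECIDE:
        winner, count = Counter(recent_ids).most_common(1)[0]
        if count == FRAME_COUNT_TO_DECIDE:
            process_id(winner)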

Code example #5
File: exportarVideo.py  Project: fleuryz/Focus
class VideoData:
    """
    Helper class to present the detected face region, landmarks and emotions.
    """

    #-----------------------------------------
    def __init__(self):
        """
        Class constructor.
        """

        self._faceDet = FaceDetector()
        '''
        The instance of the face detector.
        '''

        self._bank = GaborBank()
        '''
        The instance of the bank of Gabor filters.
        '''

        self._emotionsDet = EmotionsDetector()
        '''
        The instance of the emotions detector.
        '''

        self._face = FaceData()
        '''
        Data of the last face detected.
        '''

        self._emotions = OrderedDict()
        '''
        Data of the last emotions detected.
        '''

    #-----------------------------------------
    def detect(self, frame):
        """
        Detects a face and the prototypic emotions on the given frame image.

        Parameters
        ----------
        frame: numpy.ndarray
            Image in which to perform the detections.

        Returns
        -------
        ret: bool
            Indication of success or failure.
        """

        ret, face = self._faceDet.detect(frame)
        if ret:
            self._face = face

            # Crop just the face region
            frame, face = face.crop(frame)

            # Filter it with the Gabor bank
            responses = self._bank.filter(frame)

            # Detect the prototypic emotions based on the filter responses
            self._emotions = self._emotionsDet.detect(face, responses)

            return True
        else:
            self._face = None
            return False

    #---------------------------------------------
    def imprimir_tempo(self, tempo, frame, lab, val, fps):
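        """
        Prints the timestamp of the given frame together with an emotion
        label and its value.

        The base timestamp `tempo` (a string formatted as
        'year/month/day/hour/minute/second/nanosecond') is advanced by the
        time elapsed after `frame` frames at `fps` frames per second, and
        the result is printed as 'y/m/d/h/m/s/nano-lab-val'.
        """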

        [ano, mes, dia, hora, minuto, segundo, nano] = tempo.split('/')

        nano = int(nano)
        segundo = int(segundo)
        minuto = int(minuto)
        hora = int(hora)
        dia = int(dia)
        mes = int(mes)
        ano = int(ano)

        tempoPassado = frame * int(1000000000 / fps)

        nano += tempoPassado % 1000000000

        tempoPassado = int(tempoPassado / 1000000000)
        if nano >= 1000000000:
            nano -= 1000000000
            tempoPassado += 1

        segundo += tempoPassado % 60
        tempoPassado = int(tempoPassado / 60)
        if segundo >= 60:
            segundo -= 60
            tempoPassado += 1

        minuto += tempoPassado % 60
        tempoPassado = int(tempoPassado / 60)
        if minuto >= 60:
            minuto -= 60
            tempoPassado += 1

        hora += tempoPassado % 24
        tempoPassado = int(tempoPassado / 24)
        if hora >= 24:
            hora -= 24
            tempoPassado += 1

        # Whatever carry remains is whole days (rollover past the month length is not handled)
        dia += tempoPassado

        saida = '{}/{}/{}/{}/{}/{}/{}-{}-{}'.format(
            ano, mes, dia, hora, minuto, segundo, nano, lab, val)

        print(saida)

    #-----------------------------------------

    def drawFrame(self, frame, labels):
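        """
        Draws the static background of the emotion chart: the bottom line,
        one horizontal line and text label per emotion, and the vertical
        divider/border lines. The layout constants (COM_X, FIM_X, FIM_Y,
        DIVISOR_X, TAM_LET, ...) are presumably defined elsewhere in the
        module.
        """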
        atual = -1
        preto = (0, 0, 0)
        amarelo = (0, 255, 255)
        soft = TAM_LINHA
        font = cv2.FONT_HERSHEY_SIMPLEX

        cv2.line(frame, (COM_X, FIM_Y), (FIM_X, FIM_Y), preto, soft)
        y = COM_Y
        for l in labels:
            atual += 1
            lab = '{}:'.format(l)

            x = OFFSETLETRA_X
            y = OFFSETLETRA_FRAME - atual * TAM_LET
            #size, _ = cv2.getTextSize(lab, font, 1, soft)
            # the largest label has a size of (164, 22)
            #print (size)
            cv2.putText(frame, lab, (x, y + OFFSETLETRA_Y), font, 1, amarelo,
                        soft)
            cv2.line(frame, (COM_X, y), (FIM_X, y), preto, soft)

        cv2.line(frame, (DIVISOR_X, y), (DIVISOR_X, FIM_Y), preto, soft)
        cv2.line(frame, (FIM_X, y), (FIM_X, FIM_Y), preto, soft)
        #cv2.line(frame, (600, y), (600,465), cor, soft)

        return frame

    #-----------------------------------------
    def draw(self, frame, tempo, frameNum, vals, fps, processar):
        """
        Draws the detected data of the given frame image.

        Parameters
        ----------
        frame: numpy.ndarray
            Image where to draw the information to.
        """

        amarelo = (0, 255, 255)

        empty = True

        try:
            face = self._face
            empty = face.isEmpty()
        except:
            pass

        # Plot the emotion probabilities
        try:
            emotions = self._emotions
            atual = 0
            labels = [
                'Neutral', 'Felicidade', 'Tristeza', 'Raiva', 'Medo',
                'Surpresa', 'Desgosto'
            ]
            if empty:
                values = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
            else:
                values = list(emotions.values())
                bigger = labels[values.index(max(values))]

            frame = self.drawFrame(frame, labels)
            for l, v in zip(labels, values):
                lab = '{}'.format(l)
                val = '{:.2f}'.format(v)
                vals[atual].rotate(-1)
                vals[atual].pop()
                vals[atual].append(v)
                for i in range(PONTOS - 1):
                    valor1 = int(OFFSETPONTO - vals[atual][i] * RESOL_LINHA_Y -
                                 atual * RESOL_LINHA_Y)
                    valor2 = int(OFFSETPONTO -
                                 vals[atual][i + 1] * RESOL_LINHA_Y -
                                 atual * RESOL_LINHA_Y)
                    cv2.line(frame, (OFFSETLINHA + RESOL_LINHA_X * i, valor1),
                             (OFFSETLINHA + RESOL_LINHA_X * (i + 1), valor2),
                             amarelo, TAM_LINHA)
                #cv2.putText(frame, val, (5, 20 + atual*25), font, 1, yellow, 1)
                #cv2.putText(frame, '{}'.format(vals[atual][199]), (320, 20 + atual*25), font, 1, yellow, 1)
                if processar:
                    self.imprimir_tempo(tempo, frameNum, lab, val, fps)
                atual += 1

            return frame, vals
        except Exception as e:
            print(e)
            pass
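
A rough driver sketch for this exporting variant: the rotate()/pop()/append() calls on vals[atual] imply one collections.deque of PONTOS values per emotion, so the rolling buffers might be initialised as below. The file name, the start timestamp and the module-level constant PONTOS are assumptions taken from the surrounding code.

from collections import deque
import cv2

video = cv2.VideoCapture('video.mp4')              # hypothetical input file
fps = video.get(cv2.CAP_PROP_FPS)

data = VideoData()
vals = [deque([0.0] * PONTOS) for _ in range(7)]   # one rolling buffer per emotion label
inicio = '2024/01/01/00/00/00/0'                   # assumed start timestamp

frameNum = 0
while True:
    ret, frame = video.read()
    if not ret:
        break
    data.detect(frame)
    frame, vals = data.draw(frame, inicio, frameNum, vals, fps, True)
    frameNum += 1

video.release()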