Example #1
    def detection_thread(stop_event, face_queue):

        # Initialize the face detector
        face_detector = fd.FaceDetector()

        while not stop_event.is_set():

            # Grab a frame from the video source (video_capture is opened by
            # the main thread)
            _, cam_frame = video_capture.read()

            # Not entirely sure why, but the image has to be mirrored for the
            # coordinates to come out the way we expect
            cam_frame = cv2.flip(cam_frame, 1)

            # Detect the face
            face_rect = face_detector.detect(cam_frame)

            # Drain the queue in case the main thread is running slow and has
            # not yet consumed the previous result (discard the stale value so
            # it does not overwrite the fresh detection)
            try:
                face_queue.get_nowait()
            except queue.Empty:
                pass

            # Put the new detection on the queue
            try:
                face_queue.put_nowait(face_rect)
            except queue.Full:
                # Should never happen
                print("Error: detection queue is full")
Example #2

    def __init__(self, time):
        self.prev_face = [0, 0, 0, 0]  # last detected face rectangle
        self.skin_prev = []            # previous skin-region samples
        self.rPPG = []                 # rPPG (remote photoplethysmography) signal buffer
        self.cropped_img = []          # cropped face images
        self.start_time = time
        self.last_time_check = 0
        self.running_time = 0
        self.detect_face = face_detection.FaceDetector()
Example #3

import cv2
import dlib
import openface
import face_detection

predictor_model = "/home/unknownpgr/anaconda3/envs/opencv/openface/models/dlib/shape_predictor_68_face_landmarks.dat"
face_aligner = openface.AlignDlib(predictor_model)

# Training part
model_path = "/home/unknownpgr/anaconda3/envs/opencv/openface/models/openface/nn4.small2.v1.t7"

with face_detection.FaceDetector(torch_net_model=model_path) as fd:
    fd.append_dir("Obama", "./training_images/obama_aligned")
    fd.append_dir("Trump", "./training_images/trump_aligned")
    fd.append_dir("Unknown", "./training_images/unknowns_aligned")

    fd.train_model()

    fd.save('./face_detector')

# Prediction part

with face_detection.FaceDetector.load('./face_detector') as fd:
    def get_frame_size(capture):
        return capture.get(cv2.CAP_PROP_FRAME_WIDTH), capture.get(cv2.CAP_PROP_FRAME_HEIGHT)


    def draw_caption(img, rect, caption):
        cv2.rectangle(img, (rect.left(), rect.top()), (rect.right(), rect.bottom()), (0, 0, 64))
        cv2.putText(img, caption, (rect.left(), rect.top() - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 32))
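
    # The prediction loop itself is cut off in this example. What follows is an
    # illustrative sketch, not the original code: it assumes fd.predict takes an
    # aligned face image (as in Example #6) and uses AlignDlib's
    # getLargestFaceBoundingBox to locate a face. The camera index is an
    # assumption.
    video_capture = cv2.VideoCapture(0)

    while True:
        ret, frame = video_capture.read()
        if not ret:
            break

        face_rect = video_rect = face_aligner.getLargestFaceBoundingBox(frame)
        if face_rect is not None:
            # 96 matches the input size of nn4.small2.v1.t7
            aligned = face_aligner.align(
                96, frame, face_rect,
                landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
            draw_caption(frame, face_rect, fd.predict(aligned))

        cv2.imshow('Face Detector', frame)
        if cv2.waitKey(1) == ord('q'):
            break

    video_capture.release()
    cv2.destroyAllWindows()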
Example #4
    def __init__(self):
        self.face_detector = face_detection.FaceDetector(
            face_detection.DetectionMethods.HAAR)

        QMainWindow.__init__(self)
        self.setObjectName("Face Swaping Application")
        self.resize(1024, 700)
        self._central_widget = QtWidgets.QWidget(self)
        self._central_widget.setObjectName("_central_widget")
        self._selected_icon = None
        self._click_x = 0
        self._click_y = 0
        self.mouse_x = 0
        self.mouse_y = 0
        self._file_path = ""
        self._detection = False
        self._current_method = DetectionMethods.HAAR
        self._current_track_method = TrackingMethods.DLIB

        # video widget
        self._video = QtWidgets.QLabel(self._central_widget)
        self._video.setGeometry(
            QtCore.QRect(self.VIDEO_BOX_X, self.VIDEO_BOX_Y, self.VIDEO_WIDTH,
                         self.VIDEO_HEIGHT))
        self._video.setObjectName("video")
        self._capturing = Capture(self._video, self)

        # video frame rate
        self._timer = QtCore.QTimer()
        self._timer.timeout.connect(self.show_frame)
        self._timer.start(int(1000 / self.VIDEO_REFRESH_RATE))  # QTimer expects milliseconds as int

        # choose file button
        self._choose_file_button = QtWidgets.QPushButton(self._central_widget)
        self._choose_file_button.setGeometry(
            QtCore.QRect(30, self.BUTTON_Y_POS, 151, 31))
        self._choose_file_button.setObjectName("Input_File")
        self._choose_file_button.clicked.connect(self.get_file)

        # start button
        self._start_button = QtWidgets.QPushButton(self._central_widget)
        self._start_button.setGeometry(
            QtCore.QRect(240, self.BUTTON_Y_POS, 151, 31))
        self._start_button.setObjectName("_push_button_2")
        self._start_button.clicked.connect(self.start_event)
        self._start_button.setCheckable(True)

        # trackers remove button
        self._trackers_button = QtWidgets.QPushButton(self._central_widget)
        self._trackers_button.setGeometry(
            QtCore.QRect(20, 352 + self.MESSAGE_BOX_Y_OFFSET, 151, 20))
        self._trackers_button.setObjectName("Remove_Trackers")
        self._trackers_button.clicked.connect(self.remove_trackers)

        # file input path (text box)
        self._input_box = QtWidgets.QTextEdit(self._central_widget)
        self._input_box.setGeometry(
            QtCore.QRect(20, 100, 381, self.TEXT_BOX_HEIGHT))
        self._input_box.setObjectName("Input File")

        # input file label
        self._input_label = QtWidgets.QLabel(self._central_widget)
        self._input_label.setGeometry(
            QtCore.QRect(20, 70, 381, self.TEXT_BOX_HEIGHT))
        self._input_label.setObjectName("_input_label")
        self._input_label.setText("Input file path:")

        # text label "Choose a method"
        self._method_label = QtWidgets.QLabel(self._central_widget)
        self._method_label.setGeometry(
            QtCore.QRect(20, 130, 381, self.TEXT_BOX_HEIGHT))
        self._method_label.setObjectName("_method_label")
        self._method_label.setText("Choose a method of face detection:")

        # choose detection method
        self._method_box = QtWidgets.QComboBox(self._central_widget)
        self._method_box.setGeometry(
            QtCore.QRect(20, 160, 381, self.TEXT_BOX_HEIGHT))
        self._method_box.setObjectName("_detection_method_box")
        self._method_box.addItems([
            "Haar cascade", "Lbp cascade", "HOG method(dlib)",
            "CNN method(dlib)"
        ])
        self._method_box.currentIndexChanged.connect(self.methods_change)

        # text label "Choose a tracking method"
        self._track_method_label = QtWidgets.QLabel(self._central_widget)
        self._track_method_label.setGeometry(
            QtCore.QRect(20, 190, 381, self.TEXT_BOX_HEIGHT))
        self._track_method_label.setObjectName("_method_label")
        self._track_method_label.setText("Choose a method of face tracking:")

        # choose tracking method
        self._track_method_box = QtWidgets.QComboBox(self._central_widget)
        self._track_method_box.setGeometry(
            QtCore.QRect(20, 220, 381, self.TEXT_BOX_HEIGHT))
        self._track_method_box.setObjectName("_tracking_method_box")
        self._track_method_box.addItems([
            "Dlib: Correlation Tracker", "OpenCV: BOOSTING", "OpenCV: MIL",
            "OpenCV: KCF", "OpenCV: TLD", "OpenCV: MEDIANFLOW",
            "OpenCV: GOTURN"
        ])
        # Disable GOTURN method since there is a bug in OpenCV
        self._track_method_box.model().item(6).setEnabled(False)
        self._track_method_box.currentIndexChanged.connect(
            self.track_method_change)

        # infos about FR
        self._text_browser = QtWidgets.QTextBrowser(self._central_widget)
        self._text_browser.setGeometry(
            QtCore.QRect(20, 220 + self.MESSAGE_BOX_Y_OFFSET, 381, 121))
        self._text_browser.setObjectName("_text_browser")
        self._text_browser.setText("Information about face detection")

        # label infos about process of FD
        self._info_label = QtWidgets.QLabel(self._central_widget)
        self._info_label.setGeometry(
            QtCore.QRect(20, 190 + self.MESSAGE_BOX_Y_OFFSET, 381,
                         self.TEXT_BOX_HEIGHT))
        self._info_label.setObjectName("_info_label")
        self._info_label.setText("Information about face detection:")

        # on/off face swap checkbox
        self._check_box = QtWidgets.QCheckBox(self._central_widget)
        self._check_box.setGeometry(
            QtCore.QRect(280, 350 + self.MESSAGE_BOX_Y_OFFSET, 150,
                         self.TEXT_BOX_HEIGHT))
        self._check_box.setObjectName("_check_box")
        self._check_box.stateChanged.connect(
            lambda: self.check_box_event(self._check_box))

        # Face icons
        pic_face = QPixmap("./bach.jpg")  # default icon
        pic_face = pic_face.scaled(FaceIcon.ICON_SIZE, FaceIcon.ICON_SIZE)

        self._face_slot = []
        for i in range(0, 14):
            if i < 7:
                self._face_slot.append(
                    FaceIcon(self, self._central_widget, 53 + i * 134, 470,
                             pic_face, i, "bach.jpg"))
            else:
                self._face_slot.append(
                    FaceIcon(self, self._central_widget, 53 + (i - 7) * 134,
                             585, pic_face, i, "bach.jpg"))

        # final window setup
        self.setCentralWidget(self._central_widget)
        self.retranslateUi()
        QtCore.QMetaObject.connectSlotsByName(self)
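
    # The timer above is wired to a show_frame slot that is not part of this
    # excerpt. What follows is an illustrative sketch, not the original code:
    # it assumes the Capture object can hand back a BGR frame (get_frame is a
    # hypothetical accessor) and that QtGui is imported alongside QtWidgets.
    def show_frame(self):
        frame = self._capturing.get_frame()  # hypothetical accessor
        if frame is None:
            return
        # Qt expects RGB while OpenCV delivers BGR
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        height, width, channels = rgb.shape
        image = QtGui.QImage(rgb.data, width, height, channels * width,
                             QtGui.QImage.Format_RGB888)
        self._video.setPixmap(QPixmap.fromImage(image))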
Example #5

os.makedirs(in_folder, exist_ok=True)
os.makedirs(faces_folder, exist_ok=True)
os.makedirs(no_faces_folder, exist_ok=True)

# Get the contents of the input directory
images = os.scandir(in_folder)

# Get the profile and frontal Haar cascade files that ship with the cv2 package
# (forward slashes keep the paths portable and avoid stray escape sequences)
profileface_cascade = os.path.join(args.cv_path,
                                   "data/haarcascade_profileface.xml")
frontalface_cascade = os.path.join(args.cv_path,
                                   "data/haarcascade_frontalface_alt.xml")

# Create two face detectors, one for full frontal faces and one for profiles.
# For best results we combine the output of both detectors when checking for faces.
frontal_detector = fd.FaceDetector(frontalface_cascade)
profile_detector = fd.FaceDetector(profileface_cascade)

# Only sort files that are images
print("Identifying images...")
images = [
    image for image in images if image.is_file() and ft.is_image(image.path)
]

if not images:  # Exit the program if no images are found
    print("No Images found in the given folder.")
    exit()

for image in tqdm(images, desc="Sorting pictures"):
    try:
        cv_image = cv.imread(image.path)
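        # The example is truncated here. An illustrative continuation (not the
        # original code): combine both detectors and move the file accordingly.
        # FaceDetector.detect returning a list of rectangles, shutil.move, and
        # the except clause below are all assumptions.
        faces = frontal_detector.detect(cv_image)
        if not faces:
            faces = profile_detector.detect(cv_image)
        target = faces_folder if faces else no_faces_folder
        shutil.move(image.path, os.path.join(target, image.name))
    except Exception as error:
        print(f"Skipping {image.name}: {error}")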
Example #6
def main():

    # Recreate the model image directory from scratch
    if os.path.exists('./training-images/model'):
        shutil.rmtree('./training-images/model')
    os.mkdir('./training-images/model')

    VideoManager1 = VideoManager(7)
    VideoManager1._extract_images_from_video(
        'sample_video.mp4', './training-images/model'
    )  # FIX ME : short video for face recognition (Don't blur me~~)

    imgNum = 0

    minsize = 20

    caffe_model_path = "./model"

    threshold = [0.6, 0.7, 0.7]
    factor = 0.709

    caffe.set_mode_cpu()
    PNet = caffe.Net(caffe_model_path + "/det1.prototxt",
                     caffe_model_path + "/det1.caffemodel", caffe.TEST)
    RNet = caffe.Net(caffe_model_path + "/det2.prototxt",
                     caffe_model_path + "/det2.caffemodel", caffe.TEST)
    ONet = caffe.Net(caffe_model_path + "/det3.prototxt",
                     caffe_model_path + "/det3.caffemodel", caffe.TEST)

    #Unknowns_align_path = imglist_text('./training-images/Unknowns', 'imglist_Unknowns.txt')
    model_align_path = imglist_text('./training-images/model',
                                    'imglist_model.txt')

    result_images('imglist_model.txt', './training-images/model/',
                  model_align_path, imgNum)
    #result_images('imglist_Unknowns.txt','./training-images/Unknowns/', Unknowns_align_path, imgNum)

    # Loading model
    predictor_model = "./openface/models/dlib/shape_predictor_68_face_landmarks.dat"
    face_aligner = openface.AlignDlib(predictor_model)

    # Training part
    model_path = "./openface/models/openface/nn4.small2.v1.t7"

    with face_detection.FaceDetector(torch_net_model=model_path) as fd:
        fd.append_dir("Unknowns", "./aligned-images/Unknowns_aligned")
        fd.append_dir("model", "./aligned-images/model_aligned")

        fd.train_model()

        fd.save('./face_detector')

    # Prediction part

    with face_detection.FaceDetector.load('./face_detector') as fd:

        cv2.namedWindow('Window')
        cv2.moveWindow('Window', 20, 30)

        cap = cv2.VideoCapture('test_video.mp4')

        fourcc = cv2.VideoWriter_fourcc(*'DIVX')
        fps, width, height = video_info(
            'test_video.mp4')  # FIX ME : original video for blur
        out = cv2.VideoWriter('output_video_.mp4', fourcc, int(fps),
                              (int(width), int(height)))

        while cap.isOpened():

            ret, frame = cap.read()
            if frame is None:
                break

            # The MTCNN Caffe nets expect RGB, so swap OpenCV's BGR channels
            frame_matlab = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            boundingboxes, points = detect_face(frame_matlab, minsize, PNet,
                                                RNet, ONet, threshold, False,
                                                factor)

            x1 = boundingboxes[:, 0]
            y1 = boundingboxes[:, 1]
            x2 = boundingboxes[:, 2]
            y2 = boundingboxes[:, 3]

            for i in range(x1.shape[0]):
                face_box = frame[(int(y1[i])):(int(y2[i])),
                                 (int(x1[i])):(int(x2[i]))]
                face_rect = dlib.rectangle(left=int(x1[i]),
                                           top=int(y1[i]),
                                           right=int(x2[i]),
                                           bottom=int(y2[i]))

                aligned_face = face_aligner.align(
                    96,
                    frame,
                    face_rect,
                    landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
                label = fd.predict(aligned_face)
                if label == 'Unknowns':
                    blur = cv2.medianBlur(face_box, 33)
                    frame[(int(y1[i])):(int(y2[i])),
                          (int(x1[i])):(int(x2[i]))] = blur

            cv2.imshow('Window', frame)
            out.write(frame)

            if cv2.waitKey(1) == ord('q'):
                break

        cap.release()

    cv2.destroyAllWindows()
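
Example #6 also relies on a video_info helper that is not shown. A minimal sketch of what it might look like, assuming it simply reads the stream properties through cv2.VideoCapture (the body is a reconstruction, not the original):

def video_info(path):
    # Read fps and frame size from the container
    cap = cv2.VideoCapture(path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    cap.release()
    return fps, width, height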
Example #7
import argparse
import os.path

import cv2 as cv

import face_detection as fd  # assumed import: the script uses fd.FaceDetector below

if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="Face Detector")
    parser.add_argument(
        '--cv_path',
        help='Path to cv installation.',
        default="C:/Users/Roope/miniconda3/envs/tf/Lib/site-packages/cv2")
    parser.add_argument('--image_path', help='Path to the image', default=None)
    args = parser.parse_args()

    profileface_cascade = os.path.join(args.cv_path,
                                       "data/haarcascade_profileface.xml")
    frontalface_cascade = os.path.join(args.cv_path,
                                       "data/haarcascade_frontalface_alt.xml")

    image = cv.imread(args.image_path)

    if image is None:
        print('--(!)Error loading the image')
        exit(0)

    frontal_detector = fd.FaceDetector(frontalface_cascade)

    cv.namedWindow('Face Detector', flags=cv.WINDOW_NORMAL)

    frontal_detector.detect_and_display(image)
    cv.waitKey(0)  # keep the result window open until a key is pressed