Example #1
0
    def __get_video__(self):
        """Run face liveness detection on a user-selected .mov video file.

        Opens a file dialog, validates the extension, then plays the video:
        every detected face is cropped, classified by the AlexnetRP model as
        'real' or 'fake', and annotated on the displayed frame. Pressing Q
        stops playback early.
        """
        file_dir = filedialog.askopenfilename()
        if len(file_dir) == 0:
            # Dialog was cancelled; nothing to do (same as the original).
            return
        filetype = os.path.splitext(file_dir)[1]

        if filetype not in ['.mov']:
            messagebox.showerror('error',
                                 message='The file type is wrong, please upload .mov file')
            return

        model = load_model("ModelandPickle/AlexnetRP.model")
        # Open the selected video file.
        cap = cv2.VideoCapture(file_dir)
        # BUGFIX: build the detector once, not on every frame.
        facedector = Detector()

        while True:
            # Capture the next frame from the video.
            ret, frame = cap.read()
            # BUGFIX: the original never checked `ret`, so a None frame at
            # end-of-video reached the detector and crashed.
            if not ret or frame is None:
                break

            # BUGFIX: keep the detections in their own name instead of
            # shadowing `face` with the model-input batch below.
            faces = facedector.detect_face(frame)
            for (x, y, w, h) in faces:
                color = (0, 0, 255)
                stroke = 2
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, stroke)
                # Crop the face region (10 px trimmed off each side) and
                # prepare it as a single-image batch for the model.
                Capture_face = frame[y: y + h, x + 10: x + w - 10]
                Capture_face = cv2.resize(Capture_face, (Image_size, Image_size))
                Capture_face = Capture_face.astype("float")
                Capture_face = img_to_array(Capture_face)
                batch = np.expand_dims(Capture_face, axis=0)
                preds = model.predict(batch)[0]
                result = int(np.argmax(preds))
                label = image_label[result]
                if label == 'real':
                    label = "{}".format(label)
                    cv2.putText(frame, label, (x, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.rectangle(frame, (x, y), (x + w, y + h),
                                  (0, 255, 0), 2)
                elif label == 'fake':
                    label = "{}".format(label)
                    cv2.putText(frame, label, (x, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                    cv2.rectangle(frame, (x, y), (x + w, y + h),
                                  (0, 0, 255), 2)

            frame = cv2.putText(frame, 'Press Q to exit', (0, 10), cv2.FONT_HERSHEY_COMPLEX, 1.2,
                                (255, 255, 255), 1)
            # Display the annotated frame.
            cv2.imshow('Face Detection', frame)

            # Break when the user presses Q.
            if cv2.waitKey(20) & 0xFF == ord('q'):
                break

        # Release the resource and destroy all windows.
        cap.release()
        cv2.destroyAllWindows()
Example #2
0
    def __Real_time__(self):
        """Run the real-time liveness test on the default webcam.

        Streams frames from camera 0, classifies each detected face with the
        Realtimev2 model as 'real' or 'fake' (with confidence), and draws the
        result on the live view. Pressing Q exits.
        """
        # Loading the Realtime liveness test model...
        model = load_model("ModelandPickle/Realtimev2.model")
        # le = pickle.loads(open('Userpickle2.pickle', "rb").read())

        # Open the computer web cam.
        cap = cv2.VideoCapture(0)
        # BUGFIX: build the detector once, not on every frame.
        facedector = Detector()

        while True:
            # Capture the frame from the web camera.
            ret, frame = cap.read()
            # BUGFIX: the original never checked `ret`; a failed camera read
            # would pass a None frame to the detector and crash.
            if not ret or frame is None:
                break

            # BUGFIX: keep the detections in their own name instead of
            # shadowing `face` with the model-input batch below.
            faces = facedector.detect_face(frame)
            for (x, y, w, h) in faces:
                color = (0, 0, 255)
                stroke = 2
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, stroke)
                # Crop the face region (10 px trimmed off every side) and
                # prepare it as a single-image batch for the model.
                Capture_face = frame[y + 10: y + h - 10, x + 10: x + w - 10]
                Capture_face = cv2.resize(Capture_face, (Image_size, Image_size))
                Capture_face = Capture_face.astype("float")
                Capture_face = img_to_array(Capture_face)
                batch = np.expand_dims(Capture_face, axis=0)
                preds = model.predict(batch)[0]
                j = np.argmax(preds)
                result = int(j)
                label = image_label[result]
                if label == 'real':
                    label = "{}: {:.3f}".format(label, preds[j])
                    cv2.putText(frame, label, (x, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.rectangle(frame, (x, y), (x + w, y + h),
                                  (0, 255, 0), 2)
                elif label == 'fake':
                    label = "{}: {:.3f}".format(label, preds[j])
                    cv2.putText(frame, label, (x, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                    cv2.rectangle(frame, (x, y), (x + w, y + h),
                                  (0, 0, 255), 2)

            frame = cv2.putText(frame, 'Press Q to exit', (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1.2, (255, 255, 255), 2)
            # Display the real time frame.
            cv2.imshow('Face Detection', frame)

            # Break when the user presses Q.
            if cv2.waitKey(20) & 0xFF == ord('q'):
                break

        # Release the resource and destroy all windows.
        cap.release()
        cv2.destroyAllWindows()
Example #3
0
    def __face_detect__(self):
        """Detect a face in the uploaded image and show the cropped face.

        Refreshes panelA with the uploaded image and displays the cropped
        face region in panelB (creating panelB and its frame on first use).
        Shows an error dialog when no image has been uploaded or no face is
        detected.
        """
        global panelA
        global panelB
        # BUGFIX: the original showed the error dialog but then fell through
        # and kept processing (and could show the dialog twice); return early
        # instead.
        if panelA is None or self.image is None:
            messagebox.showerror('error', message='No image has been upload..')
            return

        facedector = Detector()
        face = facedector.detect_face(self.image)
        if len(face) == 0:
            messagebox.showerror('error', message='No face has been detected..')
            return

        # Refresh panelA once. The original rebuilt this identical image for
        # every detected face; the end result is the same.
        image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
        image = PIL.Image.fromarray(image)
        image = PIL.ImageTk.PhotoImage(image)
        panelA.configure(image=image)
        panelA.image = image

        for (x, y, w, h) in face:
            # Crop the detected face; keep it on self (as the original did)
            # so other methods can reuse the last crop.
            self.Crop_image = self.image[y: y + h, x: x + w]
            Crop_image = cv2.cvtColor(self.Crop_image, cv2.COLOR_BGR2RGB)
            Crop_image = PIL.Image.fromarray(Crop_image)
            Crop_image = PIL.ImageTk.PhotoImage(Crop_image)

            if panelB is None:
                # First use: build the frame, caption label and image panel.
                face_detect_frame = tkinter.Frame(self.root)
                face_image_lb = tkinter.Label(face_detect_frame, text="The Crop Image",
                                              font=('微软雅黑', 20), fg="black")
                face_image_lb.pack()
                panelB = tkinter.Label(face_detect_frame, image=Crop_image)
                panelB.image = Crop_image
                panelB.pack()
                face_detect_frame.pack(anchor='nw', padx=10, pady=10)
            else:
                # Otherwise just update the existing panel with the new crop.
                panelB.configure(image=Crop_image)
                panelB.image = Crop_image
def Getface(Filename, Video_Dir, FramNum, Save_Dir):
    """Sample faces from a batch of videos and save them as JPEG crops.

    Parameters:
        Filename: nested iterable of video file names (an iterable of
            iterables, matching the original call convention).
        Video_Dir: directory containing the videos.
        FramNum: target number of frames to sample per video.
        Save_Dir: directory where cropped face images are written.

    For each video, roughly FramNum evenly spaced frames are scanned; each
    detected face is cropped, resized to IMG_SIZE x IMG_SIZE and saved as
    '<count>.jpg' using a counter that runs across all videos.
    """
    count = 0
    # BUGFIX: create the detector once instead of once per sampled frame.
    facedector = Detector()
    for filenames in Filename:
        for filename in filenames:
            Videoname = Video_Dir + '/' + filename

            # First pass: count the frames in the video.
            vc = cv2.VideoCapture(Videoname)
            flag, frame = vc.read()
            frame_count = 0
            while flag:
                ret, frame = vc.read()
                if ret is False:
                    break
                frame_count = frame_count + 1
            vc.release()

            # BUGFIX: the original raised ZeroDivisionError (`c % gap`)
            # whenever the video had fewer frames than FramNum; clamp the
            # sampling gap to at least 1.
            gap = max(1, frame_count // FramNum)
            c = 1

            # Second pass: take every `gap`-th frame.
            vc = cv2.VideoCapture(Videoname)
            flag, frame = vc.read()
            while flag:
                flag, frame = vc.read()
                if not flag:
                    break
                if c % gap == 0:
                    face = facedector.detect_face(frame)
                    for (x, y, w, h) in face:
                        # NOTE: the original converted BGR->RGB on the whole
                        # frame and then again on the crop, which cancels out
                        # (the conversion is its own inverse); crop straight
                        # from the BGR frame so imwrite gets the same pixels.
                        Crop_image = frame[y: y + h, x: x + w]
                        Crop_image = cv2.resize(Crop_image, (IMG_SIZE, IMG_SIZE))
                        cv2.imwrite(Save_Dir + '/' + str(count) + '.jpg', Crop_image)
                        print("image saving....")
                        count = count + 1
                c = c + 1
            # BUGFIX: the original never released the second capture.
            vc.release()
            cv2.waitKey(1)