    def __init__(self, config):
        self.config = configInfo(config)
        self.hyperparameters = self.config["hyperparameters"]
        self.width, self.height, self.channel = self.hyperparameters["size"]
        self.batch_size = self.hyperparameters["batch_size"]
        self.target_size = (self.width, self.height)
        train_dir = self.config["train_dir"]
        validation_dir = self.config["validation_dir"]
        # train_dir = "../dataset/face_liveness_train"
        # validation_dir = "../dataset/face_liveness_validation"

        self.train_datagen = ImageDataGenerator(brightness_range=[0.2, 1.0])

        self.validation_datagen = ImageDataGenerator()

        self.train_generator = self.train_datagen.flow_from_directory(
            train_dir,
            batch_size=self.batch_size,
            target_size=self.target_size,
            color_mode="rgb",
            class_mode='binary',
            shuffle=True)

        self.validation_generator = self.validation_datagen.flow_from_directory(
            validation_dir,
            batch_size=self.batch_size,
            target_size=self.target_size,
            color_mode="rgb",
            class_mode='binary',
            shuffle=False)

        self.labels = self.train_generator.class_indices
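
Note: the test and trainer examples below call dataloader.data_generator(), which this page does not show. A minimal sketch of what that method presumably looks like, given the attributes built in this __init__ (the method name and return order come from the call sites; the body itself is an assumption):

    def data_generator(self):
        # Assumed companion method: expose the generators already built in
        # __init__, in the (train, validation) order the callers expect.
        return self.train_generator, self.validation_generator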
Example 2
def face_gatherer(configfile, updated_phone_number):
    #############################################################
    config = configInfo(configfile)
    hyperparameters = config["hyperparameters"]
    width, height, _ = hyperparameters["size"]
    model = load_model(config["best_saved_model"])
    le = config["le"]["classes"]
    ##############################################################

    fv = FaceVerification(configfile)
    (known_face_encodings, known_face_names, face_locations, face_encodings,
     face_names, process_this_frame) = fv.face_information()

    filename = updated_phone_number + ".mp4"
    filepath = os.path.join("video", "Register", filename)
    video_capture = cv2.VideoCapture(filepath)

    w = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    h = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)

    if h == w:
        angle = cv2.ROTATE_90_CLOCKWISE
    else:
        angle = cv2.ROTATE_90_COUNTERCLOCKWISE

    os.makedirs(os.path.join("image", updated_phone_number), exist_ok=True)

    i = 0

    while True:
        ret, frame = video_capture.read()
        if not ret:  # check before rotating; frame is None on a failed read
            break
        frame = cv2.rotate(frame, angle)

        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]  # convert BGR to RGB

        if process_this_frame:
            face_locations = fr.face_locations(rgb_small_frame)

        process_this_frame = not process_this_frame

        for (top, right, bottom, left) in face_locations:
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            ###############################################
            face = frame[top:bottom, left:right]
            if i % 10 == 0:  # save every 10th frame's crop
                cv2.imwrite(f"image/{updated_phone_number}/test_{i}.jpg", face)
        i += 1
Example 3
def test(dataloader, config="config/config.json"):
    config = configInfo(config)
    model = tf.keras.models.load_model(config["trial_saved_model"])

    _, validation_generator = dataloader.data_generator()
    batch_size = dataloader.batch_size
    target_names = list(dataloader.labels.keys())

    # `steps` must be passed by keyword; the second positional argument of
    # predict() is batch_size, not steps.
    y_pred = model.predict(validation_generator,
                           steps=validation_generator.samples // batch_size + 1)
    # Assumes a softmax head with one unit per class; for a single sigmoid
    # unit, use (y_pred > 0.5).astype(int).ravel() instead.
    y_pred = np.argmax(y_pred, axis=1)

    print('Classification Report')
    print(classification_report(validation_generator.classes, y_pred,
                                target_names=target_names))
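
If a per-class error breakdown is also wanted, a confusion matrix can be printed from the same predictions. A minimal sketch of lines that would slot in at the end of test() above (confusion_matrix comes from sklearn.metrics, the same module as classification_report; it is not part of the original code):

    # Rows are true classes, columns are predicted classes.
    print('Confusion Matrix')
    print(confusion_matrix(validation_generator.classes, y_pred))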
Example 4
def gather_example(config, saved_video_path, image_save_path, skip=10):
    config = configInfo(config)

    video_capture = cv2.VideoCapture(saved_video_path)
    # splitext is safer than split(".") for filenames that contain dots
    dirname = os.path.splitext(os.path.basename(saved_video_path))[0]

    os.makedirs(os.path.join(image_save_path, dirname), exist_ok=True)

    i = 0
    # Note: never toggled below, so detection runs on every frame.
    process_this_frame = True
    face_encodings = []  # so the return at the end is safe on an empty video

    while True:
        ret, frame = video_capture.read()
        if not ret:
            break

        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]  # convert BGR to RGB

        if process_this_frame:
            # Returns an array of bounding boxes of human faces in an image.
            face_locations = fr.face_locations(rgb_small_frame)
            # Given an image, return the 128-dimension face encoding for
            # each face in the image.
            face_encodings = fr.face_encodings(rgb_small_frame, face_locations)

        for (top, right, bottom, left) in face_locations:
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            face = frame[top:bottom, left:right]
            if i % skip == 0:
                cv2.imwrite(f"{image_save_path}/{dirname}/{i}.jpg", face)
                print(f"{dirname}_{i}.jpg saved")

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        else:
            i += 1

    video_capture.release()
    cv2.destroyAllWindows()

    return face_encodings
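
A hedged usage sketch (the video filename is a placeholder; only the directory layout comes from the other examples on this page):

encodings = gather_example("config/config.json",
                           saved_video_path="video/Register/sample.mp4",
                           image_save_path="image",
                           skip=10)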
Example 5
    def __init__(self, dataloader, model, config):

        self.dataloader = dataloader
        self.model = model
        self.config = configInfo(config)
        self.hyperparameters = self.config["hyperparameters"]
        self.batch_size = self.hyperparameters["batch_size"]
        self.epochs = self.hyperparameters["epochs"]
        self.INIT_LR = self.hyperparameters["learning_rate"]
        # `learning_rate` replaces the deprecated `lr` keyword.
        self.optimizer = tf.keras.optimizers.Adam(
            learning_rate=self.INIT_LR,
            decay=self.INIT_LR / self.epochs)

        (self.train_generator,
         self.validation_generator) = self.dataloader.data_generator()

        self.step_size_train = (self.train_generator.n
                                // self.train_generator.batch_size)
        self.step_size_validation = (self.validation_generator.samples
                                     // self.validation_generator.batch_size)
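
The training step itself is not shown on this page. A minimal sketch of the fit call this constructor appears to prepare for; the method name train and the compile settings are assumptions, while the generators, optimizer, epochs, and step sizes come from the __init__ above:

    def train(self):
        # Assumed: binary liveness task, hence binary_crossentropy.
        self.model.compile(optimizer=self.optimizer,
                           loss="binary_crossentropy",
                           metrics=["accuracy"])
        return self.model.fit(self.train_generator,
                              steps_per_epoch=self.step_size_train,
                              validation_data=self.validation_generator,
                              validation_steps=self.step_size_validation,
                              epochs=self.epochs)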
Example 6
def download_from_firebase(phonenumber,
                           firebase_dir="videos",
                           firebase_path="2020-09-22 17:24:330102345679",
                           config="../config/config.json"):
    config = configInfo(config)

    firebase = pyrebase.initialize_app(config["firebase_config"])
    storage = firebase.storage()
    db = firebase.database()
    company = db.child("UserList").get()

    bdict = company.val()

    download_dir = config["video_save_path"]

    for i in bdict.keys():
        print(bdict[i])
        if i == phonenumber:
            download_path = os.path.join(download_dir, f"{i}.mp4")
            # Firebase Storage paths always use "/", so avoid os.path.join
            # here (it would produce "\\" on Windows).
            storage.child(f"{firebase_dir}/{firebase_path}").download(download_path)
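
A hedged usage sketch (the phone number and Firebase path are placeholders, not real values; the default argument above suggests the remote path is an upload timestamp concatenated with the phone number):

download_from_firebase("01012345678",
                       firebase_dir="videos",
                       firebase_path="<upload timestamp + phone number>",
                       config="../config/config.json")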
Example 7
    def __init__(self, config):
        self.config = configInfo(config)
        self.hyperparameters = self.config["hyperparameters"]
        self.width, self.height, self.depth = self.hyperparameters["size"]
        self.classes = self.config["le"]["num_classes"]
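
This class stores an input shape and a class count, the usual state of a Keras model builder. A minimal sketch of a build method it might pair with; the architecture here is an illustrative assumption, not the project's actual network, and only the input shape and output size come from the attributes above:

    def build(self):
        model = tf.keras.Sequential([
            # Illustrative layers only; the real network is not shown here.
            tf.keras.layers.Conv2D(16, 3, activation="relu",
                                   input_shape=(self.height, self.width,
                                                self.depth)),
            tf.keras.layers.MaxPooling2D(),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(self.classes, activation="softmax"),
        ])
        return model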
Example 8
def main():
    #############################################################
    config = configInfo("config/config.json")
    hyperparameters = config["hyperparameters"]
    width, height, _ = hyperparameters["size"]
    model = load_model(config["best_saved_model"])
    le = config["le"]["classes"]
    ##############################################################

    fv = FaceVerification("config/config.json")
    (known_face_encodings, known_face_names, face_locations, face_encodings,
     face_names, process_this_frame) = fv.face_information()

    video_capture = cv2.VideoCapture(config["video2read"])
    w = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    h = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)

    if h == w:
        angle = cv2.ROTATE_90_CLOCKWISE
    else:
        angle = cv2.ROTATE_90_COUNTERCLOCKWISE

    # save test images per frame
    now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    os.mkdir(os.path.join("image", "frame", now))
    logger = resultLogger(
        os.path.join(config["logpath"], ("logs_" + now)) + ".log")
    i = 0

    while True:
        ret, frame = video_capture.read()

        if not ret:
            break

        frame = cv2.rotate(frame, angle)

        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]  # convert BGR to RGB

        if process_this_frame:
            # Returns an array of bounding boxes of human faces in an image.
            face_locations = fr.face_locations(rgb_small_frame)
            # Given an image, return the 128-dimension face encoding for
            # each face in the image.
            face_encodings = fr.face_encodings(rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # Compare a list of face encodings against a candidate encoding
                # to see if they match. tolerance: how much distance between
                # faces counts as a match; lower is stricter, and 0.6 gives the
                # typical best performance.
                matches = fr.compare_faces(known_face_encodings,
                                           face_encoding,
                                           tolerance=0.43)
                name = "Unknown"

                # Given a list of face encodings, compare them to a known face encoding and get a euclidean distance
                # for each comparison face. The distance tells you how similar the faces are.
                face_distances = fr.face_distance(known_face_encodings,
                                                  face_encoding)
                best_match_index = np.argmin(face_distances)

                if matches[best_match_index]:
                    name = known_face_names[best_match_index]

                face_names.append(name)

        process_this_frame = not process_this_frame

        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            ###############################################
            face = frame[top:bottom, left:right]
            size = face.shape
            face = cv2.resize(face, (width, height))
            # cv2.imwrite("test.jpg", face)
            # face = face.astype("float") / 255.0
            face = img_to_array(face)
            face = np.expand_dims(face, axis=0)

            preds = model.predict(face)[0]
            j = np.argmax(preds)
            label = le[j]
            ###############################################

            if name != "Unknown" and label != "fake":
                rectcolor = (0, 255, 0)
            else:
                rectcolor = (0, 0, 255)

            cv2.rectangle(frame, (left, top), (right, bottom), rectcolor, 2)
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          rectcolor, cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, f"{name}/{label}", (left + 6, bottom - 6), font,
                        1.0, (255, 255, 255), 1)
            cv2.imwrite(
                f"image/frame/{now}/test_{i}_{name}_{label}_{max(preds)}.jpg",
                frame)
            logger.info(
                f"{name} {label} {max(preds)} {size[0]} {size[1]} {size[2]}")

        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        else:
            i += 1

    video_capture.release()
    cv2.destroyAllWindows()
Example 9
    def __init__(self, config):
        self.config = configInfo(config)
Example 10
def face_liveness_detector(updated_phone_number):
    #############################################################
    config = configInfo("config/config.json")
    hyperparameters = config["hyperparameters"]
    width, height, _ = hyperparameters["size"]
    model = load_model(config["best_saved_model"])
    le = config["le"]["classes"]
    ##############################################################

    fv = FaceVerification("config/config.json")
    (known_face_encodings, known_face_names, face_locations, face_encodings,
     face_names, process_this_frame) = fv.face_information()

    filename = updated_phone_number + ".mp4"
    filepath = os.path.join("video", "Login", filename)
    video_capture = cv2.VideoCapture(filepath)
    w = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    h = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)

    if h == w:
        angle = cv2.ROTATE_90_CLOCKWISE
    else:
        angle = cv2.ROTATE_90_COUNTERCLOCKWISE

    # save test images per frame
    now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    # os.mkdir(os.path.join("image", "frame", now))
    logger = resultLogger(
        os.path.join(config["logpath"], ("logs_" + now)) + ".log")
    i = 0

    while True:
        ret, frame = video_capture.read()
        if not ret:  # check before rotating; frame is None on a failed read
            break
        frame = cv2.rotate(frame, angle)

        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]  # convert BGR to RGB

        if process_this_frame:
            # Returns an array of bounding boxes of human faces in an image.
            face_locations = fr.face_locations(rgb_small_frame)
            # Given an image, return the 128-dimension face encoding for
            # each face in the image.
            face_encodings = fr.face_encodings(rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # Compare a list of face encodings against a candidate encoding
                # to see if they match. tolerance: how much distance between
                # faces counts as a match; lower is stricter, and 0.6 gives the
                # typical best performance.
                matches = fr.compare_faces(known_face_encodings,
                                           face_encoding,
                                           tolerance=0.43)
                name = "Unknown"

                # Given a list of face encodings, compare them to a known face encoding and get a euclidean distance
                # for each comparison face. The distance tells you how similar the faces are.
                face_distances = fr.face_distance(known_face_encodings,
                                                  face_encoding)
                best_match_index = np.argmin(face_distances)

                if matches[best_match_index]:
                    name = known_face_names[best_match_index]

                face_names.append(name)

        process_this_frame = not process_this_frame

        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            ###############################################
            face = frame[top:bottom, left:right]
            size = face.shape
            face = cv2.resize(face, (width, height))
            # cv2.imwrite("test.jpg", face)
            # face = face.astype("float") / 255.0
            face = img_to_array(face)
            face = np.expand_dims(face, axis=0)

            preds = model.predict(face)[0]
            j = np.argmax(preds)
            label = le[j]
            ###############################################

            logger.info(
                f"{name} {label} {max(preds)} {size[0]} {size[1]} {size[2]}")

    video_capture.release()

    logfile = os.path.join(config["logpath"], ("logs_" + now)) + ".log"

    return logfile, updated_phone_number
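
face_liveness_detector returns the log path rather than a verdict, so the caller presumably parses the log. A minimal hedged sketch of such a check, based only on the logger.info format above, where each line ends with "<name> <label> <confidence> <h> <w> <c>"; the decision rule itself is an assumption:

def is_live(logfile, phone_number):
    with open(logfile) as f:
        for line in f:
            fields = line.split()
            # Index from the end so any logger prefix (timestamp, level)
            # is ignored; fields[-6] is the name, fields[-5] the label.
            if len(fields) >= 6 and fields[-6] == phone_number and fields[-5] != "fake":
                return True
    return False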
Example 11
import pyrebase
from flask import *
import face_recognition as fr
import cv2
from face_verification.face_verification import FaceVerification
from tensorflow.keras.preprocessing.image import img_to_array
from utils.logger import resultLogger
from tensorflow.keras.models import load_model
from datetime import datetime
import os
from utils.utils import configInfo
import numpy as np

config = configInfo("config/config.json")
config = config["firebase_config"]

# firebase config of app initialize
firebase = pyrebase.initialize_app(config)
storage = firebase.storage()
db = firebase.database()


def face_gatherer(configfile, updated_phone_number):
    #############################################################
    config = configInfo(configfile)
    hyperparameters = config["hyperparameters"]
    width, height, _ = hyperparameters["size"]
    model = load_model(config["best_saved_model"])
    le = config["le"]["classes"]
    ##############################################################