Example #1
import glob
import os

import cv2
import numpy as np
from tqdm import tqdm
from keras_facenet import FaceNet


def save_face_encoding(src_path):
    facenet = FaceNet()
    img_files = glob.glob('{0}/**/*.jpg'.format(src_path), recursive=True)

    for img_file in tqdm(img_files):
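        # cv2 loads BGR; reversing the last axis gives the RGB input FaceNet expects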
        em = facenet.embeddings([cv2.imread(img_file)[:, :, ::-1]])
        # swap only the extension; str.replace('jpg', 'npy') would corrupt paths containing "jpg"
        np.save(os.path.splitext(img_file)[0] + '.npy', em[0])
Example #2
import os
import pickle
import sys

import numpy as np
from PIL import Image
from keras_facenet import FaceNet


def _main():
    # img_paths = generate_input_paths(dir_path, 138)
    # for ip in img_paths:
    #     faces = process_image(ip)
    with open(sys.argv[1], 'rb') as f:
        (le, model) = pickle.load(f, encoding='latin1')

    embedder = FaceNet()
    directory = sys.argv[2]
    files = sorted(os.listdir(directory))

    for i, filename in enumerate(files):
        path = os.path.join(directory, filename)
        boxes = process_image(path)
        img = np.asarray(Image.open(path).convert('RGB'))

        result = []
        for box in boxes:
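            # boxes are (x, y, width, height); abs() guards against slightly negative detector coords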
            x1, y1, width, height = box
            x1, y1 = abs(x1), abs(y1)
            x2, y2 = x1 + width, y1 + height
            face = img[y1:y2, x1:x2]
            face = Image.fromarray(face).resize((IMAGE_SIZE, IMAGE_SIZE))
            face = np.asarray(face)

            rep = embedder.embeddings([face])
            pred = model.predict_proba(rep).ravel()
            maxI = np.argmax(pred)
            confidence = pred[maxI]
            person = le.inverse_transform([maxI])[0]
            result.append('{} ({:.2f})'.format(person, confidence))

        print(i, ', '.join(result))
Example #3
def trackimage():
    embedder = FaceNet()
    b = []
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        img1 = extract_face(frame)
        plt.imshow(frame)
        img1 = np.expand_dims(img1, axis=0)
        if (img1.any()):
            emb = embedder.embeddings(img1)
            emb = np.transpose(emb)
            min_dist = 100
            identity = 'unknown'  # fallback in case no stored embedding is close
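            # nearest-neighbour search: dictq maps each known name to a stored embedding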
            for key, value in dictq.items():
                dist = np.linalg.norm(emb - value)
                b.append(dist)
                if dist < min_dist:
                    min_dist = dist
                    identity = key
            print(identity)
            if min_dist < 1.0:
                cv2.putText(frame, "Face : " + identity, (100, 100),
                            cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)
                unknown_yes_or_no = 'no'
            else:
                cv2.putText(frame, 'no match', (100, 100),
                            cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)
                unknown_yes_or_no = 'yes'
        # show the frame and poll for Esc outside the face-found branch,
        # otherwise the window freezes whenever no face is detected
        cv2.imshow('face', frame)

        if cv2.waitKey(1) & 0xFF == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
    import pyttsx3
    engine = pyttsx3.init()
    if (unknown_yes_or_no == "yes"):
        engine.say("Good morning sorry we couldn't recognise you")
    else:
        str1 = "good morning " + identity + " your attendance has been recorded"
        engine.say(str1)
        import mysql.connector
        mydb = mysql.connector.connect(host="localhost",
                                       user="******",
                                       passwd="albertbolt23",
                                       database="faceproject",
                                       auth_plugin='mysql_native_password')

        mycursor = mydb.cursor()
        from datetime import date
        today1 = date.today()

        sql = "INSERT INTO attendance values('%s','%s','morning')" % (
            identity, str(today1))

        mycursor.execute(sql)
        mydb.commit()

    engine.runAndWait()
Example #4
def calculate_euclidean_distance(ref_frame, frame):
  """
  Compute embedding vectors for two frames and return their Euclidean distance.

  - input
  ref_frame: frame (np.ndarray) taken from the reference clip
  frame: frame (np.ndarray) taken from the clip being compared

  - output
  Euclidean distance between the two embedding vectors
  """
  # Get image as an numpy array
  #ref_frame = reference_clip.get_frame(reference_clip.reader.nframes/reference_clip.fps)
  #frame = clip.get_frame(clip.reader.nframes/clip.fps)

  # detection
  ref_frame_detected = extract_face_from_frame(ref_frame)
  frame_detected = extract_face_from_frame(frame)

  if len(ref_frame_detected) == 0 or len(frame_detected) == 0:  # no face detected in at least one frame
      return 100  # sentinel distance meaning "no match possible"

  # feature extraction (embedding)
  ref_frame_detected = ref_frame_detected.reshape((1,) + ref_frame_detected.shape)  # add a batch dimension for the model
  frame_detected = frame_detected.reshape((1,) + frame_detected.shape)


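  # NOTE: constructing FaceNet() here reloads the model weights on every call;
  # hoisting it to module scope would make repeated comparisons much faster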
  embed_model = FaceNet()
  ref_frame_embed = embed_model.embeddings(ref_frame_detected)
  frame_embed = embed_model.embeddings(frame_detected)

  diff = ref_frame_embed - frame_embed
  euclidean_dist = np.sqrt(np.sum(np.multiply(diff, diff)))
  return euclidean_dist
Example #5
def give_embeddings(image_links):
    detector = MTCNN()
    model = FaceNet()
    embeddings_list = []
    for image in image_links:
        flag, faces_array = extract_faces(image, detector)
        if flag:
            for face_pixels in faces_array:
                face_pixels = face_pixels.astype('float32')
                face_pixels = face_pixels.reshape(1, 160, 160, 3)
                yhat = model.embeddings(face_pixels)
                embeddings_list.append(yhat[0])
    return embeddings_list
Example #6
def extract_faceID(filename, id):
    print("STARTING FACE ID EXTRACTION")
    img = cv2.imread(filename)
    embedder = FaceNet()
    detector = MTCNN()
    data = []
    faces = detector.detect_faces(img)
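    # only the first detected face is kept (the loop breaks after one iteration)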
    for face in faces:
        st = (face['box'][0], face['box'][1])
        en = (face['box'][0] + face['box'][2], face['box'][1] + face['box'][3])
        data.append(img[st[1]:en[1], st[0]:en[0]])
        break
    vt = embedder.embeddings(data)[0]

    # save to database
    sql = SQL_Server()

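    # NOTE: the query is string-built; a parameterized insert would be safer for untrusted input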
    query = "INSERT INTO dbo.FaceID VALUES (" + str(id)

    for i in vt:
        query += " , " + str(i)

    query += ")"

    # print(query)
    sql.insert(query)

    print("ADD FACE ID INTO CSDL THANH CÔNG")
Example #7
def __init__(self, callback=None):
    self.file_list = os.listdir('./dataset/train')
    self.faces = []
    self.names = []  # faces and names are index-aligned, one-to-one
    for file in self.file_list:
        self.faces.append(convert_gray_to_bgr_use_path(os.path.join('./dataset/train', file)))
        self.names.append(file.split('.')[0].split('-')[1])
    self.embedder = FaceNet()
    self.embeddings = self.embedder.embeddings(self.faces)
    if callback:
        callback()
Example #8
def __init__(self):
    """
    Initialize Face Recognition model
    """
    # Load Face Detector
    self.face_detector = MTCNN()

    # Load FaceNet
    self.facenet = FaceNet()

    # Euclidean Classifier
    self.clf = None
Example #9
def create_pickle(training_paths, testing_paths, training_destination,
                  testing_destination):
    embedder = FaceNet()
    # images is a list of images, each as an
    # np.ndarray of shape (H, W, 3).

    for paths, destination in [(training_paths, training_destination),
                               (testing_paths, testing_destination)]:

        print("Getting images...")
        images = get_images_from_filepaths(paths)
        print("Getting embeddings...")
        try:
            # some keras_facenet versions accept a verbose kwarg; fall back if not
            embeddings = embedder.embeddings(images, verbose=1)
        except TypeError:
            embeddings = embedder.embeddings(images)

        print("Saving embeddings...")
        save_imgs(paths, embeddings, destination)
Example #10
class FaceNetWrap:
  def __init__(self, callback=None):
    self.file_list = os.listdir('./dataset/train')
    self.faces = []
    self.names = []  # faces and names are index-aligned, one-to-one
    for file in self.file_list:
      self.faces.append(convert_gray_to_bgr_use_path(os.path.join('./dataset/train', file)))
      self.names.append(file.split('.')[0].split('-')[1])
    self.embedder = FaceNet()
    self.embeddings = self.embedder.embeddings(self.faces)
    if callback:
      callback()

  def get_embedding(self, img):
    embeddings_test = self.embedder.embeddings([img])
    return embeddings_test[0]

  # return the best-matching name, its distance, and the stored photo
  def get_best_fit(self, embedding):
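    # squared L2 distance to every stored embedding; argmin picks the closest person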
    dis = [get_L2_norm_squared(em, embedding) for em in self.embeddings]
    idx = np.argmin(dis)
    img = cv2.imread(os.path.join('./dataset/train', self.file_list[idx]), cv2.IMREAD_GRAYSCALE)
    name = self.names[idx]
    return name, dis[idx], img
Example #11
def createDatabase():
	database = pd.DataFrame({'folder_id':[],'photo_id':[],'face_id':[],'embedding':[]},dtype="float32")

	#traverse through all the images
	images_path = "/home/darealappu/Desktop/CDAC/DR-GAN tensorflow/vggface2_test/test"
	index=0
	time_to_return = False
	embedder = FaceNet()

	for folder in os.listdir(images_path):
		folder_id = int(folder[1:])
		for image in os.listdir(os.path.join(images_path,folder)):
			if time_to_return == True:
				return database
			photo_id = int(image[0:4])
			face_id = int(image[5:7])
			img = cv2.imread(os.path.join(images_path,folder,image)).astype(np.float32)
			
			img.resize((1,img.shape[0],img.shape[1],img.shape[2]))
			
			em = embedder.embeddings(img)

			# DataFrame.append was removed in pandas 2.0; concatenate a one-row frame instead
			database = pd.concat([database, pd.DataFrame([{
				'folder_id': folder_id,
				'photo_id': photo_id,
				'face_id': face_id,
				'embedding': em}])], ignore_index=True)

			index+=1
			print(index-1)

			if index%10==0:
				if(index==5000):
					time_to_return = True
				break
	return database
Example #12
# assumed: the original module defines `detector = MTCNN()` at module level


def extract_face(img, required_size=(160, 160)):  # signature reconstructed from the calls below
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = detector.detect_faces(img)
    if (results):
        x1, y1, w, h = results[0]['box']
        x1, y1 = abs(x1), abs(y1)
        x2, y2 = x1 + w, y1 + h
        # extract the face
        face = img[y1:y2, x1:x2]
        # resize pixels to the model size
        image = cv2.resize(face, required_size, cv2.INTER_AREA)
        return image


x = []
for i in glob.glob(r'C:\Users\albertbolt\Downloads\face_dataset\*\*.jpeg'):
    img = cv2.imread(i)
    img = extract_face(img)
    x.append(img)
x = np.stack(x)
embedder = FaceNet()
embeddings = embedder.embeddings(x)
a = []
for root, dirp, file in os.walk(r'C:\Users\albertbolt\Downloads\face_dataset'):
    a.append(dirp)
dictq = {}
for i in range(len(a[0])):
    dictq[a[0][i]] = embeddings[i]
for key, value in dictq.items():
    value.shape = [512, 1]

print(dictq)
Example #13
import numpy as np
import tqdm
from FaceLoader import FaceLoader
import time
import json
import uuid
import random
import os
import imageio
import cv2
from skimage import img_as_ubyte

from keras_facenet import FaceNet

embedder = FaceNet()


class SocialProcessor:
    def __init__(self, connection, model, batch=128, limit=1200, prefix='./'):
        self.batch = batch
        self.connection = connection
        self.model = model
        self.prefix = prefix
        self.size = FaceLoader.image_size
        self.limit = limit
        if not os.path.isdir("fragments"):
            os.mkdir("fragments")

    def addRecords(self, data):
        for i, row in enumerate(data):
            data[i] = list(row) + [str(uuid.uuid4())]
Example #14
import cv2
import numpy as np
from keras_facenet import FaceNet
from scipy.spatial import distance

from .utils.CustomScaller import CustomScaller

model = FaceNet().model


def l2_normalize(x, axis=-1, epsilon=1e-10):
    output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))
    return output


def norm(x, scaller=None):
    if scaller is None:
        scaller = CustomScaller()
    return scaller.fit_transform(x)


def get_embeding(x: np.ndarray):
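    # resize to FaceNet's 160x160 input, normalize, and call the underlying Keras model directly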
    return model.predict(
        norm(cv2.resize(x, (160, 160))
             .reshape(-1, 160, 160, 3))
    ).flatten()


def predict_embedded_euclidean_distance(test_x: np.ndarray, train_x: np.ndarray, train_y: np.ndarray):
    res = []
    for emb_test in test_x:
Example #15
from keras_facenet import FaceNet
embedder = FaceNet()

# images is a list of images, each as an
# np.ndarray of shape (H, W, 3).
import cv2
import numpy as np
import os

#paths = ['019QoF6jwBU_150.633000-154.466000','6CUNIOtQ9L4_139.360000-144.440000',
#         'sl08afxcx4_115.515400-119.986533','bLEddi92aFI_171.480000-177.400000'] 

faces2_folder = '../../../../final_data/separated_data/faces2'

paths = []
for (dirpath, dirnames, filenames) in os.walk(faces2_folder):
    paths.extend(dirnames)
    break

count = 0
for path in paths:
  print("{0}/{1}".format(count, len(paths)))
  count += 1
  a = np.zeros((75, 512))
  for i in range(75):
    image = cv2.imread('../../../../final_data/separated_data/faces2/'+path+'/face'+str(i)+'.jpg')
#    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = np.array(image)
    print(image.shape)
    tmp = []
    tmp.append(image)
Example #16
        for path, embedding in zip(filtered_paths, embeddings)
    }

    # Distance
    for image in ["1", "2", "3"]:
        v1 = dictionary_vectors[f"{filtered_path}/{image}A.png"]
        v2 = dictionary_vectors[f"{filtered_path}/{image}B.png"]
        v1 = v1 / LA.norm(v1)
        v2 = v2 / LA.norm(v2)
        distance = cosine(v1, v2)  # cosine distance of the two unit vectors
        print(distance)


if __name__ == "__main__":
    genral_embedder = FaceNet()

    paths = [
        "./demo/wrong_impostor", "./demo/right_impostor",
        "./demo/wrong_genuine", "./demo/right_genuine"
    ]
    for act in paths:
        do_examples(act, genral_embedder)
        input()
"""
wrong_impostor: impostor clasificado como genuino
1.- 036981A.jpg-046217B.jpg
2.- 038004A.jpg-040757B.jpg
3.- 037087A.jpg-109451B.jpg

right impostor: impostor clasificado como impostor
Example #17
import time  # "import times" was presumably a typo; the stdlib module is "time"
import numpy as np
from PIL import Image
from PIL.Image import fromarray
from cv2 import cv2
from keras.backend import expand_dims
from mtcnn_cv2 import MTCNN
from numpy import asarray
# from keras.models import load_model     # to get FaceNet Embedding model
from deepface import DeepFace
from keras_facenet import FaceNet  # it will load model, without error
from scipy.spatial.distance import cosine

import rec_by_deepface as df

# setting the model encoder as a global
encodder = FaceNet()


def extract_align_face(img):  # it return align face
    result = DeepFace.detectFace(img)
    # print(result)
    return result


def get_embedding(face):
    # face = expand_dims(face, axis=0)
    embeddings = encodder.embeddings(np.array(face))
    return embeddings


def compare_faces(pic_url, vid_url, fps, threshold, v_fps, person_name, v_out,
Example #18
import cv2
import pickle
import matplotlib.pyplot as plt
import numpy as np
import mtcnn
import tensorflow as tf
from PIL import Image
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if physical_devices:  # only set memory growth when a GPU is actually present
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

from sklearn.preprocessing import Normalizer
from keras_facenet import FaceNet
embedder = FaceNet()

normalizer = Normalizer(norm='l2')
from detect_face import face_extract
class_label = [
    'Ben Affleck', 'Elton John', 'Jerry Seinfeld', 'Madonna', 'Mindy Kaling'
]
with open('model.pkl', 'rb') as f:
    clf = pickle.load(f)
file = 'dataset/val/madonna/httpassetsrollingstonecomassetsarticlemadonnadavidbowiechangedthecourseofmylifeforeversmallsquarexmadonnabowiejpg.jpg'
img = plt.imread(file)
face = face_extract(file, (160, 160))
face_array = np.asarray(face)
face_array = np.expand_dims(face_array, axis=0)
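# Normalizer(norm='l2') is stateless, so fit_transform here simply L2-normalizes the embedding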
embed = embedder.embeddings(face_array)
embed = normalizer.fit_transform(embed)
pred = clf.predict(embed)
label = class_label[pred.argmax()]
print(label)
Example #19
class FaceRegModel(object):
    def __init__(self):
        self.model = MTCNN()
        self.embedder = FaceNet()

    @staticmethod
    def load_from_file(f):
        img_str = f.read()
        nparr = np.frombuffer(img_str, np.uint8)  # np.fromstring is deprecated
        img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)  # assign the result; cvtColor is not in-place
        return img_np

    def encode_face(self, img):
        location = self.locate_faces(img)[:1]
        face_img = self.extract_faces(img, location)
        return self.embedder.embeddings(face_img)[0]

    def encode_faces(self, img, locations):
        return self.embedder.embeddings(self.extract_faces(img, locations))

    def extract_faces(self, img, locations):
        res_imgs = []
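        # locations are (top, right, bottom, left) tuples, as produced by locate_faces()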
        for (y1, x1, y2, x2) in locations:
            res_imgs.append(img[y1:y2, x2:x1])
        return res_imgs

    def locate_faces(self, img):
        faces = self.model.detect_faces(img)
        faces = [x['box'] for x in faces]
        faces = [(max(y, 0), min(x + w,
                                 img.shape[1]), min(y + h,
                                                    img.shape[0]), max(x, 0))
                 for x, y, w, h in faces]
        return np.array(faces)

    @staticmethod
    def face_distance(face1, face2):
        return np.linalg.norm(face1 - face2, axis=1)

    @staticmethod
    def compare_faces(face1, face2, tolerance=0.9):
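        # distances at or below `tolerance` count as the same person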
        return list(FaceRegModel.face_distance(face1, face2) <= tolerance)

    def process_images(self, images, scale=0.5):
        """Get face locations and encodings from given images."""
        face_information = {}

        for (img_name, img) in images.items():
            # Scale down for faster processing
            small_img = cv2.resize(img, (0, 0), fx=scale, fy=scale)

            # Face detection
            face_locations = self.locate_faces(small_img)
            if len(face_locations) == 0:
                # Ignoring empty
                face_encodings = []
            else:
                # Face extraction
                face_encodings = self.encode_faces(small_img, face_locations)

            face_information[img_name] = {
                'locations': face_locations,
                'encodings': face_encodings
            }

        return face_information

    def find_images_with_person(self, face_encoding, face_information):
        images_with_person = []
        for img_name, face_info in face_information.items():
            face_info_encodings = face_info['encodings']

            if len(face_info_encodings) == 0:
                continue

            matches = self.compare_faces(face_encoding, face_info_encodings)

            if any(matches):
                images_with_person.append(img_name)

        return images_with_person
Example #20
import os
import pickle
import sys

import numpy as np
from keras_facenet import FaceNet
from PIL.ImageDraw import Draw
from sklearn.preprocessing import LabelEncoder, Normalizer

from detect_faces import detect_faces

min_conf = 0.5

if __name__ == '__main__':
    with open(sys.argv[1], 'rb') as f:
        (le, model) = pickle.load(f, encoding='latin1')

    embedder = FaceNet()
    directory = sys.argv[2]
    files = sorted(os.listdir(directory))

    for i, filename in enumerate(files):
        path = os.path.join(directory, filename)
        faces, d_conf, d_loc, img = detect_faces(path, min_conf)
        d = Draw(img)

        results = []
        for j, face in enumerate(faces):
            rep = embedder.embeddings([face])
            pred = model.predict_proba(rep).ravel()
            maxI = np.argmax(pred)
            confidence = pred[maxI]
            person = le.inverse_transform([maxI])[0]
Example #21
class FaceRecognition(object):
    """
    Face Recognition object class
    """
    def __init__(self):
        """
        Initialize Face Recognition model
        """
        # GRAPH
        self.graph = tf.get_default_graph()

        # Load Face Detector
        self.face_detector = MTCNN()

        # Load FaceNet
        self.facenet = FaceNet()

        # Euclidean Classifier
        self.clf = None

    def predict(self, path, threshold=None):
        """
        Find faces and recognize them, return predicted people into image
        :param path: Source image path
        :param threshold: cutoff threshold
        :return: Return predictions and images with rectangles drawn
        """
        if not self.clf:
            raise RuntimeError("No classifier found. Please load classifier")

        start_at = time.time()
        bounding_boxes = []
        image = cv2.imread(path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = image.astype(np.uint8)
        for person, confidence, box in self.__predict__(image,
                                                        threshold=threshold):
            # Draw rectangle with person name
            cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]),
                          (0, 255, 0), 2)
            cv2.putText(image, person, (box[0], box[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0))

            bounding_boxes.append({
                "person": person,
                "confidence": confidence,
                "box": box,
            })

        # encode frame
        _, buffer = cv2.imencode('.jpg', image)

        return {
            "frame": base64.b64encode(buffer).decode('ascii'),
            "elapsed_time": (time.time() - start_at),
            "predictions": bounding_boxes
        }

    def __predict__(self, image, threshold=None):
        """
        Extract face and perform evaluation
        :param image: Source image
        :param threshold: decision threshold
        :return:  yield (person_id, person, confidence, box)
        """
        # Resize Image
        for encoding, face, box in self.face_encoding(image):
            # Check face size
            if (box[2] - box[0]) < config.MIN_FACE_SIZE[0] or \
                    (box[3] - box[1]) < config.MIN_FACE_SIZE[1]:
                yield (config.UNKNOWN_LABEL, 0.0, box)
            else:
                results = self.clf.predict(encoding)
                person, confidence = results["person"], results["confidence"]
                if threshold and confidence < threshold:
                    person = config.UNKNOWN_LABEL

                yield (person, confidence, box)

    def face_detection(self, image):
        """
        Face detection from source image
        :param image: Source image
        :return: extracted face and bounding box
        """
        image_to_detect = image.copy()

        # detect faces in the image
        for face_attributes in self.face_detector.detect_faces(
                image_to_detect):
            if face_attributes["confidence"] > config.FACE_CONFIDENCE:
                # extract the bounding box
                x1, y1, w, h = [
                    max(point, 0) for point in face_attributes["box"]
                ]
                x2, y2 = x1 + w, y1 + h

                face = image[y1:y2, x1:x2]
                # Align face
                face = FaceRecognition.align_face(face_attributes, face.copy())

                yield (cv2.resize(face, config.FACE_SIZE), (x1, y1, x2, y2))

    def face_encoding(self, source_image):
        """
        Extract face encodings from image
        :param source_image: Source image
        :return: 512 encoding, face and bounding box
        """
        for face, box in self.face_detection(source_image):
            with self.graph.as_default():
                # Face encoding
                encoding = self.facenet.embeddings(np.expand_dims(face,
                                                                  axis=0))[0]

                yield (encoding, face, box)

    @staticmethod
    def align_face(face_attribute, image):
        if not face_attribute:
            return image
        # Get left and right eyes
        left_eye = face_attribute["keypoints"]["left_eye"]
        right_eye = face_attribute["keypoints"]["right_eye"]
        # Get distance between eyes
        d = math.sqrt(
            math.pow(right_eye[0] - left_eye[0], 2) +
            math.pow(right_eye[1] - left_eye[1], 2))
        a = left_eye[1] - right_eye[1]
        # get alpha degree
        alpha = (math.asin(a / d) * 180.0) / math.pi

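        # rotate by the eye-line angle so the eyes end up horizontal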
        return imutils.rotate(image, -alpha)

    def load(self, path):
        """
        Load classifier from pickle file
        :param path: path
        """
        clf = EuclideanClassifier()
        clf.load(path)

        self.clf = clf

    def save(self, path):
        """
        Save classifier as pickle file
        :param path: path
        """
        self.clf.save(path)

    def fit(self, folder):
        """
        Fit classifier from directory.
        Directory must have this structure:
            Person 1:
                file.jpg
                ....
                file.jpg
            Person 2:
                file.jpg
                ...
                file.jpg
            ...
        :param folder: root folder path
        """
        # Initialize classifier
        clf = EuclideanClassifier()

        # Load all files
        files = []
        for ext in config.ALLOWED_IMAGE_TYPES:
            files.extend(
                glob.glob(os.path.join(folder, "*", ext), recursive=True))

        for path in tqdm.tqdm(files):
            # Load image
            image = cv2.imread(path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            # Get person name by folder
            person = os.path.split(os.path.split(path)[0])[1]

            # Get encoding
            for encoding, face, box in self.face_encoding(image):
                # Add to classifier
                clf.fit([encoding], [person])

        self.clf = clf

    def fit_from_dataframe(self, df, person_col="person", path_col="path"):
        """
        Fit classifier from dataframe.
        :param df: Pandas dataframe
        :param person_col: Dataframe column with person id
        :param path_col: Dataframe column with image path
        """
        # Initialize classifier
        clf = EuclideanClassifier()

        for index, row in tqdm.tqdm(df.iterrows(), total=df.shape[0]):
            # Load image
            image = cv2.imread(row[path_col])
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            # Get person name by folder
            person = row[person_col]

            # Get encoding
            for encoding, face, box in self.face_encoding(image):
                # Add to classifier
                clf.fit([encoding], [person])

        self.clf = clf
Example #22
import cv2
from keras_facenet import FaceNet
from PIL import Image

embedder = FaceNet()

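# extract() runs detection and embedding in one pass; each detection dict includes a 'box'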
detections = embedder.extract('test.jpg', threshold=0.95)


def crop_for_attendance(test_img):
    print(detections)

    boxes = []
    for i in range(len(detections)):
        boxes.append(detections[i]['box'])  # collect every detected box, not just the last one

    img = cv2.imread(test_img)
    im = Image.open(r"" + test_img)

    # draw rectangles around the faces and save a crop of each
    idx = 0
    for (x, y, w, h) in boxes:
        idx += 1
        cv2.rectangle(img, (x, y), (x + w, y + h), (50, 205, 50), 20)
        im1 = im.crop((x, y, x + w, y + h))
        im1 = im1.resize((96, 96))
        im1.save('./upload/images/cropped_' + str(idx) + '.JPG')
    # export the result
    cv2.imwrite("face_detected.png", img)
    print("successfully saved cropped images from uploaded photo")

from keras_facenet import FaceNet
from mtcnn import MTCNN
import pandas as pd 
import numpy as np 
import os
import cv2

embedder = FaceNet()
detector = MTCNN()

data = []

ID = [1,2,3]

Name = []

for file in os.listdir("person"):

    Name.append(file.split(".jpg")[0])
    path_file = os.path.join("person", file)

    img = cv2.imread(path_file)

    faces =  detector.detect_faces(img)

    face = faces[0]
    st = (face['box'][0], face['box'][1])
    en = (face['box'][0] + face['box'][2], face['box'][1] + face['box'][3])
    data.append(img[st[1] : en[1], st[0]: en[0]])

    img_show = img[st[1] : en[1], st[0]: en[0]]

# (snippet truncated here: identification() below is partial; its def line is reconstructed
# from the __main__ call, and X, Norm and data come from the original module)
def identification(vt):
    NAME = data[:, 515]

    distance = np.array([Norm(vt, i) for i in X])

    idx = np.argmin(distance)

    if distance[idx] < 0.80:
        return ID[idx], NAME[idx], distance[idx]

    return None, None, None


def is_have_face(id, name):
    pass


if __name__ == "__main__":

    import cv2
    img = cv2.imread("trung.png")

    from keras_facenet import FaceNet
    embedder = FaceNet()

    vt = embedder.embeddings([img])

    id, name, distance = identification(vt[0])

    print(id, name, distance)
_ind = list(range(len(indices)))
shuffle(_ind)

p1size = int(len(indices) * 0.9)

indices_1, indices_2 = [indices[i] for i in sorted(_ind[:p1size])
                        ], [indices[i] for i in sorted(_ind[p1size:])]
labels_1, labels_2 = [labels[i] for i in sorted(_ind[:p1size])
                      ], [labels[i] for i in sorted(_ind[p1size:])]

batch_size = 128

print("Num siamese pairs", len(indices))

from keras_facenet import FaceNet
face_embedder = FaceNet()
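# embed every training face once, then L2-normalize so pair distances are comparable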
train_embeddings = face_embedder.embeddings(train_images)
train_embeddings = normalize(train_embeddings)

generator = batch_generator(indices=indices_1,
                            labels=labels_1,
                            embeddings=train_embeddings,
                            batch_size=batch_size)

val = val_generator(indices=indices_2,
                    labels=labels_2,
                    embeddings=train_embeddings,
                    batch_size=batch_size)


def lr_schedule(epoch):
Example #26
import os
import cv2
import numpy as np
import mtcnn
import tensorflow as tf
from PIL import Image
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if physical_devices:  # only set memory growth when a GPU is actually present
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVC
import pickle
from keras_facenet import FaceNet
embedder = FaceNet()

with open('data.pkl', 'rb') as f:
    x_train, y_train, x_val, y_val = pickle.load(f)

emb_train_x = embedder.embeddings(x_train)
emb_val_x = embedder.embeddings(x_val)

normalizer = Normalizer(norm='l2')
x_train = normalizer.transform(emb_train_x)
x_val = normalizer.transform(emb_val_x)
encoder = LabelEncoder()
encoder.fit(y_train)
y_train = encoder.transform(y_train)
y_val = encoder.transform(y_val)

model = SVC(kernel='linear', probability=True)
Example #27
# -*- coding: utf-8 -*-
"""Face_reco.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/gist/AtulJoshi1/979e4652fbdf56d86335f089cbc6f133/face_reco.ipynb

**Face Recognition using Transfer Learning from FaceNet**
"""

from google.colab import drive
drive.mount('/content/drive')

from keras_facenet import FaceNet
embedder = FaceNet()

import matplotlib.image as mpimg
import cv2
import numpy as np
import pandas as pd


def load_train_data(n):
    lst = []
    for i in range(1, n + 1):
        filename = str(i) + '.jpg'
        x = mpimg.imread('/content/drive/My Drive/face reco/train/' + filename)
        x = cv2.resize(x, (160, 160))
        lst.append(x)
    df = np.array(lst)
Example #28
def __init__(self):
    self.model = MTCNN()
    self.embedder = FaceNet()
Example #29
status_message_main = None
recognized_face_table = None
attendance_table = None
sub_attendance_table = "<table class='table'><thead><tr><th scope='col'><div class='float-left d-inline-block'><a href='#' id='attendance_backbtn' onclick='ViewAttendance()'><i class='material-icons' style='vertical-align:middle;font-size:19px;color: white;'>keyboard_backspace</i></a></div>View Attendance</th></tr></thead><tbody><tr><td><div style='height:38vh; padding-top: 8px;'><table class='table-dark table-bordered compressed'><thead><tr><th style='width: 4%;'>S.no</th><th style='width: 40%;'>Name</th><th style='width: 20%;'>Rollno</th><th>Date</th><th>Time</th></tr></thead><tbody></tbody></table></div></td></tr> </tbody></table>"
temp_predicted_names = []
label_path = "C:/Users/MicroMedia/Desktop/Project_A_v2/model/datasets/label.csv"
testing_path = "C:/Users/MicroMedia/Desktop/Project_A_v2/model/datasets/test/"
training_path = "C:/Users/MicroMedia/Desktop/Project_A_v2/model/datasets/train/"
label_encoder_path = "C:/Users/MicroMedia/Desktop/Project_A_v2/model/output/label_encoder.pickle"
recognizer_model_path = "C:/Users/MicroMedia/Desktop/Project_A_v2/model/output/recognizer_model.pickle"
os.environ["OPENCV_VIDEOIO_PRIORITY_MSMF"] = "0"
resnet_ssd_model_path = "C:/Users/MicroMedia/Desktop/Project_A_v2/model/premade/face_detection_model/deploy.prototxt"
resnet_ssd_weight_path = "C:/Users/MicroMedia/Desktop/Project_A_v2/model/premade/face_detection_model/res10_300x300_ssd_iter_140000.caffemodel"
"""Objects"""
app = Flask(__name__)
facenet = FaceNet()
resnet_ssd = opencv.dnn.readNetFromCaffe(resnet_ssd_model_path,
                                         resnet_ssd_weight_path)
dlib_front_face_detector = dlib.get_frontal_face_detector()
dlib_front_face_landmarks = dlib.shape_predictor(
    "C:/Users/MicroMedia/Desktop/Project_A_v2/model/premade/dlib_model/shape_predictor_68_face_landmarks.dat"
)
dlib_front_face_alignment = FaceAligner(dlib_front_face_landmarks,
                                        desiredFaceWidth=256)
"""Routes"""


@app.route("/")
def frontend():
    return render_template("index.html")
import numpy as np
import sys
from keras.models import load_model
from keras_facenet import FaceNet

BATCH_SIZE = 1000
embedder = FaceNet()

if __name__ == '__main__':
    data = np.load(sys.argv[1])
    X, y = data['arr_0'], data['arr_1']
    print('Loaded X: {}, y: {}'.format(X.shape, y.shape))
    embeddings = []
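    # embed in chunks of BATCH_SIZE to keep memory bounded on large arrays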
    for i in range(0, X.shape[0], BATCH_SIZE):
        batch = X[i:i + BATCH_SIZE]
        print(batch.shape)
        batch_emb = np.asarray(embedder.embeddings(batch))
        embeddings.extend(batch_emb)
    np.savez_compressed(sys.argv[2], embeddings, y)