Example #1
def calculate_euclidean_distance(ref_frame, frame):
  """
  128차원 embedding vector를 구하고 유클리드 거리 계산.

  - input
  reference_clip: VideoFileClip
  clip: VideoFileClip

  - output
  euclidean distance between two vector, clip의 마지막 frame return
  """
  # Get the image as a numpy array
  #ref_frame = reference_clip.get_frame(reference_clip.reader.nframes/reference_clip.fps)
  #frame = clip.get_frame(clip.reader.nframes/clip.fps)

  # detection
  ref_frame_detected = extract_face_from_frame(ref_frame)
  frame_detected = extract_face_from_frame(frame)

  if len(ref_frame_detected) == 0 or len(frame_detected) == 0: # a face was not detected in at least one frame
      return 100  # sentinel value: treat as "no match"

  # feature extraction (embedding)
  ref_frame_detected = ref_frame_detected.reshape((1,)+ref_frame_detected.shape) # add a batch axis to match the model's input shape
  frame_detected = frame_detected.reshape((1,)+frame_detected.shape)


  embed_model = FaceNet()  # note: constructing FaceNet on every call is slow; hoist it out for repeated use
  ref_frame_embed = embed_model.embeddings(ref_frame_detected)
  frame_embed = embed_model.embeddings(frame_detected)

  diff = ref_frame_embed - frame_embed
  euclidean_dist = np.sqrt(np.sum(np.multiply(diff, diff)))
  return euclidean_dist
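
A minimal usage sketch (not part of the original source), assuming moviepy and the extract_face_from_frame helper are available; the file names are hypothetical:

from moviepy.editor import VideoFileClip

reference_clip = VideoFileClip("reference.mp4")
clip = VideoFileClip("candidate.mp4")

# grab (approximately) the last frame of each clip, as the commented-out lines above suggest
ref_frame = reference_clip.get_frame(reference_clip.duration - 1 / reference_clip.fps)
frame = clip.get_frame(clip.duration - 1 / clip.fps)

print("euclidean distance:", calculate_euclidean_distance(ref_frame, frame))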
Example #2
def give_embeddings(image_links):
    detector = MTCNN()
    model = FaceNet()
    embeddings_list = []
    for image in image_links:
        flag, faces_array = extract_faces(image, detector)
        if flag:
            for face_pixels in faces_array:
                face_pixels = face_pixels.astype('float32')
                face_pixels = face_pixels.reshape(1, 160, 160, 3)
                yhat = model.embeddings(face_pixels)
                embeddings_list.append(yhat[0])
    return embeddings_list
Example #3
def extract_faceID(filename, id):
    print("Starting FaceID extraction")
    img = cv2.imread(filename)
    embedder = FaceNet()
    detector = MTCNN()
    data = []
    faces = detector.detect_faces(img)
    for face in faces:
        st = (face['box'][0], face['box'][1])
        en = (face['box'][0] + face['box'][2], face['box'][1] + face['box'][3])
        data.append(img[st[1]:en[1], st[0]:en[0]])
        break  # only the first detected face is used
    vt = embedder.embeddings(data)[0]

    # save to database
    # note: building SQL by string concatenation is injection-prone; it works here
    # only because id and the embedding values are numeric
    sql = SQL_Server()

    query = "INSERT INTO dbo.FaceID VALUES (" + str(id)

    for i in vt:
        query += " , " + str(i)

    query += ")"

    # print(query)
    sql.insert(query)

    print("FaceID added to the database successfully")
Example #4
def _main():
    # img_paths = generate_input_paths(dir_path, 138)
    # for ip in img_paths:
    #     faces = process_image(ip)
    with open(sys.argv[1], 'rb') as f:
        (le, model) = pickle.load(f, encoding='latin1')

    embedder = FaceNet()
    directory = sys.argv[2]
    files = sorted(os.listdir(directory))

    for i, filename in enumerate(files):
        path = os.path.join(directory, filename)  # robust even if directory lacks a trailing slash
        boxes = process_image(path)
        img = np.asarray(Image.open(path).convert('RGB'))

        result = []
        for box in boxes:
            x1, y1, width, height = box
            x1, y1 = abs(x1), abs(y1)
            x2, y2 = x1 + width, y1 + height
            face = img[y1:y2, x1:x2]
            face = Image.fromarray(face).resize((IMAGE_SIZE, IMAGE_SIZE))
            face = np.asarray(face)

            rep = embedder.embeddings([face])
            pred = model.predict_proba(rep).ravel()
            maxI = np.argmax(pred)
            confidence = pred[maxI]
            person = le.inverse_transform([maxI])[0]
            result.append('{} ({:.2f})'.format(person, confidence))

        print(i, ', '.join(result))
Example #5
def trackimage():
    embedder = FaceNet()
    b = []
    identity = None            # stays None until a face matches a known embedding
    unknown_yes_or_no = 'yes'  # default to "unknown" so the post-loop code is safe
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        img1 = extract_face(frame)
        img1 = np.expand_dims(img1, axis=0)
        if img1.any():
            emb = embedder.embeddings(img1)
            emb = np.transpose(emb)
            min_dist = 100
            for key, value in dictq.items():
                dist = np.linalg.norm(emb - value)
                b.append(dist)
                if dist < min_dist:
                    min_dist = dist
                    identity = key
            print(identity)
            if min_dist < 1.0:
                cv2.putText(frame, "Face : " + identity, (100, 100),
                            cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)
                unknown_yes_or_no = 'no'
            else:
                cv2.putText(frame, 'no match', (100, 100),
                            cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)
                unknown_yes_or_no = 'yes'
            cv2.imshow('face', frame)

            if cv2.waitKey(1) & 0xFF == 27:
                break

    cap.release()
    cv2.destroyAllWindows()
    import pyttsx3
    engine = pyttsx3.init()
    if (unknown_yes_or_no == "yes"):
        engine.say("Good morning sorry we couldn't recognise you")
    else:
        str1 = "good morning " + identity + " your attendance has been recorded"
        engine.say(str1)
        import mysql.connector
        mydb = mysql.connector.connect(host="localhost",
                                       user="******",
                                       passwd="albertbolt23",
                                       database="faceproject",
                                       auth_plugin='mysql_native_password')

        mycursor = mydb.cursor()
        from datetime import date
        today1 = date.today()

        sql = "INSERT INTO attendance values('%s','%s','morning')" % (
            identity, str(today1))

        mycursor.execute(sql)
        mydb.commit()

    engine.runAndWait()
Example #6
def save_face_encoding(src_path):
    facenet = FaceNet()
    img_files = glob.glob('{0}/**/*.jpg'.format(src_path), recursive=True)

    for img_file in tqdm(img_files):
        em = facenet.embeddings([cv2.imread(img_file)[:, :, ::-1]])  # BGR -> RGB
        # str.replace('jpg', 'npy') would also rewrite 'jpg' elsewhere in the path
        np.save(os.path.splitext(img_file)[0] + '.npy', em[0])
Example #7
def create_pickle(training_paths, testing_paths, training_destination,
                  testing_destination):
    embedder = FaceNet()
    # images is a list of images, each as an
    # np.ndarray of shape (H, W, 3).

    for paths, destination in [(training_paths, training_destination),
                               (testing_paths, testing_destination)]:

        print("Getting images...")
        images = get_images_from_filepaths(paths)
        print("Getting embeddings...")
        try:
            # some keras_facenet versions accept a verbose kwarg; fall back if not
            embeddings = embedder.embeddings(images, verbose=1)
        except TypeError:
            embeddings = embedder.embeddings(images)

        print("Saving embeddings...")
        save_imgs(paths, embeddings, destination)
Example #8
class FaceNetWrap:
  def __init__(self, callback=None):
    self.file_list = os.listdir('./dataset/train')
    self.faces = []
    self.names = []  # faces and names correspond one-to-one by index
    for file in self.file_list:
      self.faces.append(convert_gray_to_bgr_use_path(os.path.join('./dataset/train', file)))
      self.names.append(file.split('.')[0].split('-')[1])
    self.embedder = FaceNet()
    self.embeddings = self.embedder.embeddings(self.faces)
    if callback:
      callback()

  def get_embedding(self, img):
    embeddings_test = self.embedder.embeddings([img])
    return embeddings_test[0]

  # return the matched person's name, the distance, and the photo
  def get_best_fit(self, embedding):
    dis = [get_L2_norm_squared(em, embedding) for em in self.embeddings]
    idx = np.argmin(dis)
    img = cv2.imread(os.path.join('./dataset/train', self.file_list[idx]), cv2.IMREAD_GRAYSCALE)
    name = self.names[idx]
    return name, dis[idx], img
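
A minimal usage sketch (assumed, not in the original): embed a query image with the wrapper and look up the closest training face. The test file name is hypothetical but follows the 'N-name.jpg' pattern the constructor parses:

wrap = FaceNetWrap()
query = convert_gray_to_bgr_use_path('./dataset/test/0-alice.jpg')
embedding = wrap.get_embedding(query)
name, dist, match_img = wrap.get_best_fit(embedding)
print(name, dist)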
Example #9
def createDatabase():
	database = pd.DataFrame({'folder_id': [], 'photo_id': [], 'face_id': [], 'embedding': []}, dtype="float32")

	#traverse through all the images
	images_path = "/home/darealappu/Desktop/CDAC/DR-GAN tensorflow/vggface2_test/test"
	index=0
	time_to_return = False
	embedder = FaceNet()

	for folder in os.listdir(images_path):
		folder_id = int(folder[1:])
		for image in os.listdir(os.path.join(images_path,folder)):
			if time_to_return == True:
				return database
			photo_id = int(image[0:4])
			face_id = int(image[5:7])
			img = cv2.imread(os.path.join(images_path,folder,image)).astype(np.float32)
			
			img.resize((1, img.shape[0], img.shape[1], img.shape[2]))  # add a batch axis in place
			
			em = embedder.embeddings(img)

			# note: DataFrame.append was removed in pandas 2.0; use pd.concat there
			database = database.append({
				'folder_id': folder_id,
				'photo_id': photo_id,
				'face_id': face_id,
				'embedding': em}, ignore_index=True)

			index+=1
			print(index-1)

			if index % 10 == 0:
				if index == 5000:
					time_to_return = True
				break  # move on to the next folder
	return database
Example #10
#         'sl08afxcx4_115.515400-119.986533','bLEddi92aFI_171.480000-177.400000'] 

import os

import cv2
import numpy as np
from keras_facenet import FaceNet

faces2_folder = '../../../../final_data/separated_data/faces2'
embedder = FaceNet()  # assumed: created in the truncated part of the original script

paths = []
for (dirpath, dirnames, filenames) in os.walk(faces2_folder):
    paths.extend(dirnames)
    break

count = 0
for path in paths:
  print("{0}/{1}".format(count, len(paths)))
  count += 1
  a = np.zeros((75, 512))
  for i in range(75):
    image = cv2.imread('../../../../final_data/separated_data/faces2/'+path+'/face'+str(i)+'.jpg')
#    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = np.array(image)
    a[i, :] = embedder.embeddings([image])[0]
  os.makedirs('../../../../final_data/separated_data/nparray', exist_ok=True)
  np.save('../../../../final_data/separated_data/nparray/'+path+'_facenet.npy',a)
#    with open('../../../download/separated_data/faces2/'+path+'/facenet_out.csv', mode='a') as f:
#      facenet_writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
#      facenet_writer.writerow([path,str(i),embeddings])
Example #11
import pickle
import matplotlib.pyplot as plt
import numpy as np
import mtcnn
import tensorflow as tf
from PIL import Image
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if physical_devices:  # guard: only set memory growth when a GPU is present
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

from sklearn.preprocessing import Normalizer
from keras_facenet import FaceNet
embedder = FaceNet()

normalizer = Normalizer(norm='l2')
from detect_face import face_extract
class_label = [
    'Ben Affleck', 'Elton John', 'Jerry Seinfeld', 'Madonna', 'Mindy Kaling'
]
with open('model.pkl', 'rb') as f:
    clf = pickle.load(f)
file = 'dataset/val/madonna/httpassetsrollingstonecomassetsarticlemadonnadavidbowiechangedthecourseofmylifeforeversmallsquarexmadonnabowiejpg.jpg'
img = plt.imread(file)
face = face_extract(file, (160, 160))
face_array = np.asarray(face)
face_array = np.expand_dims(face_array, axis=0)
embed = embedder.embeddings(face_array)
embed = normalizer.transform(embed)  # Normalizer is stateless, so transform == fit_transform
pred = clf.predict(embed)
# clf.predict returns the encoded label; pred.argmax() on a single-element
# array is always 0, so index the class list with the label itself
label = class_label[pred[0]]
print(label)
Example #12
import tensorflow as tf
from PIL import Image
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if physical_devices:  # guard: only set memory growth when a GPU is present
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVC
import pickle
from keras_facenet import FaceNet
embedder = FaceNet()

with open('data.pkl', 'rb') as f:
    x_train, y_train, x_val, y_val = pickle.load(f)

emb_train_x = embedder.embeddings(x_train)
emb_val_x = embedder.embeddings(x_val)

normalizer = Normalizer(norm='l2')
x_train = normalizer.transform(emb_train_x)
x_val = normalizer.transform(emb_val_x)
encoder = LabelEncoder()
encoder.fit(y_train)
y_train = encoder.transform(y_train)
y_val = encoder.transform(y_val)

model = SVC(kernel='linear', probability=True)
model.fit(x_train, y_train)
# predict

prediction = model.predict(x_val)
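
The snippet imports accuracy_score but never calls it; a natural completion (an assumption, not in the original) is:

print('validation accuracy:', accuracy_score(y_val, prediction))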
Example #13
def evaluate_id_lfw(dataset_path, cleanir, dsize=(64, 64)):
    """Evaluates CLEANIR model by using LFW dataset

    Arguments:
        dataset_path {str} -- lfw dataset path
        cleanir {Cleanir} -- Cleanir instance

    Keyword Arguments:
        dsize {tuple} -- size of cropped face (default: {(64, 64)})

    Returns:
        dict -- evaluation results
    """

    print('Reading LFW dataset pair information..')
    matched_list, unmatched_list = read_lfw_pair_info(dataset_path)

    print('Loading FaceNet..')
    facenet = FaceNet()

    o_dists = []
    m0_dists = []
    m30_dists = []
    m60_dists = []
    m90_dists = []
    m120_dists = []
    m150_dists = []
    m180_dists = []

    print('Loading and modifying LFW dataset images..')
    for face1_path, face2_path in tqdm(matched_list):
        face1 = crop_face_from_file(face1_path, dsize)
        face2 = crop_face_from_file(face2_path, dsize)

        deid = cleanir.get_deid_single_axis_func(face1)

        ems = facenet.embeddings([
            face1, face2,
            deid(180),
            deid(90),
            deid(0),
            deid(30),
            deid(60),
            deid(120),
            deid(150)
        ])
        o_dists += [facenet.compute_distance(ems[0], ems[1])]
        m180_dists += [facenet.compute_distance(ems[2], ems[1])]
        m90_dists += [facenet.compute_distance(ems[3], ems[1])]
        m0_dists += [facenet.compute_distance(ems[4], ems[1])]
        m30_dists += [facenet.compute_distance(ems[5], ems[1])]
        m60_dists += [facenet.compute_distance(ems[6], ems[1])]
        m120_dists += [facenet.compute_distance(ems[7], ems[1])]
        m150_dists += [facenet.compute_distance(ems[8], ems[1])]

    results = {
        'threshold': [],
        'original': [],
        '0': [],
        '30': [],
        '60': [],
        '90': [],
        '120': [],
        '150': [],
        '180': []
    }

    print('Thresholding..')
    for threshold in tqdm(np.arange(0.1, 2.0, 0.1)):
        results['threshold'].append(threshold)
        results['original'].append(
            np.sum(np.array(o_dists) < threshold) / len(o_dists))
        results['180'].append(
            np.sum(np.array(m180_dists) < threshold) / len(m180_dists))
        results['90'].append(
            np.sum(np.array(m90_dists) < threshold) / len(m90_dists))
        results['0'].append(
            np.sum(np.array(m0_dists) < threshold) / len(m0_dists))
        results['30'].append(
            np.sum(np.array(m30_dists) < threshold) / len(m30_dists))
        results['60'].append(
            np.sum(np.array(m60_dists) < threshold) / len(m60_dists))
        results['120'].append(
            np.sum(np.array(m120_dists) < threshold) / len(m120_dists))
        results['150'].append(
            np.sum(np.array(m150_dists) < threshold) / len(m150_dists))

    return results
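
A short sketch (assumed, not in the original) of how the returned dict could be visualized, plotting match rate against the distance threshold for each de-identification angle; 'lfw/' and cleanir are placeholders:

import matplotlib.pyplot as plt

results = evaluate_id_lfw('lfw/', cleanir)
for key in ['original', '0', '30', '60', '90', '120', '150', '180']:
    plt.plot(results['threshold'], results[key], label=key)
plt.xlabel('distance threshold')
plt.ylabel('match rate')
plt.legend()
plt.show()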
Example #14
class FaceRecognition(object):
    """
    Face Recognition object class
    """
    def __init__(self):
        """
        Initialize Face Recognition model
        """
        # GRAPH (TF1-style; under TF2 use tf.compat.v1.get_default_graph)
        self.graph = tf.get_default_graph()

        # Load Face Detector
        self.face_detector = MTCNN()

        # Load FaceNet
        self.facenet = FaceNet()

        # Euclidean Classifier
        self.clf = None

    def predict(self, path, threshold=None):
        """
        Find faces and recognize them, return predicted people into image
        :param path: Source image path
        :param threshold: cutoff threshold
        :return: Return predictions and images with rectangles drawn
        """
        if not self.clf:
            raise RuntimeError("No classifier found. Please load classifier")

        start_at = time.time()
        bounding_boxes = []
        image = cv2.imread(path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = image.astype(np.uint8)
        for person, confidence, box in self.__predict__(image,
                                                        threshold=threshold):
            # Draw rectangle with person name
            cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]),
                          (0, 255, 0), 2)
            cv2.putText(image, person, (box[0], box[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0))

            bounding_boxes.append({
                "person": person,
                "confidence": confidence,
                "box": box,
            })

        # encode frame
        _, buffer = cv2.imencode('.jpg', image)

        return {
            "frame": base64.b64encode(buffer).decode('ascii'),
            "elapsed_time": (time.time() - start_at),
            "predictions": bounding_boxes
        }

    def __predict__(self, image, threshold=None):
        """
        Extract face and perform evaluation
        :param image: Source image
        :param threshold: decision threshold
        :return: yields (person, confidence, box)
        """
        # Resize Image
        for encoding, face, box in self.face_encoding(image):
            # Check face size
            if (box[2] - box[0]) < config.MIN_FACE_SIZE[0] or \
                    (box[3] - box[1]) < config.MIN_FACE_SIZE[1]:
                yield (config.UNKNOWN_LABEL, 0.0, box)
            else:
                results = self.clf.predict(encoding)
                person, confidence = results["person"], results["confidence"]
                if threshold and confidence < threshold:
                    person = config.UNKNOWN_LABEL

                yield (person, confidence, box)

    def face_detection(self, image):
        """
        Face detection from source image
        :param image: Source image
        :return: extracted face and bounding box
        """
        image_to_detect = image.copy()

        # detect faces in the image
        for face_attributes in self.face_detector.detect_faces(
                image_to_detect):
            if face_attributes["confidence"] > config.FACE_CONFIDENCE:
                # extract the bounding box
                x1, y1, w, h = [
                    max(point, 0) for point in face_attributes["box"]
                ]
                x2, y2 = x1 + w, y1 + h

                face = image[y1:y2, x1:x2]
                # Align face
                face = FaceRecognition.align_face(face_attributes, face.copy())

                yield (cv2.resize(face, config.FACE_SIZE), (x1, y1, x2, y2))

    def face_encoding(self, source_image):
        """
        Extract face encodings from image
        :param source_image: Source image
        :return: 512-dimensional encoding, face and bounding box
        """
        for face, box in self.face_detection(source_image):
            with self.graph.as_default():
                # Face encoding
                encoding = self.facenet.embeddings(np.expand_dims(face,
                                                                  axis=0))[0]

                yield (encoding, face, box)

    @staticmethod
    def align_face(face_attribute, image):
        if not face_attribute:
            return image
        # Get left and right eyes
        left_eye = face_attribute["keypoints"]["left_eye"]
        right_eye = face_attribute["keypoints"]["right_eye"]
        # Get distance between eyes
        d = math.sqrt(
            math.pow(right_eye[0] - left_eye[0], 2) +
            math.pow(right_eye[1] - left_eye[1], 2))
        a = left_eye[1] - right_eye[1]
        # get alpha degree
        alpha = (math.asin(a / d) * 180.0) / math.pi

        return imutils.rotate(image, -alpha)

    def load(self, path):
        """
        Load classifier from pickle file
        :param path: path
        """
        clf = EuclideanClassifier()
        clf.load(path)

        self.clf = clf

    def save(self, path):
        """
        Save classifier as pickle file
        :param path: path
        """
        self.clf.save(path)

    def fit(self, folder):
        """
        Fit classifier from directory.
        Directory must have this structure:
            Person 1:
                file.jpg
                ....
                file.jpg
            Person 2:
                file.jpg
                ...
                file.jpg
            ...
        :param folder: root folder path
        """
        # Initialize classifier
        clf = EuclideanClassifier()

        # Load all files
        files = []
        for ext in config.ALLOWED_IMAGE_TYPES:
            files.extend(
                glob.glob(os.path.join(folder, "*", ext), recursive=True))

        for path in tqdm.tqdm(files):
            # Load image
            image = cv2.imread(path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            # Get person name by folder
            person = os.path.split(os.path.split(path)[0])[1]

            # Get encoding
            for encoding, face, box in self.face_encoding(image):
                # Add to classifier
                clf.fit([encoding], [person])

        self.clf = clf

    def fit_from_dataframe(self, df, person_col="person", path_col="path"):
        """
        Fit classifier from dataframe.
        :param df: Pandas dataframe
        :param person_col: Dataframe column with person id
        :param path_col: Dataframe column with image path
        """
        # Initialize classifier
        clf = EuclideanClassifier()

        for index, row in tqdm.tqdm(df.iterrows(), total=df.shape[0]):
            # Load image
            image = cv2.imread(row[path_col])
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            # Get person name by folder
            person = row[person_col]

            # Get encoding
            for encoding, face, box in self.face_encoding(image):
                # Add to classifier
                clf.fit([encoding], [person])

        self.clf = clf
Example #15
from keras_facenet import FaceNet
from utils import load_data
from scipy.spatial.distance import euclidean
from sklearn.preprocessing import normalize
import numpy as np
from tqdm import tqdm

train_images, train_labels, test_images, test_labels, train_pairs, submission_pairs = load_data()

embedder = FaceNet()
embeddings = embedder.embeddings(test_images)
embeddings = normalize(embeddings)

print(embeddings.shape)

test_labels_indices = {l: i for i, l in enumerate(test_labels)}

distances = []

for pair in tqdm(submission_pairs):
    p1, p2 = pair
    i1 = test_labels_indices[p1]
    i2 = test_labels_indices[p2]
    e1 = embeddings[i1]
    e2 = embeddings[i2]
    dist = euclidean(e1, e2)
    distances.append(dist)

# convert distances to probability distribution
distances = np.asarray(distances)
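
The conversion promised by the final comment is truncated in the source. One plausible completion (an assumption, not the author's code) maps distances to [0, 1] similarity scores by min-max scaling and inversion, so smaller distances yield scores closer to 1:

similarities = 1.0 - (distances - distances.min()) / (distances.max() - distances.min())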
Example #16
import numpy as np
import sys
from keras_facenet import FaceNet

BATCH_SIZE = 1000
embedder = FaceNet()

if __name__ == '__main__':
    data = np.load(sys.argv[1])
    X, y = data['arr_0'], data['arr_1']
    print('Loaded X: {}, y: {}'.format(X.shape, y.shape))
    embeddings = []
    for i in range(0, X.shape[0], BATCH_SIZE):
        batch = X[i:i + BATCH_SIZE]
        print(batch.shape)
        batch_emb = np.asarray(embedder.embeddings(batch))
        embeddings.extend(batch_emb)
    np.savez_compressed(sys.argv[2], embeddings, y)
Example #17
                         header=None)
    y_test = y_test.values
    return df, y_test


'''
Loading the data set 
'''
X_train_orig, y_train = load_train_data(24)
X_test_orig, y_test = load_test_data(7)
print('X_Train data shape=', X_train_orig.shape)
print('X_Test data shape=', X_test_orig.shape)
print('y_Train data shape=', y_train.shape)
print('y_Test data shape=', y_test.shape)

# assumed: embedder was instantiated in the truncated part of the original script
embedder = FaceNet()
X_train = embedder.embeddings(X_train_orig)
X_test = embedder.embeddings(X_test_orig)
print('Train embed shape=', X_train.shape)
print('Test embed shape=', X_test.shape)

from sklearn.svm import SVC

from sklearn.linear_model import LogisticRegression

model = LogisticRegression()

model.fit(X_train, y_train)

#Predicted faces
model.predict(X_test)
Example #18
# assumed reconstruction: the opening of this snippet is truncated in the source
import glob
import os

import cv2
import numpy as np
from mtcnn import MTCNN
from keras_facenet import FaceNet

detector = MTCNN()


def extract_face(img, required_size=(160, 160)):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = detector.detect_faces(img)
    if (results):
        x1, y1, w, h = results[0]['box']
        x1, y1 = abs(x1), abs(y1)
        x2, y2 = x1 + w, y1 + h
        # extract the face
        face = img[y1:y2, x1:x2]
        # resize pixels to the model size
        image = cv2.resize(face, required_size, interpolation=cv2.INTER_AREA)
        return image


x = []
for i in glob.glob(r'C:\Users\albertbolt\Downloads\face_dataset\*\*.jpeg'):
    img = cv2.imread(i)
    img = extract_face(img)
    x.append(img)
x = np.stack(x)
embedder = FaceNet()
embeddings = embedder.embeddings(x)
a = []
for root, dirp, file in os.walk(r'C:\Users\albertbolt\Downloads\face_dataset'):
    a.append(dirp)
dictq = {}
for i in range(len(a[0])):
    dictq[a[0][i]] = embeddings[i]
for key, value in dictq.items():
    value.shape = [512, 1]

print(dictq)
Example #19
class FaceRegModel(object):
    def __init__(self):
        self.model = MTCNN()
        self.embedder = FaceNet()

    @staticmethod
    def load_from_file(f):
        img_str = f.read()
        nparr = np.frombuffer(img_str, np.uint8)  # np.fromstring is deprecated
        img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)  # the original dropped this return value
        return img_np

    def encode_face(self, img):
        location = self.locate_faces(img)[:1]
        face_img = self.extract_faces(img, location)
        return self.embedder.embeddings(face_img)[0]

    def encode_faces(self, img, locations):
        return self.embedder.embeddings(self.extract_faces(img, locations))

    def extract_faces(self, img, locations):
        # locations are (top, right, bottom, left) tuples, as produced by locate_faces
        res_imgs = []
        for (top, right, bottom, left) in locations:
            res_imgs.append(img[top:bottom, left:right])
        return res_imgs

    def locate_faces(self, img):
        faces = self.model.detect_faces(img)
        faces = [x['box'] for x in faces]
        faces = [(max(y, 0), min(x + w,
                                 img.shape[1]), min(y + h,
                                                    img.shape[0]), max(x, 0))
                 for x, y, w, h in faces]
        return np.array(faces)

    @staticmethod
    def face_distance(face1, face2):
        return np.linalg.norm(face1 - face2, axis=1)

    @staticmethod
    def compare_faces(face1, face2, tolerance=0.9):
        return list(FaceRegModel.face_distance(face1, face2) <= tolerance)

    def process_images(self, images, scale=0.5):
        """Get face locations and encodings from given images."""
        face_information = {}

        for (img_name, img) in images.items():
            # Scale down for faster processing
            small_img = cv2.resize(img, (0, 0), fx=scale, fy=scale)

            # Face detection
            face_locations = self.locate_faces(small_img)
            if len(face_locations) == 0:
                # Ignoring empty
                face_encodings = []
            else:
                # Face extraction
                face_encodings = self.encode_faces(small_img, face_locations)

            face_information[img_name] = {
                'locations': face_locations,
                'encodings': face_encodings
            }

        return face_information

    def find_images_with_person(self, face_encoding, face_information):
        images_with_person = []
        for img_name, face_info in face_information.items():
            face_info_encodings = face_info['encodings']

            if len(face_info_encodings) == 0:
                continue

            matches = self.compare_faces(face_encoding, face_info_encodings)

            if any(matches):
                images_with_person.append(img_name)

        return images_with_person
    "static/img/din.jpg", "static/img/ani.jpg", "static/img/sidh.jpg"
]
images = []

for i in tqdm(images_path):
    print(path + i)
    img = cv2.imread(path + i)
    img = cv2.resize(img, (480, 480))
    img = numpy.asarray(img)
    images.append(img)

images = np.array(images)
print(images.shape)
# images is a list of images, each as an
# np.ndarray of shape (H, W, 3).
embeddings = embedder.embeddings(images)

import math
print(embeddings.shape)
# print(embeddings[0][:50])
temp1 = 0
temp2 = 0
temp3 = 0
temp4 = 0
temp5 = 0
for i in range(len(embeddings[0])):
    temp1 += ((embeddings[0][i]) - (embeddings[1][i]))  #--same
    temp2 += ((embeddings[0][i]) - (embeddings[2][i]))  #diff
    temp3 += ((embeddings[0][i]) - (embeddings[3][i]))  #diff
    temp4 += ((embeddings[0][i]) - (embeddings[4][i]))  #diff
    temp5 += ((embeddings[0][i]) - (embeddings[5][i]))  #diff
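
Note that the temp sums above accumulate signed coordinate differences, which largely cancel out and are not a distance. A Euclidean comparison (an assumption about the intent, not in the original) would be:

for i in range(1, 6):
    print(i, np.linalg.norm(embeddings[0] - embeddings[i]))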
Example #21
if __name__ == '__main__':
    with open(sys.argv[1], 'rb') as f:
        (le, model) = pickle.load(f, encoding='latin1')

    embedder = FaceNet()
    directory = sys.argv[2]
    files = sorted(os.listdir(directory))

    for i, filename in enumerate(files):
        path = os.path.join(directory, filename)
        faces, d_conf, d_loc, img = detect_faces(path, min_conf)
        d = Draw(img)

        results = []
        for j, face in enumerate(faces):
            rep = embedder.embeddings([face])
            pred = model.predict_proba(rep).ravel()
            maxI = np.argmax(pred)
            confidence = pred[maxI]
            person = le.inverse_transform([maxI])[0]
            result = '{} ({:.2f})'.format(person, confidence)
            d.rectangle(d_loc[j], outline='green')
            d.text(d_loc[j][0],
                   '{}\n detect: {:.2f}'.format(result, d_conf[j]))
            results.append(result)

        print(i, ', '.join(results))

        if len(sys.argv) > 3:  # optional output directory
            img.save('{}/{}'.format(sys.argv[3], filename), 'JPEG')
Example #22
shuffle(_ind)

p1size = int(len(indices) * 0.9)

indices_1 = [indices[i] for i in sorted(_ind[:p1size])]
indices_2 = [indices[i] for i in sorted(_ind[p1size:])]
labels_1 = [labels[i] for i in sorted(_ind[:p1size])]
labels_2 = [labels[i] for i in sorted(_ind[p1size:])]

batch_size = 128

print("Num siamese pairs", len(indices))

from keras_facenet import FaceNet
face_embedder = FaceNet()
train_embeddings = face_embedder.embeddings(train_images)
train_embeddings = normalize(train_embeddings)

generator = batch_generator(indices=indices_1,
                            labels=labels_1,
                            embeddings=train_embeddings,
                            batch_size=batch_size)

val = val_generator(indices=indices_2,
                    labels=labels_2,
                    embeddings=train_embeddings,
                    batch_size=batch_size)


def lr_schedule(epoch):
    if epoch <= 30:
Example #23
    NAME = data[:, 515]

    distance = np.array([Norm(vt, i) for i in X])

    min_idx = np.argmin(distance)  # avoid shadowing the builtin min

    if distance[min_idx] < 0.80:

        return ID[min_idx], NAME[min_idx], distance[min_idx]

    return None, None, None


def is_have_face(id, name):
    pass


if __name__ == "__main__":

    import cv2
    img = cv2.imread("trung.png")

    from keras_facenet import FaceNet
    embedder = FaceNet()

    vt = embedder.embeddings([img])

    id, name, distance = identification(vt[0])

    print(id, name, distance)
Example #24
    Name.append(file.split(".jpg")[0])
    path_file = os.path.join("person", file)

    img = cv2.imread(path_file)

    faces = detector.detect_faces(img)

    face = faces[0]
    st = (face['box'][0], face['box'][1])
    en = (face['box'][0] + face['box'][2], face['box'][1] + face['box'][3])
    data.append(img[st[1]:en[1], st[0]:en[0]])

    img_show = img[st[1]:en[1], st[0]:en[0]]

    img_show = cv2.resize(img_show, (200,200))

    cv2.imshow('frame', img_show)

    cv2.waitKey(1000)

vt = embedder.embeddings(data)
print(vt.shape)

df = pd.DataFrame(data=vt, index=[i for i in range(vt.shape[0])], columns=[str(i) for i in range(vt.shape[1])])

df['ID'] = ID 
df['NAME'] = Name

df.to_csv("root.csv")
Example #25
    sorted_path.sort()
    testing_paths = [f"{testing_path}/{path}" for path in sorted_path if path.split(".")[1] == "png"]
    # cropped_testing, cropped_paths_testing = filter_faces(testing_paths)
    # for img in cropped_testing:
    #   img = expand_dims(img, axis=3)

    # print("Getting images...")

    images = get_images_from_filepaths(testing_paths)
    print("Getting embeddings...")

    # FaceNet From keras
    embedder = FaceNet()

    try:
        # some keras_facenet versions accept a verbose kwarg; fall back if not
        embeddings = embedder.embeddings(images, verbose=1)
    except TypeError:
        embeddings = embedder.embeddings(images)

    dictionary_vectors = {path: embedding for path, embedding in zip(testing_paths, embeddings)}

    # Split Data
    names = []
    for name_index in range(len(testing_paths) // 2):
        names.append(basename(testing_paths[name_index * 2])[:-5])

    pbar = tqdm.tqdm(total=len(names)**2)
    data = {"TP": 0, "FN": 0, "TN": 0, "FP": 0}
    # Distance
    for name1 in names:
        for name2 in names: