Example #1
    def __init__(self):
        self.pre_model = create_model()
        self.pre_model.load_weights('./deep/weights/nn4.small2.v1.h5')
        self.alignment = AlignDlib('./deep/models/landmarks.dat')
        self.metadata = None
        self.embedded = None
        self.classifier = None
    def __init__(self):
        dst_dir = 'models'
        dst_file = os.path.join(dst_dir, 'landmarks.dat')

        if not os.path.exists(dst_file):
            os.makedirs(dst_dir, exist_ok=True)
            download_landmarks(dst_file)

        # Create CNN model and load pretrained weights (OpenFace nn4.small2)
        self.nn4_small2_pretrained = create_model()
        self.nn4_small2_pretrained.load_weights('models/nn4.small2.v1.h5')
        self.metadata = self.load_metadata('faces')

        # Initialize the OpenFace face alignment utility
        self.alignment = AlignDlib('models/landmarks.dat')

        # Get embedding vectors
        # self.embedded = np.zeros((self.metadata.shape[0], 128))
        self.embedded = np.zeros((0, 128))

        # Train images
        custom_metadata = self.load_metadata("faces")
        self.metadata = np.append(self.metadata, custom_metadata)
        self.update_embeddings()
        self.train_images()
Example #3
    def __init__(self):
        # Initialize the dlib face alignment utility
        self.alignment = AlignDlib('shape_predictor_68_face_landmarks.dat')
        self.detector = dlib.get_frontal_face_detector()
        # Load the model
        self.model = load_model('weights_final.hdf5',
                                custom_objects={
                                    'triplet_loss': self.triplet_loss,
                                    'tf': tf
                                })

        # Get the web camera feed
        self.cap = cv2.VideoCapture(0)

        # Defining variables
        self.threshold = 0.7
        self.base_images = []
        self.distances = []
        self.set_new_person = False
        self.saving = False
        self.pressed = 0
        self.next_base_image = 0
        self.names = []
        self.saved_images = []
        self.counter = 0

        # Defining the path for image saving
        self.path = os.path.join(os.getcwd(), 'persons')
        # Delete previous directories
        if os.path.isdir(self.path):
            shutil.rmtree(self.path, onerror=self.onerror)
        # Create the 'persons' directory
        if not os.path.isdir(self.path):
            os.mkdir(self.path)
    def __init__(self):
        dst_dir = 'models'
        dst_file = os.path.join(dst_dir, 'landmarks.dat')

        if not os.path.exists(dst_file):
            os.makedirs(dst_dir, exist_ok=True)
            url = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
            decompressor = bz2.BZ2Decompressor()
            with urlopen(url) as src, open(dst_file, 'wb') as dst:
                data = src.read(1024)
                while len(data) > 0:
                    dst.write(decompressor.decompress(data))
                    data = src.read(1024)
        # download_landmarks(self,dst_file)
        # Create CNN model and load pretrained weights (OpenFace nn4.small2)
        self.nn4_small2_pretrained = create_model()
        self.nn4_small2_pretrained.load_weights('models/nn4.small2.v1.h5')
        self.metadata = self.load_metadata('faces')

        # Initialize the OpenFace face alignment utility
        self.alignment = AlignDlib('models/landmarks.dat')

        # Get embedding vectors
        # self.embedded = np.zeros((self.metadata.shape[0], 128))
        self.embedded = np.zeros((0, 128))

        # Train images
        custom_metadata = self.load_metadata("faces")
        self.metadata = np.append(self.metadata, custom_metadata)
        self.update_embeddings()
        self.train_images()
Example #5
class DeepFace:
    def __init__(self):
        self.pre_model = create_model()
        self.pre_model.load_weights('./deep/weights/nn4.small2.v1.h5')
        self.alignment = AlignDlib('./deep/models/landmarks.dat')
        self.metadata = None
        self.embedded = None
        self.classifier = None

    def pre_train(self, path):

        self.metadata = load_metadata(path)
        self.embedded = np.zeros((self.metadata.shape[0], 128))

        for i, m in enumerate(self.metadata):
            img = load_image(m.image_path())
            img = self.align_image(img)
            if img is None:
                continue
            # scale RGB values to interval [0,1]
            img = (img / 255.).astype(np.float32)
            # obtain embedding vector for image
            self.embedded[i] = self.pre_model.predict(
                np.expand_dims(img, axis=0))[0]

        targets = np.array([m.name for m in self.metadata])

        # LabelEncoder maps each label to an integer in [0, n_classes - 1]
        self.encoder = LabelEncoder()
        self.encoder.fit(targets)
        # Encode the string labels as consecutive integers
        self.y = self.encoder.transform(targets)

    def align_image(self, img):
        return self.alignment.align(
            96,
            img,
            self.alignment.getLargestFaceBoundingBox(img),
            landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)

    def train(self, classifier='LinearSVC'):
        X_train = self.embedded
        y_train = self.y

        self.classifier = globals()[classifier]()  # look the classifier class up by name
        self.classifier.fit(X_train, y_train)

    def predict(self, img):
        img = self.align_image(img)
        if img is None:
            return str("UnKnown")
        img = (img / 255.).astype(np.float32)
        one_embedded = self.pre_model.predict(np.expand_dims(img, axis=0))[0]
        example_prediction = self.classifier.predict([one_embedded])
        example_identity = self.encoder.inverse_transform(
            example_prediction)[0]
        return str(example_identity)
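A minimal usage sketch for the DeepFace class above; the 'faces' directory layout and the load_image helper are assumptions borrowed from the neighbouring examples:

# Hypothetical usage; the 'faces' directory and load_image are assumed
# from the other examples on this page.
face = DeepFace()
face.pre_train('faces')
face.train()                      # defaults to LinearSVC, looked up by name
img = load_image('faces/person_1/0001.jpg')  # hypothetical test image
print(face.predict(img))          # identity string, or "UnKnown" if no face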
Example #6
def load_image_alignment(path):
    alignment = AlignDlib('models/landmarks.dat')
    img = cv2.imread(path, 1)
    # detect face and return bounding box
    bb = alignment.getLargestFaceBoundingBox(img)
    jc_aligned = alignment.align(96,
                                 img,
                                 bb,
                                 landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
    return jc_aligned
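getLargestFaceBoundingBox returns None when no face is found, and align then returns None as well; a short sketch of guarding for that (the image path is hypothetical):

aligned = load_image_alignment('images/sample.jpg')  # hypothetical path
if aligned is None:
    print('No face detected in the image')
else:
    print(aligned.shape)  # (96, 96, 3) aligned crop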
Example #7
def predict():
    """
    Extracts from webcam feed and passes through served model and ML models to recognise faces
    :return: Name of the Person
    """
    
    # Load served models
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    request_pb2 = predict_pb2.PredictRequest()
    request_pb2.model_spec.name = 'my_model'
    request_pb2.model_spec.signature_name = 'predict'

    # Extract image from webcam and convert it to the required format
    imgData = request.get_data()
    convertImage(imgData)
    im_orig = cv2.imread('app/output.png', 1)
    im_orig = im_orig[..., ::-1]

    # Align image
    alignment = AlignDlib('app/landmarks.dat')
    bb = alignment.getAllFaceBoundingBoxes(im_orig)
    im_aligned = []
    for i in bb:
        j = alignment.align(96, im_orig, i,
                            landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
        im_aligned.append(j)

    for n in im_aligned:
        n = (n / 255.).astype(np.float32)
        new_n = np.expand_dims(n, axis=0)

    # Pass the face-cropped image through the served DL model
    # (note: only the last aligned face is classified here)
    request_pb2.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(new_n, shape=[1, 96, 96, 3]))
    result_future = stub.Predict(request_pb2, 10.)
    embedded_t = tf.make_ndarray(result_future.outputs['scores'])

    # Pass the extracted embeddings into ML models to get the nearest neighbour
    example_predic = knn.predict(embedded_t)
    example_prob = svc.predict_proba(embedded_t)
    print(example_prob)

    if np.any(example_prob > 0.35):
        example_i = encoder.inverse_transform(example_predic)[0]
        print(example_i)
    else:
        print("Not a face from database...")

    maximum = np.max(example_prob, axis=1)
    index_of_maximum = np.where(example_prob == maximum)
    cdsid = {0: 'P0', 1: 'P1', 2: 'P2', 3: 'P3', 4: 'P4', 5: 'P5',
             6: 'P6', 7: 'P7', 8: 'P8', 9: 'P9', 10: 'P10', 11: 'P11',
             12: 'P12', 13: 'P13', 14: 'P14', 15: 'P15', 16: 'P16',
             17: 'P17', 18: 'P18', 19: 'P19', 20: 'P20'}

    print(cdsid[int(index_of_maximum[1][0])])
    response = jsonify(cdsid[int(index_of_maximum[1][0])])
    return response
Example #8
    def __init__(self, anchors, name):
        nn4_small2_pretrained = create_model()
        nn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5')
        self.nn4_small2_pretrained = nn4_small2_pretrained
        self.alignment = AlignDlib('models/landmarks.dat')
        anchors = [(img / 255.).astype(np.float32) for img in anchors]
        anchors_embeddings = [
            self.nn4_small2_pretrained.predict(np.expand_dims(anchor, axis=0))[0]
            for anchor in anchors
        ]
        self.anchors_embeddings = anchors_embeddings
        self.name = name
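The class stores one embedding per anchor image; a plausible companion method (an assumption, not shown in the source) would compare a probe image against those anchors using the squared-distance rule used elsewhere on this page:

    def matches(self, img, threshold=0.7):
        # Hypothetical helper: embed a probe image and compare it against
        # the stored anchor embeddings; the 0.7 threshold is an assumption.
        img = (img / 255.).astype(np.float32)
        probe = self.nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]
        dists = [np.sum(np.square(probe - a)) for a in self.anchors_embeddings]
        return min(dists) < threshold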
Example #9
def main_2():
    # Initialize the OpenFace face alignment utility
    alignment = AlignDlib('models/landmarks.dat')

    cap = cv2.VideoCapture(0)

    ret, frame = cap.read()
    while True:

        #b,g,r = cv2.split(frame)
        #frame = cv2.merge((r,g,b))

        #frame = frame[...,::-1]

        #frame = cv2.resize(frame, (250,250))

        cv2.imwrite('img.jpg', frame)  # write the frame so load_image re-reads it as RGB

        frame = load_image('img.jpg')

        bb = alignment.getLargestFaceBoundingBox(frame)
        if bb is None:
            _, frame = cap.read()
            continue

        # Transform image using specified face landmark indices and crop image to 96x96
        jc_aligned = alignment.align(
            96, frame, bb, landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)

        # Show original image
        plt.subplot(131)
        plt.imshow(frame)

        # Show original image with bounding box
        plt.subplot(132)
        plt.imshow(frame)
        plt.gca().add_patch(
            patches.Rectangle((bb.left(), bb.top()),
                              bb.width(),
                              bb.height(),
                              fill=False,
                              color='red'))

        # Show aligned image
        plt.subplot(133)
        plt.imshow(jc_aligned)

        plt.show()

        if cv2.waitKey(1) & 0xFF == ord('y'):  #save on pressing 'y'
            cv2.destroyAllWindows()
            break
        _, frame = cap.read()
Example #10
def init_sequential_model():
    global model, alignment, df_train, train_paths

    model = create_squential_model()

    mnist = tf.keras.datasets.mnist

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    NAME = 'CNN-{}'.format(int(time.time()))
    tboard_log_dir = os.path.join("logs", NAME)
    tensorboard_callback = TensorBoard(log_dir=tboard_log_dir,
                                       histogram_freq=1)

    model.fit(x=x_train,
              y=y_train,
              epochs=5,
              validation_data=(x_test, y_test),
              callbacks=[tensorboard_callback])

    alignment = AlignDlib('weights/shape_predictor_68_face_landmarks.dat')
Example #11
def normalization(metadata):
    def load_image(path):
        img = cv2.imread(path, 1)
        return img[..., ::-1]
        
    def align_image(img):
        return alignment.align(96, img, alignment.getLargestFaceBoundingBox(img),
                               landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)

    alignment = AlignDlib('models/landmarks.dat')
    embedded = np.zeros((metadata.shape[0], 128))
    
    for i, m in enumerate(metadata):
        # noinspection PyBroadException
        try:
            img = load_image(m.image_path())
            img = align_image(img)
            img = (img / 255.).astype(np.float32)
            embedded[i] = nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]
            recognize_flag = 1

            del img  # free the aligned image early

        except Exception:
            recognize_flag = 0
    return recognize_flag, embedded, i
def setup_landmarks():
    dst_dir = 'models'
    dst_file = os.path.join(dst_dir, 'landmarks.dat')

    if not os.path.exists(dst_file):
        os.makedirs(dst_dir, exist_ok=True)
        download_landmarks(dst_file)


    alignment = AlignDlib('models/landmarks.dat')
    return alignment
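A short sketch of how the aligner returned by setup_landmarks would typically be used, following the pattern of the other examples (the image path and the load_image helper are assumptions):

alignment = setup_landmarks()
img = load_image('images/sample.jpg')  # hypothetical path
bb = alignment.getLargestFaceBoundingBox(img)
if bb is not None:
    aligned = alignment.align(96, img, bb,
                              landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)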
Example #13
    def __init__(self, metadata):
        self.nn4_small2_pretrained = create_model()
        self.nn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5')
        self.alignment = AlignDlib('models/landmarks.dat')
        self.svm_model = LinearSVC()

        self.recognizer = np.zeros((metadata.shape[0], 128))

        embed_vec(metadata, self.recognizer)

        train_recog(metadata, self.recognizer, self.svm_model)
def plot_sample():
    metadata = load_metadata('../data/chapter7/images/')

    # Initialize the OpenFace alignment utility with Dlib's 68 landmarks
    alignment = AlignDlib('../data/chapter7/landmarks.dat')
    # Load one training image
    img = load_image(metadata[45].get_image_path())
    # Detect the face and return its bounding box
    bounding_box = alignment.getLargestFaceBoundingBox(img)
    # Align using the specified landmarks and crop to a 96x96 face image
    aligned_img = alignment.align(96, img, bounding_box, landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)

    # Plot the original image
    plt.subplot(1, 3, 1)
    plt.imshow(img)
    plt.xticks([])
    plt.yticks([])

    # Plot the original image with the face bounding box
    plt.subplot(1, 3, 2)
    plt.imshow(img)
    plt.gca().add_patch(patches.Rectangle(
        (bounding_box.left(), bounding_box.top()),
        bounding_box.width(),
        bounding_box.height(),
        fill=False,
        color='red'
    ))
    plt.xticks([])
    plt.yticks([])

    # Plot the aligned 96x96 face crop
    plt.subplot(1, 3, 3)
    plt.imshow(aligned_img)
    plt.xticks([])
    plt.yticks([])
    plt.show()
def create_pretrain_model():
    nn4_small2 = create_model()
    nn4_small2.load_weights(MODEL_FILE_H5)
    alignment = AlignDlib(LANDMARKER_FILE)

    def align_image(img):
        return alignment.align(96,
                               img,
                               alignment.getLargestFaceBoundingBox(img),
                               landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)

    metadata = load_metadata(IMAGE_DIR)
    embedded = np.zeros((metadata.shape[0], 128))

    for i, m in enumerate(metadata):
        img = load_image(m.get_image_path())
        img = align_image(img)
        # Normalize pixel values
        img = (img / 255.).astype(np.float32)
        # Compute the face embedding vector
        embedded[i] = nn4_small2.predict(np.expand_dims(img, axis=0))[0]
        print('Process', i, m, 'Finish')

    def distance(emb1, emb2):
        return np.sum(np.square(emb1 - emb2))

    def show_pair(idx1, idx2):
        plt.figure(figsize=(8, 3))
        plt.suptitle(
            f'Distance = {distance(embedded[idx1], embedded[idx2]):.2f}')
        plt.subplot(121)
        plt.imshow(load_image(metadata[idx1].get_image_path()))
        plt.xticks([])
        plt.yticks([])
        plt.subplot(122)
        plt.imshow(load_image(metadata[idx2].get_image_path()))
        plt.xticks([])
        plt.yticks([])

    show_pair(1, 45)
    show_pair(28, 47)
    show_pair(46, 47)
    plt.show()
    return embedded
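The pairwise distances printed by show_pair are usually turned into a same/different decision with a fixed threshold; a minimal sketch, assuming a threshold value that would in practice be tuned on labelled pairs:

def is_same_person(emb1, emb2, threshold=0.58):
    # Squared L2 distance as in distance() above; 0.58 is an assumed
    # threshold, to be tuned on a validation set of labelled pairs.
    return np.sum(np.square(emb1 - emb2)) < threshold

embedded = create_pretrain_model()
print(is_same_person(embedded[1], embedded[45]))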
Example #16
def normalization_Flask(img):
    def load_image_Flask(img):
        return img[..., ::-1]

    def align_image_Flask(img):
        return alignment.align(96, img, alignment.getLargestFaceBoundingBox(img),
                               landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)

    alignment = AlignDlib('models/landmarks.dat')
    embedded = np.zeros((1, 128))

    try:
        img = load_image_Flask(img)
        img = align_image_Flask(img)
        img = (img / 255.).astype(np.float32)
        embedded = nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]
        recognize_flag = 1

    except Exception:
        recognize_flag = 0
    return recognize_flag, embedded
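A sketch of how normalization_Flask might sit inside a Flask endpoint; the /recognize route and the global svc classifier and encoder are assumptions, not part of the source:

from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/recognize', methods=['POST'])
def recognize():
    # Decode the posted image bytes, embed the face, then classify.
    data = np.frombuffer(request.get_data(), dtype=np.uint8)
    img = cv2.imdecode(data, cv2.IMREAD_COLOR)
    recognize_flag, emb = normalization_Flask(img)
    if not recognize_flag:
        return jsonify(name='unknown')
    return jsonify(name=str(encoder.inverse_transform(svc.predict([emb]))[0]))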
def create_pretrain_model():
    nn4_small2 = load_model(MODEL_FILE_H5, custom_objects={'tf': tf})
    alignment = AlignDlib(LANDMARKER_FILE)

    def align_image(img):
        return alignment.align(96,
                               img,
                               alignment.getLargestFaceBoundingBox(img),
                               landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)

    metadata = load_metadata(IMAGE_DIR)
    embedded = np.zeros((metadata.shape[0], 128))

    for i, m in enumerate(metadata):
        img = load_image(m.get_image_path())
        img = align_image(img)
        # Normalize pixel values
        img = (img / 255.).astype(np.float32)
        # Compute the face embedding vector
        embedded[i] = nn4_small2.predict(np.expand_dims(img, axis=0))[0]
        print('Process', i, m, 'Finish')

    return embedded
Example #18
import os
import time
import cv2
import dlib
import numpy as np
from PIL import Image
from align import AlignDlib

data_path = '/home/dl-linux/Desktop/face_recognition/CASIA-maxpy-clean/'
data_save_path = '/home/dl-linux/Desktop/face_recognition/save/'

if not os.path.isdir(data_save_path):
    os.makedirs(data_save_path)

alignment = AlignDlib('models/landmarks.dat')


def align_image(img):
    bb = alignment.getLargestFaceBoundingBox(img)
    if bb is None:
        #print 'OMG'
        bb = dlib.rectangle(int(46), int(47), int(201), int(202))
    #print 'bb:'+ str(type(bb))
    return alignment.align(250,
                           img,
                           bb,
                           landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE), bb


i = 0
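The trailing i = 0 suggests a dataset loop follows; a hedged sketch of what that loop might look like (the per-identity directory layout of CASIA is an assumption):

for person in os.listdir(data_path):
    person_dir = os.path.join(data_path, person)
    out_dir = os.path.join(data_save_path, person)
    os.makedirs(out_dir, exist_ok=True)
    for fname in os.listdir(person_dir):
        img = cv2.imread(os.path.join(person_dir, fname), 1)
        aligned, bb = align_image(img)  # align_image returns (crop, box)
        cv2.imwrite(os.path.join(out_dir, fname), aligned)
        i += 1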
Example #19
from model import create_model
from align import AlignDlib
import glob
import imutils
import os
import pandas as pd

# INITIALIZE MODELS
nn4_small2 = create_model()

nn4_small2.summary()

# load the pretrained weights for the CNN model
nn4_small2.load_weights('weights/nn4.small2.v1.h5')

# 68-landmark dataset used for face alignment
alignment = AlignDlib('shape_predictor_68_face_landmarks.dat')

# LOAD Image Data
train_paths = glob.glob("image/*")
print(train_paths)
train_paths_v2 = glob.glob("image/csdl/*")
nb_classes = len(train_paths)

df_train = pd.DataFrame(columns=['index', 'image', 'label', 'name'])

index = 0
for i, train_path in enumerate(train_paths):
    name = train_path.split("\\")[-1]
    images = glob.glob(train_path + "/*")
    for image in images:
        df_train.loc[len(df_train)] = [index, image, i, name]
import cv2
#import matplotlib.pyplot as plt
#import matplotlib.patches as patches

from align import AlignDlib

#get_ipython().run_line_magic('matplotlib', 'inline')

def load_image(path):
    img = cv2.imread(path, 1)
    # OpenCV loads images with color channels
    # in BGR order. So we need to reverse them
    return img[...,::-1]

# Initialize the OpenFace face alignment utility
alignment = AlignDlib('models/landmarks.dat')

# Load an image of Jacques Chirac
jc_orig = load_image(metadata[6].image_path())

# Detect face and return bounding box
bb = alignment.getLargestFaceBoundingBox(jc_orig)

# Transform image using specified face landmark indices and crop image to 96x96
jc_aligned = alignment.align(96, jc_orig, bb, landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)

# Show original image
# plt.subplot(131)
# plt.imshow(jc_orig)

# # Show original image with bounding box
def load_image(path):
    img = cv2.imread(path, 1)
    # OpenCV loads images in BGR channel order by default; convert to RGB
    return img[..., ::-1]


metadata = load_metadata('images')

# Face detection, alignment and extraction
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from align import AlignDlib

# Initialize the OpenFace alignment utility with Dlib's 68 landmarks
alignment = AlignDlib('landmarks.dat')

# Load one training image
img = load_image(metadata[0].image_path())
# Detect the face and return its bounding box
bb = alignment.getLargestFaceBoundingBox(img)
# Align using the specified landmarks and crop to a 96x96 face image
aligned_img = alignment.align(96,
                              img,
                              bb,
                              landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
# Plot the original image
# plt.figure(1)
# plt.subplot(131)
# plt.imshow(img)
# plt.xticks([])
Example #22
import os
import time
import cv2
from glob import glob
from keras.models import load_model
from face_recognition1 import find_id
from eye_detect_recog_final import eye_ext
from model import create_model
from align import AlignDlib

start = time.time()

def sortKeyFunc(s):
    return int(os.path.basename(s)[6:-4])

files = sorted(glob(r"E:\projectImplementation\projectFiles\frames\testcase1\*.jpg"))
files.sort(key=sortKeyFunc)
# print(os.path.basename(files[1])[6:-4])
alignment = AlignDlib('models/landmarks.dat')
eye_cascade_left= cv2.CascadeClassifier("E:\\projectImplementation\\projectFiles\\left_eye_haar.xml")
trainedModel = load_model('my-model1-20190419-230910.h5')

def load_image(path):
    img = cv2.imread(path, 1)
    im = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return im

i=1
abhi_att=[]
aman_att=[]
anomaly=[]
abhi_emotion=[]
aman_emotion=[]
for file in files:
    pass  # the per-frame processing body was truncated in the source


# Tail of a separate frame-rotation helper that was merged into this
# example; the enclosing signature is assumed:
def rotate_frame(src, choice):
    if choice == 3:
        # Rotate 180 and flip horizontally
        src = cv2.rotate(src, rotateCode=cv2.ROTATE_180)
        src = cv2.flip(src, flipCode=1)
    if choice == 4:
        # Rotate 90 counter-clockwise
        src = cv2.rotate(src, rotateCode=cv2.ROTATE_90_COUNTERCLOCKWISE)
    if choice == 5:
        # Rotate 90 counter-clockwise and flip horizontally
        src = cv2.rotate(src, rotateCode=cv2.ROTATE_90_COUNTERCLOCKWISE)
        src = cv2.flip(src, flipCode=1)
    return src


alignment = AlignDlib(
    '/Users/siddhartham/Sid/PycharmProjects/Deep-Face-Recognition-Using-Inception-Model-Keras-OpenCV-Dlib/models/landmarks.dat'
)


def align_image(img):
    return alignment.align(96,
                           img,
                           alignment.getLargestFaceBoundingBox(img),
                           landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)


from glob import glob


def assure_path_exists(path):
    dir = os.path.dirname(path)
    if not os.path.exists(dir):
        os.makedirs(dir)
import os
import numpy as np
import cv2
import dlib
from align import AlignDlib
from model import create_model
import pickle

alignment = AlignDlib('models/landmarks.dat')
model = '/home/legion/Documents/DeepLearning/face-recognition/Caffe model/res10_300x300_ssd_iter_140000.caffemodel'
prototxt = '/home/legion/Documents/DeepLearning/face-recognition/Caffe model/deploy.prototxt.txt'
nn4_small2_pretrained = create_model()
nn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5')

knn = pickle.load(open('knn_model.sav', 'rb'))
svc = pickle.load(open('svc_model.sav', 'rb'))

# landmark indices
INNER_EYES_AND_BOTTOM_LIP = [39, 42, 57]
OUTER_EYES_AND_NOSE = [36, 45, 33]

net = cv2.dnn.readNetFromCaffe(prototxt, model)
"""
def recognise(img):
    (h, w) = img.shape[:2]
    net.setInput(blob)
    blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0,
    (300, 300), (104.0, 177.0, 123.0))
    detections = net.forward()
    for i in range(0, detections.shape[2]):
    	confidence = detections[0, 0, i, 2]
Example #25
class WebcamTest():
    def __init__(self):
        # Initialize the dlib face alignment utility
        self.alignment = AlignDlib('shape_predictor_68_face_landmarks.dat')
        self.detector = dlib.get_frontal_face_detector()
        # Load the model
        self.model = load_model('weights_final.hdf5',
                                custom_objects={
                                    'triplet_loss': self.triplet_loss,
                                    'tf': tf
                                })

        # Get the web camera feed
        self.cap = cv2.VideoCapture(0)

        # Defining variables
        self.threshold = 0.7
        self.base_images = []
        self.distances = []
        self.set_new_person = False
        self.saving = False
        self.pressed = 0
        self.next_base_image = 0
        self.names = []
        self.saved_images = []
        self.counter = 0

        # Defining the path for image saving
        self.path = os.path.join(os.getcwd(), 'persons')
        # Delete previous directories
        if os.path.isdir(self.path):
            shutil.rmtree(self.path, onerror=self.onerror)
        # Create the 'persons' directory
        if not os.path.isdir(self.path):
            os.mkdir(self.path)

    def onerror(self, func, path, exc_info):
        """
        Error handler for `shutil.rmtree`.

        If the error is due to an access error (read only file)
        it attempts to add write permission and then retries.

        If the error is for another reason it re-raises the error.

        Usage : `shutil.rmtree(path, onerror=onerror)`
        """
        import stat
        if not os.access(path, os.W_OK):
            # Is the error an access error ?
            os.chmod(path, stat.S_IWUSR)
            func(path)
        else:
            raise

    # take a bounding box predicted by dlib and convert it
    # to the format (x, y, w, h) as we would normally do
    # with OpenCV
    def rect_to_bb(self, rect):
        x = rect.left()
        y = rect.top()
        w = rect.right() - x
        h = rect.bottom() - y

        # return a tuple of (x, y, w, h)
        return (x, y, w, h)

    # The model calculates the distance between the two images
    def prediction(self, network, pic_1, pic_2):
        # increase img dimensions
        img1 = np.expand_dims(pic_1, axis=0)
        img1 = np.expand_dims(img1, axis=3)
        img2 = np.expand_dims(pic_2, axis=0)
        img2 = np.expand_dims(img2, axis=3)
        # calculate the network's prediction on img1 and img2
        preds = network.predict([img1, img2, img1])[0]
        pred1 = preds[:128]
        pred2 = preds[128:256]
        # calculate the distance between the two images
        dist = np.sum(np.square(pred1 - pred2))
        # print("distance between pictures: {:2.4f}".format(dist))
        return dist

    def triplet_loss(self, y_true, y_pred, alpha=0.2):
        """
        Implementation of the triplet loss function
        Arguments:
        y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.
        y_pred -- python list containing three objects:
                anchor -- the encodings for the anchor data
                positive -- the encodings for the positive data (similar to anchor)
                negative -- the encodings for the negative data (different from anchor)
        Returns:
        loss -- real number, value of the loss
        """

        anchor = y_pred[:, :128]
        positive = y_pred[:, 128:256]
        negative = y_pred[:, 256:]

        # distance between the anchor and the positive
        pos_dist = K.sum(K.square(anchor - positive), axis=1)

        # distance between the anchor and the negative
        neg_dist = K.sum(K.square(anchor - negative), axis=1)

        # compute loss
        basic_loss = pos_dist - neg_dist + alpha
        loss = K.maximum(basic_loss, 0.0)

        return loss

    # Aligns the given image
    def image_processing(self, image):
        # Detect face and return bounding box
        rect = self.alignment.getLargestFaceBoundingBox(image)
        img_aligned = self.alignment.align(
            64,
            image,
            rect,
            landmarkIndices=AlignDlib.INNER_EYES_AND_BOTTOM_LIP)
        return img_aligned

    # Saves the given image with the given name
    def save_pictures(self, image, path, name, extension):
        img_item = path + '\\' + name + "." + extension
        cv2.imwrite(img_item, image)

    # The main part of the class
    def cycle(self):
        # Capture frame-by-frame
        ret, frame = self.cap.read()
        # Turn the image into a greyscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect the faces
        rects = self.detector(gray, 1)
        for rect in rects:
            (x, y, w, h) = self.rect_to_bb(rect)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]

            # Align the image
            aligned_image = self.alignment.align(
                64,
                gray,
                rect,
                landmarkIndices=AlignDlib.INNER_EYES_AND_BOTTOM_LIP)

            # If the alignment was successful
            if aligned_image is not None:
                # If there is a new recording, save the recorded images (the saving part is done in 'webcam_app.py')
                if self.set_new_person:
                    self.counter += 1
                    if self.counter % 10 == 0:
                        if len(self.saved_images) < 10:
                            self.saved_images.append(aligned_image)
                        else:
                            self.counter = 0
                # Create a directory for the new person and save the person's images
                if self.saving:
                    self.path_person = self.path + '\\' + self.names[-1]
                    os.mkdir(self.path_person)
                    for (i, picture) in enumerate(self.base_images[-1]):
                        self.save_pictures(picture, self.path_person,
                                           self.names[-1] + '_' + str(i),
                                           'jpg')
                    self.saving = False
                # Calculate the average distance from the save distance
                for i in range(len(self.base_images)):
                    average_distance = 0
                    for base_image in self.base_images[i]:
                        average_distance += self.prediction(
                            self.model, aligned_image / 255.0,
                            base_image / 255.0)
                    self.distances[i] = average_distance / len(
                        self.base_images[i])
            # If the picture alignment was unsuccessful, set the distance for this picture to four
            else:
                for i in range(len(self.distances)):
                    self.distances[i] = 4
            if self.set_new_person:
                color = (0, 255, 0)
            else:
                color = (255, 0, 0)  # BGR
            stroke = 2
            end_cord_x = x + w
            end_cord_y = y + h
            cv2.rectangle(frame, (x, y), (end_cord_x, end_cord_y), color,
                          stroke)
            min_index = 0
            # Find the person with the minimum distance
            for (i, distance) in enumerate(self.distances):
                if distance < self.distances[min_index]:
                    min_index = i
            # Display the distance values on the feed
            if len(self.distances
                   ) > 0 and self.distances[min_index] < self.threshold:
                font = cv2.FONT_HERSHEY_SIMPLEX
                name = self.names[min_index] + "{:2.2f}".format(
                    self.distances[min_index])
                color = (255, 255, 255)
                stroke = 1
                cv2.putText(frame, name, (x, y), font, 1, color, stroke,
                            cv2.LINE_AA)
            else:
                font = cv2.FONT_HERSHEY_SIMPLEX
                color = (255, 255, 255)
                stroke = 1
                if len(self.distances) > 0:
                    name = "{:2.2f}".format(self.distances[min_index])
                    cv2.putText(frame, name, (x, y), font, 1, color, stroke,
                                cv2.LINE_AA)

        return frame
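A minimal driver loop for the WebcamTest class, assuming cycle() is called once per frame as its name and return value suggest; the window name and key handling are assumptions:

test = WebcamTest()
while True:
    frame = test.cycle()
    cv2.imshow('feed', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
test.cap.release()
cv2.destroyAllWindows()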
Example #26
# It aligns the faces from the pictures. When a picture cannot be processed by
# the aligner, it is substituted with a previously cropped picture
# downloaded from the 'http://conradsanderson.id.au/lfwcrop/' website
#################################################################################

# This library lets us work with hdf5 format files.
import tables
# Used for image processing and face detection.
import cv2

# Import the necessary packages for face aligning
import dlib
from align import AlignDlib

# Initialize the OpenFace face alignment utility
alignment = AlignDlib('shape_predictor_68_face_landmarks.dat')

# Creating the HDF5 file.
img_dtype = tables.UInt8Atom()
data_shape = (0, 64, 64)
hdf5_path = 'dataset_lfw.hdf5'
hdf5_write = tables.open_file(hdf5_path, mode='w')
storage = hdf5_write.create_earray(hdf5_write.root, 'images', img_dtype, shape=data_shape)

# Defining lists needed for the algorithm
y_labels = []
y_storage = []
x_images = []

# Load the dataset made by the 'setMaker.py' program
hdf5_path = 'dataset_lfw_set.hdf5'
Example #27
metadata = load_metadata('images')

# Creating CNN model 
nn4_small2_pretrained = create_model()

print('1. Model Created Successfully')


#Loading pre-trained weights
nn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5')

print('2. Weights Loaded Successfully')


# Initialize the OpenFace face alignment utility
alignment = AlignDlib('models/landmarks.dat')

print('3. Openface utility initialized Successfully')


#Generating the embedding for dataset images
embedded = np.zeros((metadata.shape[0], 128))

for i, m in enumerate(metadata):
    img = load_image(m.image_path())
    img = align_image(img)

    # scale RGB values to interval [0,1]
    img = (img / 255.).astype(np.float32)

    # obtain embedding vector for image
    embedded[i] = nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]
"""
Created on Sat Dec 14 00:01:58 2019

@author: Bharath
"""

import cv2
import numpy as np
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from align import AlignDlib
import dlib
from model import create_model
import os

alignment = AlignDlib('shape_predictor_68_face_landmarks.dat')
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

nn4_small2_pretrained = create_model()
nn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5')


def distance(emb1, emb2):
    return np.sum(np.square(emb1 - emb2))


embeddeds = np.zeros((128, 1))

for filename in os.listdir('employees'):
    print(filename)
    img_path = 'employees/' + filename
Example #29
def findd():
    import cv2
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches
    import os
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    import PIL.Image as Image
    import numpy as np
    from model import create_model
    from keras import backend as K
    import tensorflow as tf
    from align import AlignDlib

    tf.keras.backend.clear_session()
    K.clear_session()

    alignment = AlignDlib('models/landmarks.dat')

    # img= load_image('E:\\projectImplementation\\projectFiles\\imageee.png')
    img = cv2.imread('E:\\projectImplementation\\projectFiles\\imageee.png', 1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    bb = alignment.getLargestFaceBoundingBox(img)

    im_aligned = alignment.align(96,
                                 img,
                                 bb,
                                 landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)

    anjani = np.load('anjani.npy')
    aman = np.load('aman.npy')
    rapaka = np.load('rapaka.npy')
    akc = np.load('akc.npy')
    abhi = np.load('abhishek.npy')
    hemanth = np.load('hemanth.npy')
    subham = np.load('subham.npy')
    vivek = np.load('vivek.npy')
    nn4_small2_pretrained = create_model()
    nn4_small2_pretrained.load_weights('nn4.small2.v1.h5')
    im_aligned = (im_aligned / 255.).astype(np.float32)
    im_aligned = np.expand_dims(im_aligned, axis=0)
    embedded = nn4_small2_pretrained.predict(im_aligned)[0]
    # print(embedded)
    #     tf.keras.backend.clear_session()
    # K.clear_session()
    distances = []
    distances.append(np.linalg.norm(embedded - anjani))
    distances.append(np.linalg.norm(embedded - abhi))
    distances.append(np.linalg.norm(embedded - subham))
    distances.append(np.linalg.norm(embedded - rapaka))
    distances.append(np.linalg.norm(embedded - hemanth))
    distances.append(np.linalg.norm(embedded - aman))
    distances.append(np.linalg.norm(embedded - akc))
    fin = min(distances)
    if (fin == distances[0]):
        return "Anjani"
    if (fin == distances[1]):
        return "Abhishek"
    if (fin == distances[2]):
        return "Subham"
    if (fin == distances[3]):
        return "Rapaka"
    if (fin == distances[4]):
        return "Hemanth"
    if (fin == distances[5]):
        return "Aman"
    if (fin == distances[6]):
        return "Akshay"
Example #30
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import os
from align import AlignDlib

# img_path = 'images/Arnold_Schwarzenegger/Arnold_Schwarzenegger_0002.jpg'
# img_path =  'data/n000149/0391_01.jpg'
path = 'vggface2_test/test'
faces = os.listdir(path)
out_path = 'vggface2_test/aligned/'



# Initialize the OpenFace face alignment utility
alignment = AlignDlib('models/landmarks.dat')


for face in faces:
    os.system('mkdir ' + out_path + face)
    pics = os.listdir(path + '/' + face)
    for pic in pics:
        img = cv2.imread(path + '/' + face + '/' + pic, 1)
        bb = alignment.getLargestFaceBoundingBox(img)
        img_aligned = alignment.align(96, img, bb,
                                      landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
        if img_aligned is not None:
            cv2.imshow('img', img)
            cv2.waitKey(1)  # assumed: the original line was truncated after 'cv2.'
            cv2.imwrite(out_path + face + '/' + pic, img_aligned)
        print(pic)