# ===== Example 1 =====
 def __init__(self, db_name):
     """Set up the FaceNet embedder and open the backing SQLite database.

     :param db_name: filename of the SQLite database file.
     """
     self.db_name = db_name
     self.embedder = keras_facenet.FaceNet()
     self._load_db()
# ===== Example 2 =====
def run_test(model_name):
    """Verify keras-facenet embeddings against the original facenet graph.

    Downloads and extracts the named pretrained model, computes embeddings
    with keras-facenet, then recomputes them by running the raw TensorFlow
    checkpoint directly, and asserts the two results agree.

    :param model_name: key identifying the pretrained facenet model.
    """
    download_and_extract.download_and_extract_file(
        model_name=model_name,
        data_dir=facenet_data_dir
    )
    kfn = keras_facenet.FaceNet(
        key=model_name,
        cache_folder=keras_facenet_data_dir
    )
    keras_embeddings = kfn.embeddings(images)
    with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
        load_model(os.path.join(facenet_data_dir, model_name))
        graph = tf.compat.v1.get_default_graph()
        images_placeholder = graph.get_tensor_by_name("input:0")
        embeddings_tensor = graph.get_tensor_by_name("embeddings:0")
        phase_train_placeholder = graph.get_tensor_by_name("phase_train:0")
        # keras-facenet's metadata records which preprocessing this
        # checkpoint was trained with; mirror it for the raw graph.
        if kfn.metadata['fixed_image_standardization']:
            preprocessed = (images - 127.5) / 127.5
        else:
            preprocessed = np.array(
                [facenet.prewhiten(image) for image in images]
            )
        tf_embeddings = sess.run(
            embeddings_tensor,
            feed_dict={
                images_placeholder: preprocessed,
                phase_train_placeholder: False,
            },
        )
    print('Comparing results for', model_name)
    np.testing.assert_almost_equal(tf_embeddings, keras_embeddings)
# ===== Example 3 =====
import torch.backends.cudnn as cudnn
import numpy as np
import dlib
from imutils.face_utils import FaceAligner
import keras_facenet
from keras.models import load_model
from fer import FER
from mtcnn.mtcnn import MTCNN

# Three large constants used as a color palette — presumably combined with
# per-class ids when drawing annotations; verify against the drawing code.
palette = (2**11 - 1, 2**15 - 1, 2**20 - 1)

# Label sets emitted by the gender and age classifiers loaded below.
gender_list = ['Male', 'Female']
age_list = ['0-2', '3-9', '10-20', '21-27', '28-45', '46-65', '66-100']
# Detectors/embedders instantiated once at import time.
detector = FER()                    # facial-emotion recognizer (used by getemo)
face_detector = MTCNN()             # face bounding-box detector
embedder = keras_facenet.FaceNet()  # FaceNet face-embedding model
gendict = dict()                    # cache dict, populated elsewhere in the file

# Keras classifiers for age and gender, loaded from local HDF5 files.
age_model = load_model('./models/age.h5')
gender_model = load_model('./models/gender.h5')


def getemo(img):
    x = []
    try:
        x = detector.detect_emotions(img)
    except:
        pass
    if len(x) > 0:
        emo = max(x[0]['emotions'], key=x[0]['emotions'].get)
    else:
# ===== Example 4 =====
import keras_facenet

# The pretrained FaceNet network, used as a frozen feature extractor that
# get_model() attaches a classification head to.
base_model = keras_facenet.FaceNet().model

from keras.models import Model, Sequential, load_model
from keras.layers import Dense
from keras.preprocessing.image import ImageDataGenerator


def get_model():
    """Build a 7-class classifier on top of the frozen FaceNet base model.

    Attaches a 7-way softmax head to ``base_model``'s output, freezes every
    base-model layer so only the new head trains, and compiles the result
    with RMSprop and categorical cross-entropy.

    :return: a compiled Keras ``Model`` ready for training.
    """
    features = base_model.output
    predictions = Dense(7, activation='softmax')(features)
    classifier = Model(inputs=base_model.input, outputs=predictions)

    # Train only the randomly-initialized head; keep pretrained weights fixed.
    for frozen_layer in base_model.layers:
        frozen_layer.trainable = False

    classifier.compile(
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=["acc"],
    )

    return classifier


def fit(folder, batch_size):
    datagen = ImageDataGenerator(horizontal_flip=True,\
        validation_split = 0.1, rescale=1/255, rotation_range=65,
        zoom_range=[0.5, 1], height_shift_range=0.2)