Example #1
def load_model(model):
    """Loads the FaceNet model from its directory path.
    Checks if the model is a model directory (containing a metagraph and a checkpoint file)
    or if it is a protocol buffer file with a frozen graph.
    Note: This is a modified version of the load_model() function from facenet.py in the lib directory,
    changed so that it returns the graph object.
    Args:
        model: model path
    Returns:
        graph: TensorFlow graph object of the model
    """

    model_exp = os.path.expanduser(model)
    if os.path.isfile(model_exp):
        print('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            # import_graph_def returns None without return_elements, so fetch
            # the populated default graph instead
            tf.import_graph_def(graph_def, name='')
            return tf.get_default_graph()
    else:
        print('Model directory: %s' % model_exp)
        meta_file, ckpt_file = get_model_filenames(model_exp)

        print('Metagraph file: %s' % meta_file)
        print('Checkpoint file: %s' % ckpt_file)

        saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))
        # saver.restore returns None, so return the restored default graph
        saver.restore(tf.get_default_session(),
                      os.path.join(model_exp, ckpt_file))
        return tf.get_default_graph()
Example #2
def main(args):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model metagraph and checkpoint
            print("Model directory: %s" % args.model_dir)
            meta_file, ckpt_file = fc.get_model_filenames(os.path.expanduser(args.model_dir))

            print("Metagraph file: %s" % meta_file)
            print("Checkpoint file: %s" % ckpt_file)

            model_dir_exp = os.path.expanduser(args.model_dir)
            saver = tf.train.import_meta_graph(os.path.join(model_dir_exp, meta_file), clear_devices=True)
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            saver.restore(sess, os.path.join(model_dir_exp, ckpt_file))

            # Retrieve the protobuf graph definition and fix the batch norm nodes
            input_graph_def = sess.graph.as_graph_def()

            # Freeze the graph def
            output_graph_def = freeze_graph_def(sess, input_graph_def, "embeddings,label_batch")

        # Serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(args.output_file, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph: %s" % (len(output_graph_def.node), args.output_file))
Example #3
def load_facenet(model_dir):
    meta_file, ckpt_file = facenet.get_model_filenames(model_dir)
    print('Metagraph file: %s' % meta_file)
    print('Checkpoint file: %s' % ckpt_file)
    facenet.load_model(model_dir, meta_file, ckpt_file)
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
        "phase_train:0")
    image_size = int(images_placeholder.get_shape()[1])
    embedding_size = int(embeddings.get_shape()[1])
    return (images_placeholder, embeddings,
            phase_train_placeholder), (image_size, embedding_size)
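A hypothetical usage of load_facenet(): compute embeddings for a batch of aligned face images. The model path and the image_paths list are placeholders:

with tf.Graph().as_default(), tf.Session() as sess:
    (images_ph, embeddings, phase_train_ph), (image_size, emb_size) = \
        load_facenet('models/20170512-110547')  # hypothetical path
    images = facenet.load_data(image_paths, False, False, image_size)
    emb_array = sess.run(embeddings,
                         feed_dict={images_ph: images, phase_train_ph: False})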
Example #4
def getFacenetFeatures(splits, facenetModelDir, lfwAlignedDir):
    print("  + Loading Facenet features.")

    video_features = {}

    # For every video in the pairs, create list for features
    for split in splits:
        for pair in split:
            if not pair[0] in video_features:
                video_features[pair[0]] = []
            if not pair[1] in video_features:
                video_features[pair[1]] = []
    
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            meta_file, ckpt_file = facenet.get_model_filenames(facenetModelDir)
            facenet.load_model(facenetModelDir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # For every video get the aligned first 100 frames and create features of them
            for video in video_features:
                repCache = {}
                videoDir = os.path.join(lfwAlignedDir, video)
                image_paths = os.listdir(videoDir)
                images = loadFacenetImages(image_paths, videoDir, image_size)

                # Feed single batch of 100 images to network for features
                feed_dict = { images_placeholder:images }
                emb_array = sess.run(embeddings, feed_dict=feed_dict)

                video_features[video] = emb_array
    return video_features
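loadFacenetImages() is not shown in this example. A plausible sketch under the assumption that it joins the directory onto each filename and delegates to facenet's standard loader with random crop and flip disabled:

def loadFacenetImages(image_paths, videoDir, image_size):
    # Assumption: build full paths and reuse facenet.load_data
    full_paths = [os.path.join(videoDir, name) for name in image_paths]
    return facenet.load_data(full_paths, False, False, int(image_size))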
Example #5
reload(sys)
sys.setdefaultencoding("utf-8")
# fileConfig('logger_config.ini')
# logger_error = logging.getLogger('errorhandler')

minsize = 60  # minimum size of face
threshold = [0.6, 0.7, 0.7]  # threshold for each of the three steps
factor = 0.709  # scale factor
margin = 44
image_size = 160
detect_model_dir = '/home/liubo-it/FaceRecognization/facenet/data'
recognize_model_dir = '/home/liubo-it/FaceRecognization/facenet/models/casia_facenet/20170208-100636/valid'


sess = tf.Session()
meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(recognize_model_dir))
saver = tf.train.import_meta_graph(os.path.join(recognize_model_dir, meta_file))
saver.restore(sess, os.path.join(recognize_model_dir, ckpt_file))
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")


def extract_feature_from_file(pic_path):
    # Includes both face detection and face recognition
    # align_face(pic_path)
    images = facenet.load_data([pic_path], False, False, image_size)
    if images.shape[-1] != 3:
        return None
    feed_dict = {images_placeholder: images}
    face_feature = sess.run(embeddings, feed_dict=feed_dict)
    return face_feature
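A hypothetical usage of extract_feature_from_file(): compare two faces by the L2 distance between their embeddings. The image paths are placeholders:

import numpy as np

feat_a = extract_feature_from_file('face_a.jpg')
feat_b = extract_feature_from_file('face_b.jpg')
if feat_a is not None and feat_b is not None:
    # FaceNet embeddings are L2-normalized, so a smaller distance
    # means the two faces are more likely the same person
    dist = np.linalg.norm(feat_a[0] - feat_b[0])
    print('L2 distance: %.4f' % dist)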
Example #6
def facenetExp(lfwAligned, facenetModelDir, cls):
    df = pd.DataFrame(columns=('nPpl', 'nImgs',
                               'trainTimeSecMean', 'trainTimeSecStd',
                               'predictTimeSecMean', 'predictTimeSecStd',
                               'accsMean', 'accsStd'))


    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            meta_file, ckpt_file = facenet.get_model_filenames(facenetModelDir)
            facenet.load_model(facenetModelDir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            repCache = {}

            df_i = 0
            for nPpl in nPplVals:
                print(" + nPpl: {}".format(nPpl))
                (X, y) = getFacenetData(lfwAligned, nPpl, nImgs, image_size)
                nSampled = X.shape[0]
                ss = ShuffleSplit(nSampled, n_iter=10, test_size=0.1, random_state=0)

                allTrainTimeSec = []
                allPredictTimeSec = []
                accs = []

                for train, test in ss:
                    X_train = []
                    for img in X[train]:
                        h = hash(str(img.data))
                        if h in repCache:
                            rep = repCache[h]
                        else:
                            imgs = [img]
                            imgs = np.array(imgs)
                            feed_dict = { images_placeholder:imgs }
                            emb = sess.run(embeddings, feed_dict=feed_dict)
                            rep = emb[0]
                            repCache[h] = rep
                        X_train.append(rep)

                    start = time.time()
                    X_train = np.array(X_train)
                    cls.fit(X_train, y[train])
                    trainTimeSec = time.time() - start
                    allTrainTimeSec.append(trainTimeSec)

                    start = time.time()
                    X_test = []
                    for img in X[test]:
                        imgs = [img]
                        imgs = np.array(imgs)
                        feed_dict = { images_placeholder:imgs }
                        emb = sess.run(embeddings, feed_dict=feed_dict)
                        X_test.append(emb[0])
                    y_predict = cls.predict(X_test)
                    predictTimeSec = time.time() - start
                    allPredictTimeSec.append(predictTimeSec / len(test))
                    y_predict = np.array(y_predict)

                    acc = accuracy_score(y[test], y_predict)
                    accs.append(acc)

                df.loc[df_i] = [nPpl, nImgs,
                                np.mean(allTrainTimeSec), np.std(allTrainTimeSec),
                                np.mean(allPredictTimeSec), np.std(allPredictTimeSec),
                                np.mean(accs), np.std(accs)]
                df_i += 1

    return df
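A hypothetical driver for facenetExp(). nPplVals and nImgs are module-level globals in the original script; the values, paths, and classifier below are assumptions:

from sklearn.svm import SVC

nPplVals = [10, 25, 50]
nImgs = 20
df = facenetExp('data/lfw-aligned', 'models/facenet', SVC(kernel='linear'))
print(df)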
Example #7
def facenetExp(lfwAligned, facenetModelDir, cls):
    df = pd.DataFrame(columns=('nPpl', 'nImgs', 'trainTimeSecMean',
                               'trainTimeSecStd', 'predictTimeSecMean',
                               'predictTimeSecStd', 'accsMean', 'accsStd'))

    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            meta_file, ckpt_file = facenet.get_model_filenames(facenetModelDir)
            facenet.load_model(facenetModelDir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            repCache = {}

            df_i = 0
            for nPpl in nPplVals:
                print(" + nPpl: {}".format(nPpl))
                (X, y) = getFacenetData(lfwAligned, nPpl, nImgs, image_size)
                nSampled = X.shape[0]
                ss = ShuffleSplit(nSampled,
                                  n_iter=10,
                                  test_size=0.1,
                                  random_state=0)

                allTrainTimeSec = []
                allPredictTimeSec = []
                accs = []

                for train, test in ss:
                    X_train = []
                    for img in X[train]:
                        h = hash(str(img.data))
                        if h in repCache:
                            rep = repCache[h]
                        else:
                            imgs = [img]
                            imgs = np.array(imgs)
                            feed_dict = {images_placeholder: imgs}
                            emb = sess.run(embeddings, feed_dict=feed_dict)
                            rep = emb[0]
                            repCache[h] = rep
                        X_train.append(rep)

                    start = time.time()
                    X_train = np.array(X_train)
                    cls.fit(X_train, y[train])
                    trainTimeSec = time.time() - start
                    allTrainTimeSec.append(trainTimeSec)

                    start = time.time()
                    X_test = []
                    for img in X[test]:
                        imgs = [img]
                        imgs = np.array(imgs)
                        feed_dict = {images_placeholder: imgs}
                        emb = sess.run(embeddings, feed_dict=feed_dict)
                        X_test.append(emb[0])
                    y_predict = cls.predict(X_test)
                    predictTimeSec = time.time() - start
                    allPredictTimeSec.append(predictTimeSec / len(test))
                    y_predict = np.array(y_predict)

                    acc = accuracy_score(y[test], y_predict)
                    accs.append(acc)

                df.loc[df_i] = [
                    nPpl, nImgs,
                    np.mean(allTrainTimeSec),
                    np.std(allTrainTimeSec),
                    np.mean(allPredictTimeSec),
                    np.std(allPredictTimeSec),
                    np.mean(accs),
                    np.std(accs)
                ]
                df_i += 1

    return df
Example #8
reload(sys)
sys.setdefaultencoding("utf-8")
# fileConfig('logger_config.ini')
# logger_error = logging.getLogger('errorhandler')

minsize = 60  # minimum size of face
threshold = [0.6, 0.7, 0.7]  # threshold for each of the three steps
factor = 0.709  # scale factor
margin = 44
image_size = 160
detect_model_dir = '/home/liubo-it/FaceRecognization/facenet/data'
recognize_model_dir = '/home/liubo-it/FaceRecognization/facenet/models/casia_facenet/20170208-100636/valid'

sess = tf.Session()
meta_file, ckpt_file = facenet.get_model_filenames(
    os.path.expanduser(recognize_model_dir))
saver = tf.train.import_meta_graph(os.path.join(recognize_model_dir,
                                                meta_file))
saver.restore(sess, os.path.join(recognize_model_dir, ckpt_file))
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")


def extract_feature_from_file(pic_path):
    # Includes both face detection and face recognition
    # align_face(pic_path)
    images = facenet.load_data([pic_path], False, False, image_size)
    if images.shape[-1] != 3:
        return None
    feed_dict = {images_placeholder: images}
    face_feature = sess.run(embeddings, feed_dict=feed_dict)
    return face_feature
Example #9
def facenetExp(ytfAligned, lfwAligned, facenetModelDir, cls):
    df = pd.DataFrame(columns=('nPpl', 'nImgs', 'trainTimeSecMean',
                               'trainTimeSecStd', 'predictTimeSecMean',
                               'predictTimeSecStd', 'accsMean', 'accsStd'))

    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            meta_file, ckpt_file = facenet.get_model_filenames(facenetModelDir)
            facenet.load_model(facenetModelDir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            repCache = {}

            df_i = 0
            for nPpl in nPplVals:
                print(" + nPpl: {}".format(nPpl))
                (X, y) = getFacenetData(ytfAligned, lfwAligned, nPpl, nImgs,
                                        image_size)
                #nSampled = X.shape[0]
                #print 'nSampled:', nSampled
                #ss = ShuffleSplit(nSampled, n_iter=10, test_size=0.1, random_state=0)

                allTrainTimeSec = []
                allPredictTimeSec = []
                accs = []

                X_train = []
                Y_train = []
                X_test = []
                Y_test = []

                for index, (train, test) in enumerate(X):
                    #print 'split:', train, test
                    imgs = np.array(train)  # use vid as batch and one forward
                    feed_dict = {images_placeholder: imgs}
                    emb = sess.run(embeddings, feed_dict=feed_dict)
                    X_train.extend(emb)
                    Y_train.extend([y[index] for img in train])

                    imgs = np.array(test)  # use vid as batch and one forward
                    feed_dict = {images_placeholder: imgs}
                    emb = sess.run(embeddings, feed_dict=feed_dict)
                    X_test.extend(emb)
                    Y_test.extend([y[index] for img in test])

                start = time.time()
                X_train = np.array(X_train)
                Y_train = np.array(Y_train)
                cls.fit(X_train, Y_train)
                trainTimeSec = time.time() - start
                allTrainTimeSec.append(trainTimeSec)

                start = time.time()

                #for vid in test:
                #    for img in vid:
                #        imgs = [img]
                #        imgs = np.array(imgs)
                #        feed_dict = { images_placeholder:imgs }
                #        emb = sess.run(embeddings, feed_dict=feed_dict)
                #        X_test.append(emb[0])
                #        Y_test.append(y[index])

                y_predict = cls.predict(X_test)
                predictTimeSec = time.time() - start
                # divide by all predictions made, not just the last split
                allPredictTimeSec.append(predictTimeSec / len(X_test))
                y_predict = np.array(y_predict)
                Y_test = np.array(Y_test)
                acc = accuracy_score(Y_test, y_predict)
                accs.append(acc)

                print('accs:', accs)
                df.loc[df_i] = [
                    nPpl, nImgs,
                    np.mean(allTrainTimeSec),
                    np.std(allTrainTimeSec),
                    np.mean(allPredictTimeSec),
                    np.std(allPredictTimeSec),
                    np.mean(accs),
                    np.std(accs)
                ]
                df_i += 1

    return df
Example #10
def facenetExp(lfwAligned, facenetModelDir, cls):
    df = pd.DataFrame(columns=('nPpl', 'nImgs',
                               'trainTimeSecMean', 'trainTimeSecStd',
                               'predictTimeSecMean', 'predictTimeSecStd',
                               'accsMean', 'accsStd'))


    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            meta_file, ckpt_file = facenet.get_model_filenames(facenetModelDir)
            facenet.load_model(facenetModelDir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            repCache = {}

            df_i = 0
            for nPpl in nPplVals:
                print(" + nPpl: {}".format(nPpl))
                (X, y) = getFacenetData(lfwAligned, nPpl, nImgs, image_size)
                nSampled = X.shape[0]
                print('nSampled:', nSampled)
                ss = ShuffleSplit(nSampled, n_iter=10, test_size=0.1, random_state=0)

                allTrainTimeSec = []
                allPredictTimeSec = []
                accs = []

                for train, test in ss:
                    #print 'split:', train, test
                    X_train = []
                    Y_train = []
                    for index, vid in zip(train, X[train]):
                        imgs = vid # use vid as batch and one forward
                        imgs = np.array(imgs)
                        feed_dict = { images_placeholder:imgs }
                        rep_array = sess.run(embeddings, feed_dict=feed_dict)
                        rep_array = np.array(rep_array)
                        print('train', rep_array.shape, rep_array.mean(axis=0).shape)
                        X_train.append(rep_array.mean(axis=0))
                        Y_train.append(y[index])

                    start = time.time()
                    X_train = np.array(X_train)
                    Y_train = np.array(Y_train)
                    cls.fit(X_train, Y_train)
                    trainTimeSec = time.time() - start
                    allTrainTimeSec.append(trainTimeSec)

                    start = time.time()
                    X_test = []
                    Y_test = []
                    for index, vid in zip(test, X[test]):
                        imgs = vid
                        imgs = np.array(imgs)
                        feed_dict = { images_placeholder:imgs }
                        rep_array = sess.run(embeddings, feed_dict=feed_dict)
                        rep_array = np.array(rep_array)
                        print('test', rep_array.shape, rep_array.mean(axis=0).shape)
                        X_test.append(rep_array.mean(axis=0))
                        Y_test.append(y[index])

                    y_predict = cls.predict(X_test)
                    predictTimeSec = time.time() - start
                    allPredictTimeSec.append(predictTimeSec / len(test))
                    y_predict = np.array(y_predict)
                    Y_test = np.array(Y_test)
                    acc = accuracy_score(Y_test, y_predict)
                    accs.append(acc)
                
                print('accs:', accs)
                df.loc[df_i] = [nPpl, nImgs,
                                np.mean(allTrainTimeSec), np.std(allTrainTimeSec),
                                np.mean(allPredictTimeSec), np.std(allPredictTimeSec),
                                np.mean(accs), np.std(accs)]
                df_i += 1

    return df