Example #1
    def __init__(self, args):
        self.args = args
        self.model = FaceNet(args).model
        self.train_datasets, self.nrof_train = create_datasets_from_tfrecord(
            tfrcd_dir=args.datasets, batch_size=args.batch_size, phase='train')

        self.val_datasets, self.nrof_val = create_datasets_from_tfrecord(
            tfrcd_dir=args.datasets, batch_size=args.batch_size, phase='val')
        self.lr_schedule = schedules.ExponentialDecay(args.learning_rate,
                                                      decay_steps=10000,
                                                      decay_rate=0.96,
                                                      staircase=True)

        self.optimizer = Adam(learning_rate=self.lr_schedule,
                              beta_1=0.9,
                              beta_2=0.999,
                              epsilon=0.1)
        self.checkpoint = tf.train.Checkpoint(
            epoch=tf.Variable(0, dtype=tf.int64),
            n_iter=tf.Variable(0, dtype=tf.int64),
            best_pred=tf.Variable(0.0, dtype=tf.float32),
            optimizer=self.optimizer,
            model=self.model)
        self.manager = tf.train.CheckpointManager(self.checkpoint,
                                                  args.checkpoint_dir,
                                                  max_to_keep=3)
        check_folder(args.log_dir)
        self.train_summary_writer = tf.summary.create_file_writer(args.log_dir)
Example #2
class Classifier():
    def __init__(self):
        self.facenet = FaceNet("./facenet_keras.h5")
        self.model = None
        self.classes = None

    def train(self, path):
        self.classes = {idx: name for idx, name in enumerate(os.listdir(path))}
        pickle.dump(self.classes, open("class.pkl", "wb"))
        x_train = []
        y_train = []
        for key,_class in self.classes.items():
            for file in os.listdir(os.path.join(path, _class)):
                image = cv2.imread(os.path.join(path, _class, file))
                _, face = self.facenet.get_all_faces(image, 0.6)
                x_train.append(self.facenet.get_embeddings(face[0]))
                y_train.append(key)
        if len(self.classes) == 1:
            for _ in range(3):
                x_train.append(np.random.rand(128))
                y_train.append(len(self.classes))
        self.model = SVC(kernel="linear", probability=True)
        self.model.fit(x_train, y_train)
        pickle.dump(self.model, open("model.pkl", "wb"))
    
    def predict(self, image, threshold):
        """Return the image with recognized names annotated on it.

        image -> np.ndarray
        threshold -> float, probability cutoff in [0, 1]
        """
        if self.model is None:
            self.model = pickle.load(open("model.pkl", "rb"))
        if self.classes is None:
            self.classes = pickle.load(open("class.pkl", "rb"))
        boxes, faces = self.facenet.get_all_faces(image, 0.8)
        x_test = []
        if len(faces) == 0:
            return image
        for face in faces:
            x_test.append(self.facenet.get_embeddings(face))
        if len(x_test) == 1:
            faces = np.ravel(faces)
        probs = self.model.predict_proba(x_test)
        max_prob = np.argmax(probs, axis=1)
        for i, p in enumerate(max_prob):
            name = ""
            if probs[i][p] > threshold:
                name = self.classes[p]
            else:
                name = "Not recognized"
            font = cv2.FONT_HERSHEY_SIMPLEX
            pos_x = boxes[i][0]
            pos_y = boxes[i][1]+ boxes[i][3]
            cv2.putText(image, name, (pos_x, pos_y), font, 3.5, (255, 0, 0), 2, cv2.LINE_AA)
            cv2.imshow("image", image)
            if cv2.waitKey(0):
                cv2.destroyAllWindows()
        return image
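A minimal usage sketch for the Classifier above (not part of the original example; the directory and file names are placeholders): train on a folder containing one subdirectory of face images per person, then annotate a test image. Note that the threshold is compared against predict_proba outputs, so it should be a float in [0, 1].

import cv2

clf = Classifier()
clf.train("faces/")                  # e.g. faces/alice/*.jpg, faces/bob/*.jpg
img = cv2.imread("test.jpg")
annotated = clf.predict(img, 0.7)    # returns the image with names drawn on it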
Example #3
    def __init__(self, ref_im, loss_str, eps):
        super(LossBuilder, self).__init__()
        assert ref_im.shape[2] == ref_im.shape[3]
        im_size = ref_im.shape[2]
        factor = 1024 // im_size
        assert im_size * factor == 1024
        self.D = BicubicDownSample(factor=factor)
        self.ref_im = ref_im
        self.parsed_loss = [
            loss_term.split('*') for loss_term in loss_str.split('+')
        ]
        self.eps = eps
        self.facenet = FaceNet()
Example #4
import sys

sys.path.insert(1, "../")

from facenet import FaceNet
from util.common import ON_CUDA, ON_JETSON


if __name__ == "__main__":
    detector = "mtcnn"  # "trt-mtcnn" if ON_CUDA else "mtcnn"
    graphics = True  # not ON_JETSON
    mtcnn_stride = 7 if ON_JETSON else 3
    resize = 1 if ON_JETSON else 0.6

    facenet = FaceNet()
    facenet.real_time_recognize(
        detector=detector, graphics=graphics, mtcnn_stride=mtcnn_stride, resize=resize
    )
Example #5
import sys
import os.path as osp

from datasets import CelebA
from facenet import FaceNet
from euclidean import euclidean_distance

#np.random.seed(196)
dataset_root = '/datasets/CelebA'

if __name__ == '__main__':
    parse = sys.argv[1]  # 'train'
    print(parse)
    face_size = (96, 96)

    if parse == 'train':
        batch_size = 8
        celebA = CelebA(dataset_root, face_size=face_size)
        facenet = FaceNet(batch_size=batch_size, face_size=face_size, alpha=0.5)
        # Restore from:
        #   1. 0.2943756_7_8.npz test loss: 0
        #   2. 0.2817538_2_0.8.npz test loss: 0
        #   3. 0.28175184_22_0.08.npz test loss: 0
        #   4. 0.2740853_5_0.008.npz  test loss: 0

        #   1. 0.52165955_1_0.8.npz
        facenet.sess.restore(osp.join(facenet.model_root, '0.33327728476725504_94_1.npz'))
        # Train stage:
        #   1. epoch: 100, lr: 1
        #   2. epoch: 100, lr: 0.1
        #   3. epoch: 100, lr: 0.1
        #   4. epoch: 100, lr: 0.1
        #   5. epoch: 100, lr: 0.01
        #   6. epoch: 100, lr: 0.01
Example #6
    def __init__(self):
        self.facenet = FaceNet("./facenet_keras.h5")
        self.model = None
        self.classes = None
Example #7
import sys

sys.path.insert(1, "../")

from facenet import FaceNet
from util.wsocket import WebSocket, SocketError
from util.common import ON_CUDA, ON_JETSON


if __name__ == "__main__":
    detector = "trt-mtcnn" if ON_CUDA else "mtcnn"
    graphics = not ON_JETSON
    mtcnn_stride = 7 if ON_JETSON else 3
    resize = 1 if ON_JETSON else 0.6

    facenet = FaceNet()

    def run():
        ws = WebSocket("10.56.9.186:8000", 1, facenet)
        ws.connect(
            detector=detector,
            graphics=graphics,
            mtcnn_stride=mtcnn_stride,
            resize=resize,
        )

    i = 0
    while True:
        i += 1
        print(f"------ RESETTING ({i}) ------\n\n\n")
        try:
            run()
        except SocketError:
            continue  # assumed completion of the truncated snippet: retry the connection
Example #8
def train(opt):
    facenet = FaceNet(opt)
    model = facenet.model
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        opt.learning_rate, decay_steps=10000, decay_rate=0.96, staircase=True)
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule,
                                         beta_1=0.9,
                                         beta_2=0.999,
                                         epsilon=0.1)

    checkpoint = tf.train.Checkpoint(step=tf.Variable(0),
                                     epoch=tf.Variable(0),
                                     optimizer=optimizer,
                                     model=model)
    manager = tf.train.CheckpointManager(checkpoint,
                                         opt.checkpoint_dir,
                                         max_to_keep=3)
    check_folder(opt.train_log_dir)
    train_summary_writer = tf.summary.create_file_writer(opt.train_log_dir)

    start_epoch = 0
    if opt.restore:
        # restore() returns a load-status object, not an epoch count;
        # read the restored epoch variable instead
        checkpoint.restore(manager.latest_checkpoint)
        start_epoch = int(checkpoint.epoch)
    loss_fun = Center_Loss(alpha=opt.center_loss_alfa,
                           nrof_classes=opt.nrof_classes,
                           embedding_size=opt.embedding_size)
    train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
    val_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
    train_datasets, val_datasets, nrof_train, nrof_val = create_datasets_from_tfrecord(
        opt.datasets, opt.batch_size, opt.split_ratio)

    for epoch in range(start_epoch, opt.max_nrof_epochs):
        widgets = [
            'train :',
            Percentage(), ' ',
            Bar('#'), ' ',
            Timer(), ' ',
            ETA(), ' '
        ]
        pbar = ProgressBar(widgets=widgets,
                           max_value=int(nrof_train // opt.batch_size) +
                           1).start()
        for batch_id, batch_examples in pbar(enumerate(train_datasets)):
            center_loss, total_loss = train_one_step(model, train_acc_metric,
                                                     loss_fun, optimizer,
                                                     batch_examples,
                                                     opt.center_loss_weight)
            checkpoint.step.assign_add(1)
            step = int(checkpoint.step)
            if step % 400 == 0:
                with train_summary_writer.as_default():
                    tf.summary.scalar('center_loss', center_loss, step=step)
                    tf.summary.scalar('total_loss', total_loss, step=step)
        pbar.finish()
        train_acc = train_acc_metric.result()
        print('Training acc over epoch {}: {:.4f}'.format(epoch, float(train_acc)))

        widgets = [
            'validate :',
            Percentage(), ' ',
            Bar('#'), ' ',
            Timer(), ' ',
            ETA(), ' '
        ]
        pbar = ProgressBar(widgets=widgets,
                           max_value=int(nrof_val // opt.batch_size) +
                           1).start()
        for batch_id, (batch_images_validate,
                       batch_labels_validate) in pbar(enumerate(val_datasets)):
            prediction = model(batch_images_validate)
            val_acc_metric(batch_labels_validate, prediction)
        pbar.finish()
        val_acc = val_acc_metric.result()
        print('Validate acc over epoch {}: {:.4f}'.format(epoch, float(val_acc)))

        with train_summary_writer.as_default():
            tf.summary.scalar('train_acc',
                              train_acc_metric.result(),
                              step=step)
            tf.summary.scalar('val_acc', val_acc_metric.result(), step=step)
        train_acc_metric.reset_states()
        val_acc_metric.reset_states()
        save_path = manager.save()
        print('save checkpoint to {}'.format(save_path))
        checkpoint.epoch.assign_add(1)
Example #9
# --------------------------------------------------------
# SMNet FaceNet
# Licensed under The MIT License [see LICENSE for details]
# Copyright 2019 smarsu. All Rights Reserved.
# --------------------------------------------------------

import os.path as osp
import cv2
from facenet import FaceNet

face_size = (96, 96)
facenet = FaceNet(face_size=face_size)
facenet.sess.restore(osp.join(facenet.model_root, 'nan_99_1.npz'))


def extract_feature(img):
    """Extract features of image.

    Args:
        img: ndarray, [h, w, c]
    """
    return facenet.test(img)


def main():
    lfw_detected_root = '/datasets/lfw_detected'
    with open('lfw_person.txt') as fb:
        lines = fb.readlines()
        lfw_persons = [osp.join(lfw_detected_root, line.strip()) for line in lines]

    with open('features.txt', 'w') as fb:
Example #10
import argparse

from facenet import FaceNet
from util.loader import dump_and_embed
from util.detection import FaceDetector
from util.encryptions import NAMES
from util.distance import DistMetric
from util.common import ON_CUDA

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--img_dir", help="image dir to embed")
    parser.add_argument("--dump_path", help="dump path for embeddings")
    parser.add_argument("--mean", help="use mean or not", action="store_true")
    args = parser.parse_args()

    facenet = FaceNet(data_path=None)
    facenet.dist_metric = DistMetric("cosine", normalize=True)
    facenet.img_norm = "fixed"
    facenet.alpha = 0.33

    detector = FaceDetector("trt-mtcnn" if ON_GPU else "mtcnn",
                            facenet.img_shape)
    no_faces = dump_and_embed(
        facenet,
        args.img_dir,
        args.dump_path,
        to_encrypt=NAMES,
        detector=detector,
        full_overwrite=True,
        use_mean=args.mean,
        verbose=False,
    )
Example #11
class Trainer(object):
    def __init__(self, args):
        self.args = args
        self.model = FaceNet(args).model
        self.train_datasets, self.nrof_train = create_datasets_from_tfrecord(
            tfrcd_dir=args.datasets, batch_size=args.batch_size, phase='train')

        self.val_datasets, self.nrof_val = create_datasets_from_tfrecord(
            tfrcd_dir=args.datasets, batch_size=args.batch_size, phase='val')
        self.lr_schedule = schedules.ExponentialDecay(args.learning_rate,
                                                      decay_steps=10000,
                                                      decay_rate=0.96,
                                                      staircase=True)

        self.optimizer = Adam(learning_rate=self.lr_schedule,
                              beta_1=0.9,
                              beta_2=0.999,
                              epsilon=0.1)
        self.checkpoint = tf.train.Checkpoint(
            epoch=tf.Variable(0, dtype=tf.int64),
            n_iter=tf.Variable(0, dtype=tf.int64),
            best_pred=tf.Variable(0.0, dtype=tf.float32),
            optimizer=self.optimizer,
            model=self.model)
        self.manager = tf.train.CheckpointManager(self.checkpoint,
                                                  args.checkpoint_dir,
                                                  max_to_keep=3)
        check_folder(args.log_dir)
        self.train_summary_writer = tf.summary.create_file_writer(args.log_dir)

    # @tf.function()
    def train_one_step(self, train_acc_metric, loss_layer, batch_examples,
                       trainable_variables):
        with tf.GradientTape() as tape:
            batch_images, batch_labels = batch_examples
            features = self.model(batch_images)
            embedding = tf.math.l2_normalize(features, axis=1, epsilon=1e-10)
            logits = loss_layer(embedding, batch_labels)
            loss = SparseCategoricalCrossentropy(from_logits=True)(
                batch_labels, logits)
            train_acc_metric(batch_labels, logits)
        gradients = tape.gradient(loss, trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, trainable_variables))
        return loss

    def training(self, epoch):
        opt = self.args
        loss_layer = ArcFaceSoftmaxLinear(opt.nrof_classes, opt.embedding_size,
                                          opt.margin, opt.feature_scale)
        trainable_variables = []
        trainable_variables.extend(loss_layer.trainable_variables)
        trainable_variables.extend(self.model.trainable_variables)
        train_acc_metric = SparseCategoricalAccuracy()
        widgets = [
            'train :',
            Percentage(), ' ',
            Bar('#'), ' ',
            Timer(), ' ',
            ETA(), ' '
        ]
        pbar = ProgressBar(widgets=widgets,
                           max_value=int(self.nrof_train // opt.batch_size) +
                           1).start()
        for batch_id, batch_examples in pbar(enumerate(self.train_datasets)):
            loss = self.train_one_step(train_acc_metric, loss_layer,
                                       batch_examples, trainable_variables)
            with self.train_summary_writer.as_default():
                tf.summary.scalar('total_loss', loss, self.checkpoint.n_iter)
            self.checkpoint.n_iter.assign_add(1)
        pbar.finish()
        train_acc = train_acc_metric.result()
        print('\nTraining acc over epoch {}: {:.4f}'.format(epoch, float(train_acc)))
        with self.train_summary_writer.as_default():
            tf.summary.scalar('train/acc', train_acc_metric.result(),
                              self.checkpoint.epoch)
        train_acc_metric.reset_states()
        save_path = self.manager.save()
        print('save checkpoint to {}'.format(save_path))

    def validate(self, epoch):
        widgets = [
            'validate :',
            Percentage(), ' ',
            Bar('#'), ' ',
            Timer(), ' ',
            ETA(), ' '
        ]
        pbar = ProgressBar(
            widgets=widgets,
            max_value=int(self.nrof_val // self.args.batch_size) + 1).start()
        val_acc_metric = SparseCategoricalAccuracy()
        for batch_id, (batch_images_validate, batch_labels_validate) in pbar(
                enumerate(self.val_datasets)):
            prediction = self.model(batch_images_validate)
            val_acc_metric(batch_labels_validate, prediction)
        pbar.finish()
        val_acc = val_acc_metric.result()
        print('\nValidate acc over epoch {}: {:.4f}'.format(epoch, float(val_acc)))
        with self.train_summary_writer.as_default():
            tf.summary.scalar('val/acc', val_acc_metric.result(),
                              self.checkpoint.epoch)
        self.checkpoint.epoch.assign_add(1)

        val_acc_metric.reset_states()

        if val_acc > self.checkpoint.best_pred:
            # best_pred is a tf.Variable tracked by the checkpoint, so assign() it
            self.checkpoint.best_pred.assign(val_acc)
            with open(os.path.join(self.args.checkpoint_dir, 'best_pred.txt'),
                      'w') as f:
                f.write(str(float(val_acc)))
            self.model.save(os.path.join(self.args.checkpoint_dir, 'best_model.h5'))
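A minimal driver sketch (assumed, not shown in the original example): the Trainer only defines per-epoch methods, so a caller would loop them roughly like this. parse_args is a hypothetical helper returning an object with the fields used above (datasets, batch_size, learning_rate, checkpoint_dir, log_dir, nrof_classes, embedding_size, margin, feature_scale, max_nrof_epochs).

if __name__ == '__main__':
    args = parse_args()  # hypothetical argparse wrapper supplying the fields above
    trainer = Trainer(args)
    for epoch in range(args.max_nrof_epochs):
        trainer.training(epoch)
        trainer.validate(epoch)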
Example #12
"""Main production script for kiosk/aisecurity."""

import sys

sys.path.insert(1, "../")

from facenet import FaceNet
from util.network import WebSocket
from util.common import ON_CUDA, ON_JETSON

if __name__ == "__main__":
    WebSocket.run(
        FaceNet(),
        detector="trt-mtcnn" if ON_CUDA else "mtcnn",
        graphics=not ON_JETSON,
        mtcnn_stride=7 if ON_JETSON else 3,
        resize=1 if ON_JETSON else 0.6,
    )