Example #1
    def __init__(self, sess, args):
        self.session = sess
        self.image_size = 160  # FaceNet models expect 160x160 inputs
        facenet2.load_model(args.model)

        # Get input and output tensors
        self.images_placeholder = tf.get_default_graph().get_tensor_by_name(
            "input:0")
        self.embeddings = tf.get_default_graph().get_tensor_by_name(
            "embeddings:0")
        self.phase_train_placeholder = tf.get_default_graph(
        ).get_tensor_by_name("phase_train:0")
        self.embedding_size = self.embeddings.get_shape()[1]
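The constructor above only wires up the graph tensors. A minimal usage sketch, assuming the class keeps `self.session` as above and that `face_batch` is an (N, 160, 160, 3) array of already cropped and prewhitened faces; the helper name `embed` is hypothetical:

    def embed(self, face_batch):
        # Run one forward pass; phase_train=False selects inference mode
        feed_dict = {
            self.images_placeholder: face_batch,
            self.phase_train_placeholder: False,
        }
        return self.session.run(self.embeddings, feed_dict=feed_dict)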
Example #2
    def __init__(self, proxy_map):
        super(SpecificWorker, self).__init__(proxy_map)

        ### Check whether an embeddings file already exists
        files = os.listdir('.')
        self.file_name = 'data_embeddings.npy'
        if self.file_name in files:
            print('Embeddings file found')
            self.neural_embeddings = np.load(self.file_name, allow_pickle=True)
        else:
            ### Creating an embedding file if no embeddings exist already
            print(
                'No File Found. Creating an empty numpy file for neural embeddings'
            )
            # Object array with one (embedding, label) row; row 0 is a
            # sentinel with a zero embedding and the dummy label '####'
            self.neural_embeddings = np.zeros((1, 2), dtype=object)
            self.neural_embeddings[0, 0] = np.zeros((1, 512), dtype=np.float32)
            self.neural_embeddings[0, 1] = '####'
            np.save(self.file_name, self.neural_embeddings)

        # Call self.compute every Period milliseconds
        self.timer.timeout.connect(self.compute)
        self.Period = 50
        self.timer.start(self.Period)

        ##### Defining the face recognition model and parameters
        self.model_path = './assets/20180408-102900/'
        ##### Thresholds for matching the neural embeddings for label prediction
        self.threshold_1 = 1.10
        self.threshold_2 = 0.55
        ##### Relaxed threshold to check if incorrect label is given
        self.threshold_3 = 1.25
        ##### Threshold to remove redundant data
        self.threshold_4 = 3
        self.image_size = 160

        #### Loading the Face Recognition Model
        with tf.Graph().as_default():
            self.config = tf.ConfigProto()
            self.config.gpu_options.per_process_gpu_memory_fraction = 0.3
            self.sess = tf.InteractiveSession(config=self.config)
            facenet.load_model(self.model_path)
            self.images_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("input:0")
            # NOTE: frames must be resized to (image_size, image_size)
            # before being fed; wrapping this placeholder in
            # tf.image.resize_images would produce a new tensor that is
            # disconnected from the embedding graph
            self.embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            self.phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
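The threshold attributes above suggest nearest-neighbour matching on L2 distance between embeddings (the 20180408-102900 model emits 512-dimensional vectors). A minimal sketch of such a matcher, assuming self.neural_embeddings holds (embedding, label) rows as built above; the helper name predict_label is hypothetical:

    def predict_label(self, embedding):
        # Compare the query embedding against every stored row and keep
        # the closest match
        best_label, best_dist = None, float('inf')
        for stored_embedding, label in self.neural_embeddings:
            dist = np.linalg.norm(embedding - stored_embedding)
            if dist < best_dist:
                best_dist, best_label = dist, label
        # Accept the match only when it clears the distance threshold
        return best_label if best_dist < self.threshold_1 else 'unknown'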
Example #3
# Earlier checkpoint-restore attempts, kept commented out for reference:
# print(saver.saver_def.filename_tensor_name)
# ckpt = tf.train.get_checkpoint_state('./data/model-20170511-185253.ckpt-80000.data-00000-of-00001')
# saver.restore(sess, ckpt.model_checkpoint_path)
# with sess.as_default():
# load_model('./models/20170511-185253')
# model_checkpoint_path = './models/model-20160506.ckpt-500000'
# saver.restore(sess, model_checkpoint_path)
# saver = tf.train.import_meta_graph('./models/20170511-185253/model-20170511-185253.meta')
# saver.restore(sess, './models/20170511-185253/model-20170511-185253.ckpt-80000')
# print('Facenet embedding restore success')
# load_model('./models/20170511-185253/')
with tf.Graph().as_default():
    with tf.Session() as sess:
        facenet.load_model('./models/20170512-110547/')
        images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
        # images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size, 3), name='input')
        # phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        # embeddings = network.inference(images_placeholder, pool_type, use_lrn, 1.0, phase_train=phase_train_placeholder)

        train_x = []
        train_y = []
        # NOTE: harmless for a frozen .pb, but this initializer call would
        # overwrite weights that load_model restored from a checkpoint
        sess.run(tf.global_variables_initializer())
        for index in range(len(keys)):
            for x in data[keys[index]]:
                x_color = to_rgb(x)
                bounding_boxes, _ = detect_face.detect_face(x_color, minsize, pnet, rnet, onet, threshold, factor)
                # nrof_faces = bounding_boxes.shape[0]  # number of faces
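                # The example cuts off here; a plausible continuation
                # (a sketch only, assuming cv2 and facenet.prewhiten are
                # available) would crop the first detected face, embed it,
                # and collect training pairs:
                if bounding_boxes.shape[0] > 0:
                    x1, y1, x2, y2 = bounding_boxes[0, 0:4].astype(int)
                    face = cv2.resize(x_color[y1:y2, x1:x2], (160, 160))
                    emb = sess.run(embeddings, feed_dict={
                        images_placeholder: [facenet.prewhiten(face)],
                        phase_train_placeholder: False})
                    train_x.append(emb[0])
                    train_y.append(index)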
Example #4
# restore mtcnn model
print('Creating networks and loading parameters')
gpu_memory_fraction = 0.8
with tf.Graph().as_default():
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=gpu_memory_fraction)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                            log_device_placement=False))
    with sess.as_default():
        pnet, rnet, onet = detect_face.create_mtcnn(sess, './data/')

# restore facenet model
print('Restore facenet embedding model')
with tf.Graph().as_default():
    with tf.Session() as sess:
        facenet.load_model('/home/ubuntu/Code/face/models/20170511-185253/')
        images_placeholder = tf.get_default_graph().get_tensor_by_name(
            "input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
            "phase_train:0")
        print('Facenet embedding success')

        model = joblib.load(
            '/home/ubuntu/Code/face/knn_20170512-110547_2.model')

        video_capture = cv2.VideoCapture(2)  # camera index 2
        find_result = 'other'
        keys = update_dir('./train_data/')
        # NOTE: harmless for a frozen .pb, but this would clobber weights
        # restored from a checkpoint
        sess.run(tf.global_variables_initializer())
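
        # The snippet stops before the capture loop it sets up; a
        # hypothetical per-frame sketch (assuming the MTCNN nets and
        # default thresholds from above, and the knn model loaded with
        # joblib):
        while True:
            ret, frame = video_capture.read()
            if not ret:
                break
            boxes, _ = detect_face.detect_face(
                frame, 20, pnet, rnet, onet, [0.6, 0.7, 0.7], 0.709)
            for box in boxes.astype(int):
                face = cv2.resize(frame[box[1]:box[3], box[0]:box[2]],
                                  (160, 160))
                emb = sess.run(embeddings, feed_dict={
                    images_placeholder: [facenet.prewhiten(face)],
                    phase_train_placeholder: False})
                find_result = model.predict(emb)[0]
                print(find_result)
        video_capture.release()
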
def main(args):

    print('Creating networks and loading parameters')

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face2.create_mtcnn(sess, None)

            minsize = 20  # minimum face size in pixels
            threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
            factor = 0.709  # image pyramid scale factor

            img = cv2.imread(args.input_image)
            if img is None:
                # cv2.imread returns None on failure rather than raising
                print('Unable to read "%s"' % args.input_image)
                return

            if img.ndim < 2:
                print('Unable to align "%s"' % args.input_image)
                return
            if img.ndim == 2:
                img = facenet2.to_rgb(img)
            img = img[:, :, 0:3]

            bounding_boxes, _ = align.detect_face2.detect_face(
                img, minsize, pnet, rnet, onet, threshold, factor)
            nrof_faces = bounding_boxes.shape[0]
            detected_faces = []
            detected_bb = []
            if nrof_faces > 0:
                det = bounding_boxes[:, 0:4]
                det_arr = []
                img_size = np.asarray(img.shape)[0:2]
                # collect every detected box, including the single-face case
                for i in range(nrof_faces):
                    det_arr.append(np.squeeze(det[i]))

                for i, det in enumerate(det_arr):
                    det = np.squeeze(det)
                    bb = np.zeros(4, dtype=np.int32)
                    bb[0] = np.maximum(det[0] - args.margin / 2, 0)
                    bb[1] = np.maximum(det[1] - args.margin / 2, 0)
                    bb[2] = np.minimum(det[2] + args.margin / 2, img_size[1])
                    bb[3] = np.minimum(det[3] + args.margin / 2, img_size[0])
                    cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                    #scaled = scipy.misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear')
                    scaled = cv2.resize(cropped,
                                        (args.image_size, args.image_size))
                    detected_faces.append(scaled)
                    detected_bb.append(bb)
                print(nrof_faces, 'faces detected')
                print(detected_bb)
            source_image = img

            print('Loading feature extraction model')
            facenet2.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Calculating features for images')

            # preprocessing: ensure RGB and prewhiten each detected face
            images = []
            for img in detected_faces:
                if img.ndim == 2:
                    img = facenet2.to_rgb(img)
                img = facenet2.prewhiten(img)
                images.append(img)
            feed_dict = {
                images_placeholder: np.stack(images),
                phase_train_placeholder: False
            }
            emb_array = sess.run(embeddings, feed_dict=feed_dict)

            print('Testing classifier')
            classifier_filename_exp = os.path.expanduser(
                args.classifier_filename)
            with open(classifier_filename_exp, 'rb') as infile:
                (model, class_names) = pickle.load(infile)

            print('Loaded classifier model from file "%s"' %
                  classifier_filename_exp)

            predictions = model.predict_proba(emb_array)
            best_class_indices = np.argmax(predictions, axis=1)
            best_class_probabilities = predictions[
                np.arange(len(best_class_indices)), best_class_indices]

            for i in range(len(best_class_indices)):
                print('%4d  %s: %.3f' % (i, class_names[best_class_indices[i]],
                                         best_class_probabilities[i]))

            source_image = np.array(source_image)
            # source_image is already BGR from cv2.imread, which is the
            # channel order cv2.imwrite expects, so no conversion is needed

            for i in range(len(detected_bb)):
                bb = detected_bb[i]
                cv2.rectangle(source_image, (bb[0], bb[1]), (bb[2], bb[3]),
                              (0, 255, 0), 3)
                cv2.putText(source_image, class_names[best_class_indices[i]],
                            (bb[0], bb[1] - 20), cv2.FONT_HERSHEY_SIMPLEX, 1,
                            (0, 255, 0), 2)
                cv2.putText(source_image, '%.3f' % best_class_probabilities[i],
                            (bb[0], bb[3] + 40), cv2.FONT_HERSHEY_SIMPLEX, 1,
                            (0, 255, 0), 2)
            cv2.imwrite('result.png', source_image)
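
main expects an argparse-style namespace; the entry-point sketch below infers the argument names from the attributes read above (input_image, model, classifier_filename, gpu_memory_fraction, margin, image_size), and the defaults are illustrative only:

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # Argument names inferred from the attributes main() reads above
    parser.add_argument('input_image')
    parser.add_argument('model')
    parser.add_argument('classifier_filename')
    parser.add_argument('--gpu_memory_fraction', type=float, default=0.8)
    parser.add_argument('--margin', type=int, default=44)
    parser.add_argument('--image_size', type=int, default=160)
    main(parser.parse_args())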