Example #1
    def __init__(self, modelfile):
        modelfile_exp = os.path.expanduser(modelfile)
        if not os.path.exists(modelfile_exp):
            sys.stderr.write("error: no such file: %s \n" % modelfile_exp)
            exit(1)

        sys.stderr.write("note: NAN module initializing...\n")
        self.graph = tf.Graph()
        with self.graph.as_default():
            with tf.Session() as sess:
                start = time()
                facenet.load_model(modelfile_exp)
                self.input_place_holder = tf.get_default_graph(
                ).get_tensor_by_name("x_input:0")
                self.score = tf.get_default_graph().get_tensor_by_name(
                    "score:0")
                self.NAN_feature = tf.get_default_graph().get_tensor_by_name(
                    "NAN_feature:0")
                self.output_tensor = tf.get_default_graph(
                ).get_tensor_by_name("output:0")
                shape = self.output_tensor.shape.as_list()
                global nrof_output
                nrof_output = shape[1]
                end = time()
                sys.stderr.write(
                    "note: NAN model loaded successfully\ncost time: %ds\n" %
                    (end - start))

        sys.stderr.write("note: NAN module initialized successfully\n")
        self.train_feed = np.zeros((1, nrof_output))
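
The session opened in __init__ closes when its with block exits, so inference needs a fresh session on self.graph. A minimal sketch, assuming the model is a frozen .pb whose imported constants survive the closed session (the method name and the faces argument are illustrative, not from the original):

    def predict_scores(self, faces):
        # Hypothetical helper: reuse the graph built in __init__ and run the
        # score tensor on a batch of inputs shaped to match x_input.
        with self.graph.as_default():
            with tf.Session() as sess:
                return sess.run(self.score,
                                feed_dict={self.input_place_holder: faces})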
Example #2
 def __init__(self, model_path=None):
     if model_path is None:
         return
     self.sess = tf.Session()
     self.detect = face.Detection()  # create a face detector (three-stage MTCNN cascade)
     with self.sess.as_default():
         facenet.load_model(model_path)
Example #3
    def predict(self, connection, frames_face_boxes, frames_reader):
        self._graph = tf.Graph()
        self._sess = tf.Session()

        print("Recognizing the face")

        # Load the model
        with self._sess.as_default():
            facenet.load_model(self._facenet_model)

        with open(self._facenet_classifier, 'rb') as infile:
            (model_emb, class_names, labels) = pickle.load(infile)
        print('Loaded classifier model from file "%s"' %
              self._facenet_classifier)

        n_ngbr = 10
        nbrs = NearestNeighbors(n_neighbors=n_ngbr,
                                algorithm='ball_tree').fit(model_emb)

        bar = Bar('Processing', max=len(frames_face_boxes))

        faces_names = []
        i = 0

        frames_generator = frames_reader.get_frames(1)

        for frames_data, frames_pts in frames_generator:
            boxes = frames_face_boxes[i]
            i += 1

            frame_names = []

            if len(boxes):
                faces = utils.extract_boxes(frames_data, boxes)

                emb_array = self.calculate_embeddings(faces)

                if len(faces):
                    distances, indices = nbrs.kneighbors(emb_array)

                    for f in range(len(faces)):
                        inds = indices[f]
                        classes = np.array([labels[idx] for idx in inds])
                        label = Counter(classes).most_common(1)[0][0]

                        person_name = class_names[label]
                        confidence = np.sum(classes == label) / n_ngbr

                        if confidence <= 0.3:
                            person_name = "Unknown"

                        frame_names.append((person_name, confidence))

            bar.next()

            faces_names.append(frame_names)

        connection.send(faces_names)

        bar.finish()
Example #4
def creat_npy(crop_dir, npy_dir, model):
    ''' create npy file for tfrecord '''
    from facenet.src.facenet import load_model, prewhiten
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.8
    with tf.Graph().as_default():
        with tf.Session(config = config) as sess:
            print("Now loading the model...")
            load_model(model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            subfolders = [f.path for f in os.scandir(crop_dir) if f.is_dir() ]
            for sub_dir in subfolders:
                count = 0
                sub_dir_basename = os.path.basename(sub_dir)
                print("CreatNpy step, Now is processing: {} ...".format(sub_dir_basename))
                onlyfiles = [f for f in os.listdir(sub_dir) if os.path.isfile(os.path.join(sub_dir, f))]
                for f in onlyfiles:
                    image_name = os.path.join(sub_dir, f)
                    print(image_name)
                    try:
                        img = cv2.imdecode(np.fromfile(os.path.expanduser(image_name),dtype=np.uint8), cv2.IMREAD_COLOR)
                        prewhitened = prewhiten(img)
                        feed_dict = { images_placeholder: [prewhitened], phase_train_placeholder:False }
                        emb = sess.run(embeddings, feed_dict=feed_dict)
                        store_img_as_npy(os.path.join(npy_dir, sub_dir_basename), sub_dir_basename + '_' + str(count), emb)
                        count = count + 1
                    except TypeError as e:
                        print("having the {}, so passing it".format(e))
Example #5
 def __init__(self, model_path):
     facenet.load_model(str(model_path))
     self.input_image_size = 160
     self.sess = tf.Session()
     self.images_placeholder = tf.get_default_graph().get_tensor_by_name('input:0')
     self.embeddings = tf.get_default_graph().get_tensor_by_name('embeddings:0')
     self.phase_train_placeholder = tf.get_default_graph().get_tensor_by_name('phase_train:0')
     self.embedding_size = self.embeddings.get_shape()[1]
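
A hypothetical embedding call for the wrapper above (the method name embed and the prewhitened 160x160 img argument are illustrative, not from the original):

 def embed(self, img):
     # Sketch only: run one prewhitened face through the embeddings tensor.
     feed_dict = {self.images_placeholder: [img],
                  self.phase_train_placeholder: False}
     return self.sess.run(self.embeddings, feed_dict=feed_dict)[0]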
Example #6
 def __init__(self, path=None, path_training=None, training=False):
     import tensorflow as tf               # lazy loading
     import facenet.src.facenet as facenet # lazy loading
     self.path_training = path_training
     self._sess = tf.Session()
     with self._sess.as_default():
         facenet.load_model(path + INPUT_FACENET_MODEL)
     if not training:
         with open(self.path_training + OUTPUT_FACENET_CLASSIFIER, "rb") as f:
             self.clf = pickle.load(f)
         with open(self.path_training + OUTPUT_FACENET_LABELER, "rb") as f:
             self.label_encoder = pickle.load(f)
Example #7
def main_func(face_feature_req):
    FACE_FEATURE_REQUIRED = face_feature_req  # set by the user: True/1 runs face localization + feature extraction; False/0 runs localization only
    batch_size = config["batch_size"] #batch_size = 32, user param in config
    margin = config["margin"]         #add to config -- developer
    image_size = config["image_size"] #add to config -- developer --image size used to resize faces which will be passed to Facenet for face feature extraction
    BBox_Thresh = config["BBox_Thresh"] #add to config -- developer
    image_paths = config["image_paths"] #Input path
    dest_path = config["dest_path"] #Output Folder
    dest_path = create_Dir(dest_path) #create output DIR
    logger.debug("Output directory created.")
    img_dest_path = create_Dir(dest_path,'Localized_Images') #create DIR to store localized images within output/Localized_Images
    discard_folder_path = create_Dir(dest_path,'Discarded_Images') #create DIR to store discarded images

    if FACE_FEATURE_REQUIRED:
        model_path = config["model_path"] #add to config --model_path: "Required for face extraction alone"
        csv_name = config["csv_name"] #Output CSV file name
        csv_dest_path = create_Dir(dest_path,'csv_output') #Create csv folder within output folder
        csv_dest_file_path = os.path.join(csv_dest_path,csv_name)

    # To perform face localize
    logger.info("Face localization is in process...")
    pnet, rnet, onet  = create_network_face_detection(gpu_memory_fraction=1.0)
    train_images, image_paths = load_image_align_data(img_dest_path, image_paths, image_size, margin, pnet, rnet, onet, discarded_folder_path = discard_folder_path, bbox_thresh = BBox_Thresh)
    logger.info("Face Localization executed successfully.")

    # To perform Facial feature extraction
    if FACE_FEATURE_REQUIRED:
        logger.info("Face Feature Extraction is in process...")
        temp_tr_images, temp_image_paths = [], []  # temp vars required for batch process
        list_image_paths, list_train_embs = [], []  # to collate into a single list post batch process
        with tf.Graph().as_default():
            with tf.Session() as sess:
                facenet.load_model(model_path)
                images_placeholder = sess.graph.get_tensor_by_name("input:0")
                embeddings = sess.graph.get_tensor_by_name("embeddings:0")
                phase_train_placeholder = sess.graph.get_tensor_by_name("phase_train:0")
                bt_sz = batch_size
                logger.debug("Face Feature Extraction model's batch size is set to " + str(batch_size))
                for i in range(0, len(train_images), bt_sz):
                    temp_tr_images = train_images[i : i+bt_sz]
                    temp_image_paths = image_paths[i : i+bt_sz]
                    feed_dict = {images_placeholder: temp_tr_images, phase_train_placeholder: False}
                    logger.debug('len(temp_tr_images): ' + str(len(temp_tr_images)))
                    train_embs = sess.run(embeddings, feed_dict=feed_dict)
                    list_train_embs.extend(train_embs)
                    list_image_paths.extend(temp_image_paths)
                embs_dict = dict(zip(list_image_paths, list_train_embs))
                df_train = pd.DataFrame.from_dict(embs_dict, orient='index')
                logger.debug('Face Embedded: No. of images: ' + str(len(image_paths)) + ' within ' + str(len(train_images)) + ' Localized Images')
                df_train.to_csv(csv_dest_file_path)  # output CSV files -- {img_names,features}
                logger.info("Face Feature Extraction executed successfully.")

    logger.info("Path of output folder is: " + dest_path)
    return dest_path
Example #8
def get_embeddings(aligned_imgs_path, trained_model_path, tmp_dir, is_debug):
    assert (os.path.isdir(tmp_dir))
    embeddings_filename = EMBEDDINGS_FILE
    if is_debug:
        embeddings_filename += DEBUG_EXT
    embeddings_path = os.path.join(tmp_dir, embeddings_filename)
    if os.path.isfile(embeddings_path):
        with open(embeddings_path, 'rb') as f:
            return pickle.load(f)

    aligned_imgs_paths = get_img_paths(aligned_imgs_path)
    if is_debug:
        aligned_imgs_paths = aligned_imgs_paths[:DEBUG_COUNT]
    img_idxs = [get_filename_wo_ext(x) for x in aligned_imgs_paths]

    with tf.Graph().as_default():
        with tf.Session() as sess:
            facenet.load_model(trained_model_path)

            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            embedding_size = embeddings.get_shape()[1]
            nrof_images = len(aligned_imgs_paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / BATCH_SIZE))
            emb_array = np.zeros((nrof_images, embedding_size))

            for i in range(nrof_batches):
                start_index = i * BATCH_SIZE
                end_index = min((i + 1) * BATCH_SIZE, nrof_images)
                paths_batch = aligned_imgs_paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, IMG_SIZE)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            with open(embeddings_path, 'wb') as f:
                pickle.dump((emb_array, img_idxs), f)
            logging.info('saved embeddings: {}'.format(embeddings_path))

            for i in range(len(emb_array)):
                logging.debug('embedding: {}: {}'.format(
                    i, aligned_imgs_paths[i]))

            return emb_array, img_idxs
Example #9
def load_facenet(model_dir):
    meta_file, ckpt_file = facenet.get_model_filenames(model_dir)
    print('Metagraph file: %s' % meta_file)
    print('Checkpoint file: %s' % ckpt_file)
    facenet.load_model(model_dir, meta_file, ckpt_file)
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
        "phase_train:0")
    image_size = int(images_placeholder.get_shape()[1])
    embedding_size = int(embeddings.get_shape()[1])
    return (images_placeholder, embeddings,
            phase_train_placeholder), (image_size, embedding_size)
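
A hypothetical call site for load_facenet above. Since this older facenet.load_model(model_dir, meta_file, ckpt_file) API restores the checkpoint into the default session, the call is assumed to run inside a session context (model_dir and aligned are illustrative names):

with tf.Graph().as_default():
    with tf.Session() as sess:
        (images_ph, embeddings, phase_train_ph), (img_size, emb_size) = \
            load_facenet(model_dir)
        # aligned: (N, img_size, img_size, 3) array of prewhitened face crops
        emb = sess.run(embeddings, feed_dict={images_ph: aligned,
                                              phase_train_ph: False})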
Example #10
    def __init__(self,
                 facenet_model=parentDir + "/model/facenet_20180408.pb",
                 facenet_classifier=parentDir +
                 "/model/facenet_classifier.pkl"):
        super().__init__()
        self._facenet_model = facenet_model
        self._facenet_classifier = facenet_classifier

        self._graph = tf.Graph()
        self._sess = tf.Session()

        # Load the model
        with self._sess.as_default():
            facenet.load_model(self._facenet_model)
Example #11
def load_facenet(my_graph, my_sess):
    """Loads the FaceNet model into the Graph `my_graph` and the Session
    `my_sess`.
    """
    with my_graph.as_default():
        with my_sess.as_default():
            facenet.load_model(MODEL_ID)

    # We don't need the contexts to be able to extract tensors from the graph
    images_pholder = my_graph.get_tensor_by_name('input:0')
    phase_train_pholder = my_graph.get_tensor_by_name('phase_train:0')
    embeddings_var = my_graph.get_tensor_by_name('embeddings:0')

    return images_pholder, phase_train_pholder, embeddings_var
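
An illustrative follow-up (not in the original; face_batch is assumed to be a prewhitened image batch): once the tensors are returned, embeddings come from an ordinary sess.run call.

images_ph, phase_train_ph, embeddings_var = load_facenet(my_graph, my_sess)
emb = my_sess.run(embeddings_var,
                  feed_dict={images_ph: face_batch, phase_train_ph: False})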
Example #12
    def __init__(self, modelpath, image_size=160):
        self.graph = tf.Graph()
        self.sess = tf.Session(graph=self.graph)

        with self.graph.as_default():
            facenet.load_model(modelpath)

        self._input = self.graph.get_tensor_by_name("input:0")
        self._embeddings = self.graph.get_tensor_by_name("embeddings:0")
        self._phase_train = self.graph.get_tensor_by_name("phase_train:0")

        # define input and output sizes
        self.image_size = image_size
        self.embedding_size = self._embeddings.get_shape()[1]
Example #13
def main(args):
    mypath = "/images/**"
    from os.path import isfile
    import glob
    matches = glob.glob(mypath, recursive=True)
    print(matches)
    all_images = [f for f in matches if isfile(f)]
    image_files = random.sample(all_images, 10)

    images, cout_per_image, nrof_samples = load_and_align_data(
        image_files, args.image_size, args.margin, args.gpu_memory_fraction)
    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Load the model
            facenet.load_model("/root/model/20180408-102900.pb")
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            feed_dict = {
                images_placeholder: images,
                phase_train_placeholder: False
            }
            emb = sess.run(embeddings, feed_dict=feed_dict)
            classifier_filename_exp = os.path.expanduser(
                args.classifier_filename)
            with open(classifier_filename_exp, 'rb') as infile:
                (model, class_names) = pickle.load(infile)
            print('Loaded classifier model from file "%s"\n' %
                  classifier_filename_exp)
            predictions = model.predict_proba(emb)
            best_class_indices = np.argmax(predictions, axis=1)
            best_class_probabilities = predictions[
                np.arange(len(best_class_indices)), best_class_indices]
            k = 0
            for i in range(nrof_samples):
                print("\npeople in image %s :" % image_files[i])
                for j in range(cout_per_image[i]):
                    print('%s: %.3f' % (class_names[best_class_indices[k]],
                                        best_class_probabilities[k]))
                    k += 1
Example #14
def faces_to_vectors(inpath, modelpath, outpath, imgsize, batchsize=100):
    '''
    Given a folder and a model, loads images and performs forward pass to get a vector for each face
    results go to a JSON, with filenames mapped to their facevectors
    :param inpath: Where are your images? Must be cropped to faces (use MTCNN!)
    :param modelpath: Where is the tensorflow model we'll use to create the embedding?
    :param outpath: Full path to output file (better give it a JSON extension)
    :return: Number of faces converted to vectors
    '''
    results = dict()

    with tf.Graph().as_default():
        with tf.Session() as sess:

            load_model(modelpath)

            image_paths = get_image_paths(inpath)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Let's do them in batches, don't want to run out of memory
            for i in range(0, len(image_paths), batchsize):
                images = load_data(image_paths=image_paths[i:i + batchsize],
                                   do_random_crop=False,
                                   do_random_flip=False,
                                   image_size=imgsize,
                                   do_prewhiten=True)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }

                emb_array = sess.run(embeddings, feed_dict=feed_dict)
                for j in range(0, len(emb_array)):
                    relpath = os.path.relpath(image_paths[i + j], inpath)
                    results[relpath] = emb_array[j].tolist()

    # All done, save for later!
    with open(outpath, "w") as f:
        json.dump(results, f)

    return len(results.keys())
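
An illustrative call to faces_to_vectors (all paths and the image size are placeholders, not from the original):

n_faces = faces_to_vectors(inpath="cropped_faces/",
                           modelpath="models/20180402-114759.pb",
                           outpath="face_vectors.json",
                           imgsize=160)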
Example #15
def getModel():
    sess = tf.Session()
    # read pnet, rnet, onet models from align directory and files are det1.npy, det2.npy, det3.npy
    pnet, rnet, onet = detect_face.create_mtcnn(sess, 'app/models/align')

    # read 20170512-110547 model file downloaded from https://drive.google.com/file/d/0B5MzpY9kBtDVZ2RpVDYwWmxoSUk
    facenet.load_model("app/models/model/tfmodel.pb")

    # Get input and output tensors
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
        "phase_train:0")

    return (sess, pnet, rnet, onet, images_placeholder, embeddings,
            phase_train_placeholder)
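
A hypothetical caller for getModel() above; img is assumed to be an RGB numpy array, and the MTCNN parameters shown (minsize 20, thresholds [0.6, 0.7, 0.7], factor 0.709) are the values facenet examples commonly pass:

sess, pnet, rnet, onet, images_ph, embeddings, phase_train_ph = getModel()
# each row of bounding_boxes is [x1, y1, x2, y2, score] for one detected face
bounding_boxes, _ = detect_face.detect_face(img, 20, pnet, rnet, onet,
                                            [0.6, 0.7, 0.7], 0.709)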
Example #16
    def run(self):
        with tf.Graph().as_default():
            with tf.Session() as sess:
                facenet.load_model(self.model_path)
                images_placeholder = tf.get_default_graph().get_tensor_by_name(
                    "input:0")
                embeddings = tf.get_default_graph().get_tensor_by_name(
                    "embeddings:0")
                phase_train_placeholder = tf.get_default_graph(
                ).get_tensor_by_name("phase_train:0")
                all_keys = self.group_name_pics.keys()
                for group in all_keys:
                    name_pic_dict = self.group_name_pics[group]
                    for name in name_pic_dict.keys():
                        feed_dict = {
                            images_placeholder: name_pic_dict[name],
                            phase_train_placeholder: False
                        }
                        emb0 = sess.run(embeddings, feed_dict=feed_dict)
                        self.group_name_pics[group][name] = emb0

                while True:
                    image_dict = self.i_q.get()
                    dist_white_ends = image_dict["dst"]
                    group = image_dict["group"]
                    label_distance = []
                    for white_end in dist_white_ends:
                        label = ""
                        f_min = np.Inf
                        image = white_end[None]
                        feed_dict = {
                            images_placeholder: image,
                            phase_train_placeholder: False
                        }
                        emb = sess.run(embeddings, feed_dict=feed_dict)
                        name_pic_dict = self.group_name_pics[group]
                        for k, v in name_pic_dict.items():
                            distance = np.sqrt(
                                np.sum(np.square(np.subtract(emb, v))))
                            if distance < f_min:
                                f_min = distance
                                label = k
                        label_distance.append((label, f_min))
                    image_dict["ret"] = label_distance
                    self.o_q.put(image_dict)
Example #17
def main(args):
    images = load_and_align_data(args.image_files, args.image_size,
                                 args.margin, args.gpu_memory_fraction)
    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Load the model
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            feed_dict = {
                images_placeholder: images,
                phase_train_placeholder: False
            }
            emb = sess.run(embeddings, feed_dict=feed_dict)

            nrof_images = len(args.image_files)

            print('Images:')
            for i in range(nrof_images):
                print('%1d: %s' % (i, args.image_files[i]))
            print('')

            # Print distance matrix
            print('Distance matrix')
            print('    ', end='')
            for i in range(nrof_images):
                print('    %1d     ' % i, end='')
            print('')
            for i in range(nrof_images):
                print('%1d  ' % i, end='')
                for j in range(nrof_images):
                    dist = np.sqrt(
                        np.sum(np.square(np.subtract(emb[i, :], emb[j, :]))))
                    print('  %1.4f  ' % dist, end='')
                print('')
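
The nested loops above compute pairwise L2 distances one pair at a time; the same matrix can be produced with one vectorized NumPy expression (a sketch, not part of the original script; assumes numpy as np as in the script):

def pairwise_l2(emb):
    # emb: (N, D) array of embeddings -> (N, N) matrix of Euclidean distances
    diff = emb[:, None, :] - emb[None, :, :]
    return np.sqrt(np.sum(np.square(diff), axis=2))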
Example #18
 def __face_embedding_thread(self, in_queue, out_queue, model):
     face_embeddings = []
     with tf.Graph().as_default():
         sess = tf.Session()
         with sess.as_default():
             # Load the model
             facenet.load_model(model)
         # Get input and output tensors once, outside the worker loop
         images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
         embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
         phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
         while True:
             face_image = in_queue.get()
             prewhiten_face = facenet.prewhiten(face_image)
             # Run forward pass to calculate embeddings
             feed_dict = { images_placeholder: [prewhiten_face], phase_train_placeholder: False }
             face_embeddings = sess.run(embeddings, feed_dict=feed_dict)
             out_queue.put(face_embeddings)
Example #19
def main(args):

    images, cout_per_image, nrof_samples = load_and_align_data(
        args.image_files, args.image_size, args.margin,
        args.gpu_memory_fraction)
    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Load the model
            facenet.load_model(args.model)
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            feed_dict = {
                images_placeholder: images,
                phase_train_placeholder: False
            }
            emb = sess.run(embeddings, feed_dict=feed_dict)
            classifier_filename_exp = os.path.expanduser(
                args.classifier_filename)
            with open(classifier_filename_exp, 'rb') as infile:
                (model, class_names) = pickle.load(infile)
            print('Loaded classifier model from file "%s"\n' %
                  classifier_filename_exp)
            predictions = model.predict_proba(emb)
            best_class_indices = np.argmax(predictions, axis=1)
            best_class_probabilities = predictions[
                np.arange(len(best_class_indices)), best_class_indices]
            k = 0
            for i in range(nrof_samples):
                print("\npeople in image %s :" % (args.image_files[i]))
                for j in range(cout_per_image[i]):
                    print('%s: %.3f' % (class_names[best_class_indices[k]],
                                        best_class_probabilities[k]))
                    k += 1
Example #20
def get_index(aligned_user_img_path, trained_model_path):
    embeddings_arr = None
    with tf.Graph().as_default():
        with tf.Session() as sess:
            facenet.load_model(trained_model_path)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            images = facenet.load_data([aligned_user_img_path], False, False,
                                       IMG_SIZE)
            feed_dict = {
                images_placeholder: images,
                phase_train_placeholder: False
            }
            embeddings_arr = sess.run(embeddings, feed_dict=feed_dict)
    return embeddings_arr[0]
Example #21
    def __init__(self, model):
        print("model path:{0}".format(model))
        self.gpu_memory_fraction = 0.4
        self.minsize = 20  # minimum size of face
        self.threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
        self.factor = 0.709  # scale factor
        self.margin = 44
        self.image_size = 160
        self.compare_threshold = 0.99

        print('Creating networks and loading parameters')
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=self.gpu_memory_fraction, allow_growth = True)
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(self.sess, None)
        # Load the model
        facenet.load_model(model)

        # Get input and output tensors
        self.images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        self.embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        self.phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
Example #22
    def run(self):
        with tf.Graph().as_default():
            with tf.Session() as sess:
                facenet.load_model(self.model_path)
                images_placeholder = tf.get_default_graph().get_tensor_by_name(
                    "input:0")
                embeddings = tf.get_default_graph().get_tensor_by_name(
                    "embeddings:0")
                phase_train_placeholder = tf.get_default_graph(
                ).get_tensor_by_name("phase_train:0")
                for k in self.key_value.keys():
                    feed_dict = {
                        images_placeholder: self.key_value[k],
                        phase_train_placeholder: False
                    }
                    emb0 = sess.run(embeddings, feed_dict=feed_dict)
                    self.key_value[k] = emb0

                while True:
                    image_dict = self.i_q.get()
                    dist_white_ends = image_dict["dst"]
                    label_distance = []
                    for white_end in dist_white_ends:
                        label = ""
                        f_min = np.Inf  # tracks the smallest distance seen
                        image = white_end[None]
                        feed_dict = {
                            images_placeholder: image,
                            phase_train_placeholder: False
                        }
                        emb = sess.run(embeddings, feed_dict=feed_dict)
                        for k, v in self.key_value.items():
                            distance = np.sqrt(
                                np.sum(np.square(np.subtract(emb, v))))
                            if distance < f_min:
                                f_min = distance
                                label = k
                        label_distance.append((label, f_min))
                    image_dict["ret"] = label_distance
                    self.o_q.put(image_dict)
Example #23
def getFacenetFeatures(splits, facenetModelDir, lfwAlignedDir):
    print("  + Loading Facenet features.")

    video_features = {}

    # For every video in the pairs, create list for features
    for split in splits:
        for pair in split:
            if not pair[0] in video_features:
                video_features[pair[0]] = []
            if not pair[1] in video_features:
                video_features[pair[1]] = []
    
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            meta_file, ckpt_file = facenet.get_model_filenames(facenetModelDir)
            facenet.load_model(facenetModelDir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # For every video get the aligned first 100 frames and create features of them
            for video in video_features:
                repCache = {}
                videoDir = os.path.join(lfwAlignedDir, video)
                image_paths = os.listdir(videoDir)
                images = loadFacenetImages(image_paths, videoDir, image_size)

                # Feed single batch of 100 images to network for features
                feed_dict = { images_placeholder:images }
                emb_array = sess.run(embeddings, feed_dict=feed_dict)

                video_features[video] = emb_array
    return video_features
Example #24
 def __init__(self,
              image_size=(160, 160),
              data_path=r".\data\ms_faces",
              embeddings_path=r".\data\data_facenet.json",
              train=False):
     """
         :param image_size: Image size to which calculates embedding.
         :param data_path: Path to the base of faces. It is assumed that all faces are frontal.
         :param embeddings_path: Path to the base of embeddings.
         :param train: If True, train a new database of embeddings.
     """
     self.image_size = image_size
     facenet.load_model("facenet_model/20180408-102900.pb")
     names = os.listdir(data_path)
     name_face_path = {}
     for name in names:
         faces = os.listdir(os.path.join(data_path, name))
         for face in faces:
             path = os.path.join(data_path, name, face)
             name_face_path.setdefault(name, []).append(path)
     self.sess = tf.Session()
     if train:
         self.embeddings = dict.fromkeys(name_face_path.keys())
         for name in name_face_path.keys():
             emb = []
             for img_path in name_face_path[name]:
                 image = read_rgb(img_path)
                 emb.append(
                     self.get_embedding(facenet.prewhiten(image)).tolist())
             self.embeddings[name] = emb
         with open(embeddings_path, mode="w") as f:
             json.dump(self.embeddings, f, indent=4)
     else:
         with open(embeddings_path, mode="r") as f:
             self.embeddings = json.load(f)
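
The class above calls self.get_embedding, which is not shown; a minimal sketch consistent with the standard facenet tensor names (the helper body is an assumption, and face is expected to be prewhitened and already sized to self.image_size):

 def get_embedding(self, face):
     # Hypothetical helper, not part of the original class.
     g = tf.get_default_graph()
     emb = self.sess.run(
         g.get_tensor_by_name("embeddings:0"),
         feed_dict={g.get_tensor_by_name("input:0"): [face],
                    g.get_tensor_by_name("phase_train:0"): False})
     return emb[0]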
Example #25
    def __init__(self,
                 aligned_img_folder='../aligned_sized/',
                 aligned_usr_img_folder='../tmp/',
                 aligned_img_size=160,
                 pretrained_model='../pretrained_model',
                 debug=False):
        self.debug = debug
        self._aligned_usr_img_folder = aligned_usr_img_folder
        self._aligned_img_folder = aligned_img_folder
        self._aligned_img_size = aligned_img_size
        self._aligned_img_ext = 'png'
        self._raw_img_ext = 'jpg'
        self._img_filenames = self.get_img_filenames()

        path_to_cv2_xmls = os.path.join(cv2.__path__[0], 'data')
        self._face_cascade = cv2.CascadeClassifier(
            os.path.join(path_to_cv2_xmls,
                         'haarcascade_frontalface_default.xml'))
        self._eye_cascade = cv2.CascadeClassifier(
            os.path.join(path_to_cv2_xmls, 'haarcascade_eye.xml'))

        # load model
        self._sess = tf.Session()
        with self._sess.as_default():
            facenet.load_model(pretrained_model)
        self._images_placeholder = tf.get_default_graph().get_tensor_by_name(
            "input:0")
        self._embeddings = tf.get_default_graph().get_tensor_by_name(
            "embeddings:0")
        self._phase_train_placeholder = tf.get_default_graph(
        ).get_tensor_by_name("phase_train:0")

        # index service
        index_requester_url = 'http://localhost' if self.debug else 'http://faces_index_search_1'
        self._index_requester = IndexRequester(url=index_requester_url,
                                               port=8081)
Example #26
def facenetExp(lfwAligned, facenetModelDir, cls):
    df = pd.DataFrame(columns=('nPpl', 'nImgs',
                               'trainTimeSecMean', 'trainTimeSecStd',
                               'predictTimeSecMean', 'predictTimeSecStd',
                               'accsMean', 'accsStd'))


    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            meta_file, ckpt_file = facenet.get_model_filenames(facenetModelDir)
            facenet.load_model(facenetModelDir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            repCache = {}

            df_i = 0
            for nPpl in nPplVals:
                print(" + nPpl: {}".format(nPpl))
                (X, y) = getFacenetData(lfwAligned, nPpl, nImgs, image_size)
                nSampled = X.shape[0]
                print('nSampled:', nSampled)
                ss = ShuffleSplit(nSampled, n_iter=10, test_size=0.1, random_state=0)

                allTrainTimeSec = []
                allPredictTimeSec = []
                accs = []

                for train, test in ss:
                    #print 'split:', train, test
                    X_train = []
                    Y_train = []
                    for index, vid in zip(train, X[train]):
                        imgs = vid  # use the video's frames as one batch for a single forward pass
                        imgs = np.array(imgs)
                        feed_dict = { images_placeholder:imgs }
                        rep_array = sess.run(embeddings, feed_dict=feed_dict)
                        rep_array = np.array(rep_array)
                        print('train', rep_array.shape, rep_array.mean(axis=0).shape)
                        X_train.append(rep_array.mean(axis=0))
                        Y_train.append(y[index])

                    start = time.time()
                    X_train = np.array(X_train)
                    Y_train = np.array(Y_train)
                    cls.fit(X_train, Y_train)
                    trainTimeSec = time.time() - start
                    allTrainTimeSec.append(trainTimeSec)

                    start = time.time()
                    X_test = []
                    Y_test = []
                    for index, vid in zip(test, X[test]):
                        imgs = vid
                        imgs = np.array(imgs)
                        feed_dict = { images_placeholder:imgs }
                        rep_array = sess.run(embeddings, feed_dict=feed_dict)
                        rep_array = np.array(rep_array)
                        print('test', rep_array.shape, rep_array.mean(axis=0).shape)
                        X_test.append(rep_array.mean(axis=0))
                        Y_test.append(y[index])

                    y_predict = cls.predict(X_test)
                    predictTimeSec = time.time() - start
                    allPredictTimeSec.append(predictTimeSec / len(test))
                    y_predict = np.array(y_predict)
                    Y_test = np.array(Y_test)
                    acc = accuracy_score(Y_test, y_predict)
                    accs.append(acc)
                
                print('accs:', accs)
                df.loc[df_i] = [nPpl, nImgs,
                                np.mean(allTrainTimeSec), np.std(allTrainTimeSec),
                                np.mean(allPredictTimeSec), np.std(allPredictTimeSec),
                                np.mean(accs), np.std(accs)]
                df_i += 1

    return df
Example #27
def inference(model, tfRecord, img_paths, top, av):
    # step1 crop the face in the list
    img_list = []
    for image_name in img_paths:
        face_loc = find_rectangle_of_face(image_name)
        if not face_loc:
            print("Not found the Face in the Picture")
        else:
            img_list.append(crop_resize_image(image_name, face_loc, "", av.img_size, av.img_type, store_ = 0))
    # step 2 store img as the np format
    from facenet.src.facenet import load_model, prewhiten
    emb_list = []
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.8
    for i in img_list:
        with tf.Graph().as_default():
            with tf.Session(config = config) as sess:
                load_model(model)
                images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
                embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
                phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
                prewhitened = prewhiten(i)
                feed_dict = { images_placeholder: [prewhitened], phase_train_placeholder:False }
                emb = sess.run(embeddings, feed_dict=feed_dict)
                emb_list.append(emb)
    
    dataset = read_and_decodeEmb(tfRecord, av.batch_size, 512)  # 512 is the model's embedding size
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    init_op = tf.group(tf.global_variables_initializer(),
        tf.local_variables_initializer())    
    # step 3 calculate the confidence level
    dist_dict  = {}
    label_dict = {}
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.8
    with tf.Session(config = config)  as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        sess.run(iterator.initializer)
        while True:
            try:
                next_example, next_label = sess.run(next_element)
                for emb in emb_list:
                    for i, ele in enumerate(next_example):
                        if str(next_label[i]) not in dist_dict:
                            dist_dict[str(next_label[i])]  = 0
                        if str(next_label[i]) not in label_dict:
                            label_dict[str(next_label[i])] = 0
                        dist = np.sqrt(np.sum(np.square(np.subtract(ele, emb))))
                        dist_dict[str(next_label[i])]  = dist + dist_dict[str(next_label[i])] 
                        label_dict[str(next_label[i])] =    1 + label_dict[str(next_label[i])]
            except tf.errors.OutOfRangeError:
                break
        coord.request_stop()
        coord.join(threads)
        
    for i in range(len(av.list_name)):
        dist_dict[str(i)] = (2 * label_dict[str(i)] - dist_dict[str(i)]) * 50 / label_dict[str(i)]
    from collections import Counter 
    k = Counter(dist_dict) 
    top = len(av.list_name) if top > len(av.list_name) else top
    high = k.most_common(top) 
    for i, ele in enumerate(high):
        print("第 {} 相似為: {}, 相似度: {:.1f}%".format(i + 1, av.rdict_name[ele[0]], ele[1]))
Example #28
File: clu.py  Project: SEJUNHONG/Capstone
def main():
    #pnet, rnet, onet = create_network_face_detection(sess, project_root_folder + "src/align")
    # note: the second argument is absolute, so os.path.join returns it as-is
    project_root_path = os.path.join(
        os.path.abspath(__file__), "C:/Users/mmlab/PycharmProjects/UI_pyqt/")
    # Feel free to replace this and use actual commandline args instead, the main method will still work
    args = lambda: None
    args.data_dir = project_root_path + 'src/test_file'
    args.model = project_root_path + '20180402-114759/20180402-114759.pb'
    args.out_dir = project_root_path + 'cluster_people'
    args.largest_cluster_only = False
    args.image_size = 160
    args.margin = 44
    args.min_cluster_size = 10
    args.cluster_threshold = 0.57
    args.gpu_memory_fraction = 1.0

    shutil.rmtree('C:/Users/mmlab/PycharmProjects/UI_pyqt/cluster_people')
    os.mkdir('C:/Users/mmlab/PycharmProjects/UI_pyqt/cluster_people')

    with tf.Graph().as_default():
        with tf.Session() as sess:
            facenet.load_model(args.model)
            pnet, rnet, onet = src.align.detect_face.create_mtcnn(
                sess, "C:/Users/mmlab/PycharmProjects/UI_pyqt/src/align")
            image_list = load_images_from_folder(args.data_dir)
            print(args.data_dir)
            images = align_data(image_list, args.image_size, args.margin, pnet,
                                rnet, onet)

            images_placeholder = sess.graph.get_tensor_by_name("input:0")
            embeddings = sess.graph.get_tensor_by_name("embeddings:0")
            phase_train_placeholder = sess.graph.get_tensor_by_name(
                "phase_train:0")
            feed_dict = {
                images_placeholder: images,
                phase_train_placeholder: False
            }
            emb = sess.run(embeddings, feed_dict=feed_dict)

            nrof_images = len(images)

            matrix = np.zeros((nrof_images, nrof_images))

            print('')
            # Print distance matrix
            print('Distance matrix')
            print('    ', end='')
            for i in range(nrof_images):
                print('    %1d     ' % i, end='')
            print('')
            for i in range(nrof_images):
                print('%1d  ' % i, end='')
                for j in range(nrof_images):
                    dist = np.sqrt(
                        np.sum(np.square(np.subtract(emb[i, :], emb[j, :]))))
                    matrix[i][j] = dist
                    print('  %1.4f  ' % dist, end='')
                print('')

            print('')

            # DBSCAN is the only algorithm that doesn't require the number of clusters to be defined.
            db = DBSCAN(eps=args.cluster_threshold,
                        min_samples=args.min_cluster_size,
                        metric='precomputed')
            db.fit(matrix)
            labels = db.labels_

            # get number of clusters
            no_clusters = len(set(labels)) - (1 if -1 in labels else 0)

            print('No of clusters:', no_clusters)

            if no_clusters > 0:
                if args.largest_cluster_only:
                    largest_cluster = 0
                    for i in range(no_clusters):
                        print('Cluster {}: {}'.format(
                            i,
                            np.nonzero(labels == i)[0]))
                        if len(np.nonzero(labels == i)[0]) > len(
                                np.nonzero(labels == largest_cluster)[0]):
                            largest_cluster = i
                    print('Saving largest cluster (Cluster: {})'.format(
                        largest_cluster))
                    cnt = 1
                    for i in np.nonzero(labels == largest_cluster)[0]:
                        misc.imsave(
                            os.path.join(args.out_dir,
                                         str(cnt) + '.png'), images[i])
                        cnt += 1
                else:
                    print('Saving all clusters')
                    for i in range(no_clusters):
                        cnt = 1
                        print('Cluster {}: {}'.format(
                            i,
                            np.nonzero(labels == i)[0]))
                        path = os.path.join(args.out_dir, str(i) + 'human')
                        # create the cluster folder if needed, then save faces
                        if not os.path.exists(path):
                            os.makedirs(path)
                        for j in np.nonzero(labels == i)[0]:
                            misc.imsave(
                                os.path.join(
                                    path,
                                    str(i) + 'human' + str(cnt) + '.png'),
                                images[j])
                            cnt += 1
Example #29
def main(classifierpath, slotid, imagepath, pretrained_model):

    MINSIZE = 20
    THRESHOLD = [0.6, 0.7, 0.7]
    FACTOR = 0.709
    IMAGE_SIZE = 182
    INPUT_IMAGE_SIZE = 160
    CLASSIFIER_PATH = classifierpath
    SLOTID = slotid
    IMAGE_PATH = imagepath
    FACENET_MODEL_PATH = pretrained_model

    # IMAGE_PATH = IMAGE_PATH[1:-1].split(",")
    # return

    # Load The Custom Classifier
    with open(CLASSIFIER_PATH, "rb") as file:
        model, class_names = pickle.load(file)
    # print("Custom Classifier, Successfully loaded")

    with tf.Graph().as_default():

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))

        with sess.as_default():

            # Load the model
            # print('Loading feature extraction model')
            facenet.load_model(FACENET_MODEL_PATH)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            pnet, rnet, onet = align.detect_face.create_mtcnn(
                sess,
                os.path.dirname(os.path.realpath(__file__)) + "/align/")

            # people_detected = set()
            person_detected = collections.Counter()

            ctr = 1
            face_list = {}
            unknown_list = {}
            unknown_count = 0
            detected_face = {}
            for IMG_ADDR in IMAGE_PATH:
                ctr = ctr + 1
                frame = imageio.imread(IMG_ADDR)

                bounding_boxes, _ = align.detect_face.detect_face(
                    frame, MINSIZE, pnet, rnet, onet, THRESHOLD, FACTOR)

                faces_found = bounding_boxes.shape[0]
                print("Faces found :", faces_found)
                try:
                    if faces_found > 0:
                        det = bounding_boxes[:, 0:4]
                        bb = np.zeros((faces_found, 4), dtype=np.int32)
                        for i in range(faces_found):
                            bb[i][0] = det[i][0]
                            bb[i][1] = det[i][1]
                            bb[i][2] = det[i][2]
                            bb[i][3] = det[i][3]

                            cropped = frame[bb[i][1]:bb[i][3],
                                            bb[i][0]:bb[i][2], :]
                            # print(type(cropped))
                            scaled = cv2.resize(
                                cropped, (INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE),
                                interpolation=cv2.INTER_CUBIC)
                            scaled = facenet.prewhiten(scaled)
                            scaled_reshape = scaled.reshape(
                                -1, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE, 3)
                            feed_dict = {
                                images_placeholder: scaled_reshape,
                                phase_train_placeholder: False
                            }
                            emb_array = sess.run(embeddings,
                                                 feed_dict=feed_dict)
                            predictions = model.predict_proba(emb_array)
                            best_class_indices = np.argmax(predictions, axis=1)
                            prob_claa = {}
                            best_prob = 0
                            second_best_prob = 0
                            third_best_prob = 0
                            best_name = ""
                            second_best_name = ""
                            third_best_name = ""
                            for iterr in range(len(predictions[0])):
                                prob_claa[str(class_names[iterr]
                                              )] = predictions[0][iterr]
                                if predictions[0][iterr] > best_prob:
                                    best_prob = predictions[0][iterr]
                                    best_name = str(class_names[iterr])
                            for key in prob_claa.keys():
                                if (prob_claa[key] > second_best_prob) and (
                                        prob_claa[key] < best_prob):
                                    second_best_name = key
                                    second_best_prob = prob_claa[key]
                            for key in prob_claa.keys():
                                if (prob_claa[key] > third_best_prob) and (
                                        prob_claa[key] < second_best_prob):
                                    third_best_name = key
                                    third_best_prob = prob_claa[key]
                            best_class_probabilities = predictions[
                                np.arange(len(best_class_indices)),
                                best_class_indices]
                            best_name = class_names[best_class_indices[0]]
                            cv2.rectangle(frame, (bb[i][0], bb[i][1]),
                                          (bb[i][2], bb[i][3]), (0, 255, 0), 2)
                            text_x = bb[i][0]
                            text_y = bb[i][3] + 20

                            if best_class_probabilities > 0.20:
                                name = class_names[best_class_indices[0]]
                                # print(name)
                                id = uuid.uuid1().int
                                entry = {
                                    "name": name,
                                    "prob": best_class_probabilities[0],
                                    "best_name": best_name,
                                    "second_best_name": second_best_name,
                                    "third_best_name": third_best_name,
                                    "prob_list": prob_claa,
                                }
                                detected_face[id] = cropped
                                # imageio.imwrite("detected_faces/{}-{}.jpg".format(entry['name'], entry['prob']), cropped)
                                face_list[id] = entry
                                # cv2.putText(frame, name[10:], (text_x, text_y), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 255), thickness=1, lineType=2)
                            else:
                                id = uuid.uuid1().int
                                detected_face[id] = cropped
                                name = "Unknown" + str(unknown_count)
                                guess = class_names[best_class_indices[0]]
                                prob = best_class_probabilities[0]
                                entry = {
                                    "name": name,
                                    "guess": guess,
                                    "prob": prob,
                                }
                                unknown_count += 1
                                unknown_list[id] = entry
                                # cv2.putText(frame, name, (text_x, text_y), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 255), thickness=1, lineType=2)

                            # cv2.putText(frame, str(round(best_class_probabilities[0], 3)), (text_x, text_y + 17), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 255), thickness=1, lineType=2)
                            person_detected[best_name] += 1
                except Exception:
                    # swallow per-frame errors and continue with the next image
                    pass

                # cv2.imshow('Face Recognition', frame)
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    break

            # cap.release()
            duplicate_keys = find_duplicate(face_list)
            present_list = []
            absent_list = []
            # for key in duplicate_keys:
            #     face_list[key] = replace_with_second_best(face_list[key])
            for key in detected_face.keys():
                if key in face_list:
                    imageio.imwrite(
                        "./static/{}--{}-{}.jpg".format(
                            SLOTID, face_list[key]["name"],
                            face_list[key]["prob"]),
                        detected_face[key],
                    )
                    present_list.append(face_list[key]["name"].split(" ")[0])
                else:
                    imageio.imwrite(
                        "./static/{}_{}-{}.jpg".format(
                            SLOTID, unknown_list[key]["name"],
                            unknown_list[key]["prob"]),
                        detected_face[key],
                    )
                    absent_list.append(unknown_list[key]["name"].split(" ")[0])
            print(json.dumps(face_list, indent=4))
            cv2.destroyAllWindows()
            return {"regno": present_list}
Example #30
recognition_model = '20180402-114759/'  # Name of the folder containing the recognition model inside the specified models folder
image_size = 160  # Check the recognition model's input layer before changing this value
margin = 20  # Number of margin pixels to keep around faces when cropping

# Don't change the next set of parameters unless necessary
base_url = "http://" + get_default_gateway_linux() + ":8443/"  # Detection service base URL
rec_model_folder = '/root/models/recognition/' + recognition_model  # Path to the folder containing the recognition model
dataset_binary = '/root/data/features_data.npy'  # Path to the file containing the recognition dataset features
labels_binary = '/root/data/labels.npy'  # Path to the file containing the recognition dataset labels

app = Flask(__name__)

with tf.Graph().as_default():
    with tf.Session() as sess:
        facenet.load_model(rec_model_folder)
        # Get input and output tensors
        images_placeholder = tf.get_default_graph().get_tensor_by_name(
            "input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
            "phase_train:0")
        # embedding_size = embeddings.get_shape()[1]
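        # Sketch: how these tensors are used to embed a batch of aligned
        # 160x160 RGB face crops (`aligned_faces` is a hypothetical name);
        # phase_train must be fed False at inference time.
        # batch = np.stack([facenet.prewhiten(f) for f in aligned_faces])
        # emb = sess.run(embeddings, feed_dict={
        #     images_placeholder: batch,
        #     phase_train_placeholder: False,
        # })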

        emb_array = []
        labels = []
        try:
            emb_array = np.load(dataset_binary)
            labels = np.load(labels_binary)
        except IOError:
            # hypothetical completion of a statement truncated in the source
            print("warning: could not load dataset binaries %s / %s" %
                  (dataset_binary, labels_binary))
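        # Sketch (hypothetical): the two binaries are produced offline by
        # embedding a folder of aligned faces (see the sketch above) and then:
        # np.save(dataset_binary, emb)
        # np.save(labels_binary, np.array(label_names))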
Example #31
0
def facenetExp(lfwAligned, facenetModelDir, cls):
    df = pd.DataFrame(columns=('nPpl', 'nImgs',
                               'trainTimeSecMean', 'trainTimeSecStd',
                               'predictTimeSecMean', 'predictTimeSecStd',
                               'accsMean', 'accsStd'))

    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model
            meta_file, ckpt_file = facenet.get_model_filenames(facenetModelDir)
            facenet.load_model(facenetModelDir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            repCache = {}

            df_i = 0
            for nPpl in nPplVals:
                print(" + nPpl: {}".format(nPpl))
                (X, y) = getFacenetData(lfwAligned, nPpl, nImgs, image_size)
                nSampled = X.shape[0]
                # pre-0.18 scikit-learn API: ShuffleSplit is iterated directly
                ss = ShuffleSplit(nSampled, n_iter=10, test_size=0.1, random_state=0)

                allTrainTimeSec = []
                allPredictTimeSec = []
                accs = []

                for train, test in ss:
                    X_train = []
                    for img in X[train]:
                        h = hash(str(img.data))
                        if h in repCache:
                            rep = repCache[h]
                        else:
                            imgs = [img]
                            imgs = np.array(imgs)
                            feed_dict = { images_placeholder:imgs }
                            emb = sess.run(embeddings, feed_dict=feed_dict)
                            rep = emb[0]
                            repCache[h] = rep
                        X_train.append(rep)

                    start = time.time()
                    X_train = np.array(X_train)
                    cls.fit(X_train, y[train])
                    trainTimeSec = time.time() - start
                    allTrainTimeSec.append(trainTimeSec)

                    start = time.time()
                    X_test = []
                    for img in X[test]:
                        imgs = [img]
                        imgs = np.array(imgs)
                        feed_dict = { images_placeholder:imgs }
                        emb = sess.run(embeddings, feed_dict=feed_dict)
                        X_test.append(emb[0])
                    y_predict = cls.predict(X_test)
                    predictTimeSec = time.time() - start
                    allPredictTimeSec.append(predictTimeSec / len(test))
                    y_predict = np.array(y_predict)

                    acc = accuracy_score(y[test], y_predict)
                    accs.append(acc)

                df.loc[df_i] = [nPpl, nImgs,
                                np.mean(allTrainTimeSec), np.std(allTrainTimeSec),
                                np.mean(allPredictTimeSec), np.std(allPredictTimeSec),
                                np.mean(accs), np.std(accs)]
                df_i += 1

    return df
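
# Usage sketch for facenetExp (hypothetical paths; nPplVals and nImgs are
# module-level globals in the original benchmark script):
# from sklearn.svm import SVC
# df = facenetExp('lfw/aligned', 'models/20170512-110547',
#                 SVC(kernel='linear', probability=True))
# print(df)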
Example #33
0
 def __init__(self, facenet_model_checkpoint):
     self.sess = tf.Session()
     with self.sess.as_default():
         facenet.load_model(facenet_model_checkpoint)
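
# The listing truncates this example after load_model; in facenet's
# contributed face.py the class continues roughly as sketched below
# (hypothetical reconstruction, not verbatim):
#
#  def generate_embedding(self, face):
#      g = tf.get_default_graph()
#      images_placeholder = g.get_tensor_by_name("input:0")
#      embeddings = g.get_tensor_by_name("embeddings:0")
#      phase_train_placeholder = g.get_tensor_by_name("phase_train:0")
#      prewhiten_face = facenet.prewhiten(face.image)
#      feed_dict = {images_placeholder: [prewhiten_face],
#                   phase_train_placeholder: False}
#      return self.sess.run(embeddings, feed_dict=feed_dict)[0]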