Example no. 1
import os

import numpy as np
import tensorflow as tf
from sklearn.datasets import load_files

import facenet  # facenet.py from the FaceNet project, assumed to be on PYTHONPATH


# get_output_path() is assumed to be a helper defined elsewhere in this script.
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # create output directory if it doesn't exist
            output_dir = os.path.expanduser(args.output_dir)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)
            # load the model
            print("Loading trained model...\n")
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.trained_model_dir))
            facenet.load_model(args.trained_model_dir)

            # grab all image paths and labels
            print("Finding image paths and targets...\n")
            data = load_files(args.data_dir, load_content=False, shuffle=False)
            # labels_array = data['target']
            # paths = data['filenames']
            # print(data)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input_ID:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings_ID:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Generating embeddings from images...\n')
            # emb = np.zeros(embedding_size)
            dirs = os.listdir(args.data_dir)
            dirs.sort()
            for subdir in dirs:
                path = os.path.join(args.data_dir, subdir)
                if os.path.isdir(path):
                    print("path: ", path)
                    files = os.listdir(path)
                    files.sort()
                    for fname in files:
                        output_path = get_output_path(output_dir, subdir, fname)
                        image_path = os.path.join(path, fname)
                        images = facenet.load_image(image_path,
                                                    do_random_crop=False,
                                                    do_random_flip=False,
                                                    image_size=image_size,
                                                    do_prewhiten=True)
                        feed_dict = {
                            images_placeholder: images,
                            phase_train_placeholder: False
                        }
                        emb = sess.run(embeddings, feed_dict=feed_dict)
                        np.save(output_path, emb[0])
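
The script above stores one embedding per image as a .npy file. A minimal sketch of how two of those saved vectors might be compared afterwards (the file names are hypothetical):

import numpy as np

# Hypothetical output files produced by the script above.
emb_a = np.load("outputs/person_a/img_0001.npy")
emb_b = np.load("outputs/person_b/img_0001.npy")

# FaceNet-style embeddings are typically L2-normalised, so the squared
# Euclidean distance is a common similarity measure; a smaller value
# suggests the two faces are more likely the same identity.
dist = np.sum(np.square(emb_a - emb_b))
print("squared L2 distance:", dist)
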
Example no. 2
import os
import pickle

import tensorflow as tf

import facenet  # assumed importable as in Example no. 1


def main(args):
    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Get the paths for the corresponding images

            # vinayak = ['datasets/kar_Vin_aligned/vinayak/' + f for f in
            #            os.listdir('datasets/kar_Vin_aligned/vinayak')]
            # karthik = ['datasets/kar_Vin_aligned/karthik/' + f for f in
            #            os.listdir('datasets/kar_Vin_aligned/karthik')]
            # ashish = ['datasets/kar_Vin_aligned/Ashish/' + f for f in
            #           os.listdir('datasets/kar_Vin_aligned/Ashish')]
            # saurabh = ['datasets/kar_Vin_aligned/Saurabh/' + f for f in
            #            os.listdir('datasets/kar_Vin_aligned/Saurabh')]
            # hari = ['datasets/kar_Vin_aligned/Hari/' + f for f in
            #         os.listdir('datasets/kar_Vin_aligned/Hari')]
            flexface = ['outputs/' + f for f in os.listdir('outputs/')]
            # paths = vinayak + karthik + ashish + saurabh + hari
            paths = flexface
            # np.save("images.npy",paths)
            # Load the model
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            # Note: feeding images_placeholder below actually feeds the output of
            # this resize op, so images supplied via feed_dict must already be
            # 160x160 and the resize itself is not executed.
            images_placeholder = tf.image.resize_images(
                images_placeholder, (160, 160))
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            image_size = args.image_size
            embedding_size = embeddings.get_shape()[1]
            print("Embedding size: {}".format(embedding_size))
            extracted_dict = {}

            # Run forward pass to calculate embeddings
            for i, filename in enumerate(paths):

                images = facenet.load_image(filename, False, False, image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                feature_vector = sess.run(embeddings, feed_dict=feed_dict)
                extracted_dict[filename] = feature_vector
                if i % 100 == 0:
                    print("completed", i, " images")

            with open('extracted_dict.pickle', 'wb') as f:
                pickle.dump(extracted_dict, f)
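
Once the dictionary has been pickled, it can be reloaded and searched. A minimal sketch, assuming squared Euclidean distance as the similarity measure and using the first stored entry as the query:

import pickle
import numpy as np

with open('extracted_dict.pickle', 'rb') as f:
    extracted_dict = pickle.load(f)

# Compare one stored embedding against all others and report the closest match.
query_path, query_emb = next(iter(extracted_dict.items()))
best_path, best_dist = None, float('inf')
for path, emb in extracted_dict.items():
    if path == query_path:
        continue
    dist = np.sum(np.square(query_emb - emb))
    if dist < best_dist:
        best_path, best_dist = path, dist
print(query_path, "is closest to", best_path, "at distance", best_dist)
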
Example no. 3
    def get_face_database(self):
        if self.loaded:
            return 1
        else:
            if os.path.exists("data/data_faces_from_camera/"):
                self.metadata = facenet.load_metadata(
                    "data/data_faces_from_camera/")
                self.name_known_cnt = 0
                for i in range(0, len(self.metadata)):
                    for j in range(0, len(self.metadata[i])):
                        self.name_known_cnt += 1
                self.embedded = np.zeros((self.name_known_cnt * 8, 128))

                for i, m in enumerate(self.metadata):
                    for j, n in enumerate(m):
                        for k, p in enumerate(n):
                            img = facenet.load_image(p.image_path().replace(
                                "\\", "/"))
                            # img = align_image(img)
                            img = cv2.resize(img, (96, 96))
                            # scale RGB values to interval [0,1]
                            img = (img / 255.).astype(np.float32)
                            # obtain embedding vector for image
                            self.embedded[i] = nn4_small2_pretrained.predict(
                                np.expand_dims(img, axis=0))[0]
                            # self.embedded[i] = self.embedded[i] / len(m)
                            path = p.image_path().replace("\\", "/")
                        self.name_known_list.append(path.split('/')[-2])
                        self.type_known_list.append(path.split('/')[-3])
                # print(self.embedded.shape)
                for i in range(len(self.name_known_list)):
                    if self.type_known_list[i] == 'elder':
                        person_type = 'old'
                    elif self.type_known_list[i] == 'volunteer':
                        person_type = 'employee'
                    else:
                        # keep the stored type for any other label so the
                        # request below never uses an undefined variable
                        person_type = self.type_known_list[i]
                    self.name_known_list[i] = requests.get(
                        "http://zhuooyu.cn:8000/api/person/" + person_type +
                        "/" + str(self.name_known_list[i]) + "/").text

                self.loaded = True
                # print(self.name_known_list)

                return 1
            else:
                print('##### Warning #####', '\n')
                print("'data/data_faces_from_camera/' not found!")
                print(
                    "Please run 'get_faces_from_camera.py' before 'face_reco_from_camera.py'",
                    '\n')
                print('##### End Warning #####')
                return 0
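
For context, a minimal sketch of how the self.embedded matrix built above could be matched against a new 128-dimensional query embedding; find_best_match is a hypothetical helper, not part of the original class:

import numpy as np

def find_best_match(query_embedding, embedded):
    # Squared Euclidean distance between the query and every stored embedding row.
    dists = np.sum(np.square(embedded - query_embedding), axis=1)
    best = int(np.argmin(dists))
    return best, float(dists[best])
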
Example no. 4
    def embed(self, img, do_prewhiten=True):
        if isinstance(img, str):
            # A file path was passed: load and preprocess it with the
            # facenet helper instead of handling the array here.
            images = facenet.load_image([img], False, False,
                                        self.IMAGE_SIZE)
        else:
            if isinstance(img, np.ndarray) and img.ndim == 2:
                # 1-channel (grayscale) image: replicate it into 3 channels
                w, h = img.shape
                ret = np.empty((w, h, 3), dtype=np.uint8)
                ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img
                img = ret
            if do_prewhiten:
                img = facenet.prewhiten(img)
            img = cv2.resize(img, (self.IMAGE_SIZE, self.IMAGE_SIZE))
            images = img.reshape(1, self.IMAGE_SIZE, self.IMAGE_SIZE, 3)
        feed_dict = {
            self.images_placeholder: images,
            self.phase_train_placeholder: False
        }
        feature_vector = self.sess.run(self.embeddings,
                                       feed_dict=feed_dict)
        return feature_vector
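
A minimal usage sketch for the method above; embedder stands for an instance of the surrounding class (its constructor is not shown in the snippet) and the image paths are made up:

import cv2

# `embedder` is assumed to already hold the TF session and the
# input/embeddings tensors referenced by embed().
vec_from_path = embedder.embed("data/aligned/person_a/img_0001.png")

# A grayscale OpenCV image is also accepted; embed() replicates it to 3 channels.
gray = cv2.imread("data/aligned/person_a/img_0002.png", cv2.IMREAD_GRAYSCALE)
vec_from_gray = embedder.embed(gray)
print(vec_from_path.shape, vec_from_gray.shape)
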
def main(args):

    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Get the paths for the corresponding images

            #vinayak =  ['datasets/kar_Vin_aligned/vinayak/' + f for f in os.listdir('datasets/kar_Vin_aligned/vinayak')]
            #karthik =  ['datasets/kar_Vin_aligned/karthik/' + f for f in os.listdir('datasets/kar_Vin_aligned/karthik')]
            #ashish = ['datasets/kar_Vin_aligned/Ashish/' + f for f in os.listdir('datasets/kar_Vin_aligned/Ashish')]
            #saurabh = ['datasets/kar_Vin_aligned/Saurabh/' + f for f in os.listdir('datasets/kar_Vin_aligned/Saurabh')]
            #hari = ['datasets/kar_Vin_aligned/Hari/' + f for f in os.listdir('datasets/kar_Vin_aligned/Hari')]
            #paths = vinayak+karthik+ashish+saurabh+hari
            facedir = args.dir
            print("dir: " + facedir)
            names = [
                os.path.join(facedir, name) for name in os.listdir(facedir)
                if os.path.isdir(os.path.join(facedir, name))
            ]
            paths = []
            for name_dir in names:
                p = [os.path.join(name_dir, f) for f in os.listdir(name_dir)]
                for f in p:
                    print("file: " + f)
                paths += p
            #np.save("images.npy",paths)
            # Load the model
            facenet.load_model(args.model)

            image_size = args.image_size
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            images_placeholder = tf.image.resize_images(
                images_placeholder, (image_size, image_size))
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            embedding_size = embeddings.get_shape()[1]
            extracted_dict = {}

            # Run forward pass to calculate embeddings
            for i, filename in enumerate(paths):
                if os.path.basename(filename).startswith('.'):
                    # skip files starting with dot
                    continue
                print(os.path.dirname(filename))
                images = facenet.load_image(filename, False, False, image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                feature_vector = sess.run(embeddings, feed_dict=feed_dict)
                extracted_dict[filename] = feature_vector
                if i % 100 == 0:
                    print("completed", i, " images")

            with open('extracted_dict.pickle', 'wb') as f:
                pickle.dump(extracted_dict, f)
        # Get input and output tensors
        images_placeholder = tf.get_default_graph().get_tensor_by_name(
            "input:0")
        images_placeholder = tf.image.resize_images(images_placeholder,
                                                    (160, 160))
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
            "phase_train:0")

        image_size = 160
        embedding_size = embeddings.get_shape()[1]
        extracted_dict = {}

        # Run forward pass to calculate embeddings
        for i, filename in enumerate(paths):
            # print(i, filename)
            images = facenet.load_image(filename, False, False, image_size)
            feed_dict = {
                images_placeholder: images,
                phase_train_placeholder: False
            }
            feature_vector = sess.run(embeddings, feed_dict=feed_dict)
            extracted_dict[filename] = feature_vector
            if i % 100 == 0:
                print("completed", i, " images")

        with open(extracted_dict_output_path, 'wb') as f:
            pickle.dump(extracted_dict, f)
        # print('Done dumping')
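
The main(args) function above expects an argparse-style namespace. A minimal sketch of a matching parser; the attribute names mirror args.dir, args.model and args.image_size used in the code, while the positional/optional split and the default value are assumptions:

import argparse
import sys

def parse_arguments(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('dir',
                        help='Root directory with one sub-directory of aligned images per identity.')
    parser.add_argument('model',
                        help='Path to the trained FaceNet model (frozen .pb file or checkpoint directory).')
    parser.add_argument('--image_size', type=int, default=160,
                        help='Side length in pixels of the images fed to the network (assumed default).')
    return parser.parse_args(argv)

if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
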