Example No. 1
    def main_train(self):
        with tf.Graph().as_default():
            with tf.Session() as sess:
                #                img_data = facenet.get_dataset_all(self.datadir)
                #                path, label = facenet.get_image_paths_and_labels(img_data)
                labs, imgs = aug.Make_Data(self.datadir)
                total_imgs_len = len(imgs) * len(imgs[0])
                print('Classes: %d' % len(imgs))
                print('Images: %d' % total_imgs_len)

                facenet.load_model(self.modeldir)
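                # Look up the input, embedding, and phase-train tensors in the
                # graph that facenet.load_model just restored.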
                images_placeholder = tf.get_default_graph().get_tensor_by_name(
                    "input:0")
                embeddings = tf.get_default_graph().get_tensor_by_name(
                    "embeddings:0")
                phase_train_placeholder = tf.get_default_graph(
                ).get_tensor_by_name("phase_train:0")
                embedding_size = embeddings.get_shape()[1]

                print('Extracting features of images for model')
                batch_size = 1000
                #                image_size = 160
                nrof_images = total_imgs_len
                nrof_batches_per_epoch = int(
                    math.ceil(1.0 * nrof_images / batch_size))
                emb_array = np.zeros((nrof_images, embedding_size))
                imgs = np.array(imgs).reshape(total_imgs_len, 200, 200, 3)

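                # Run the images through the network in batches and collect an
                # embedding row for every image.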
                for i in range(nrof_batches_per_epoch):
                    start_index = i * batch_size
                    end_index = min((i + 1) * batch_size, nrof_images)
                    #                    paths_batch = path[start_index:end_index]
                    imgs_batch = imgs[start_index:end_index]
                    #                    images = facenet.load_data(paths_batch, False, False, image_size)
                    #                    feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                    feed_dict = {
                        images_placeholder: imgs_batch,
                        phase_train_placeholder: False
                    }
                    emb_array[start_index:end_index, :] = sess.run(
                        embeddings, feed_dict=feed_dict)

                classifier_file_name = os.path.expanduser(
                    self.classifier_filename)

                # Training started. NOTE: `label` and `img_data` were only defined
                # by the commented-out facenet loading above; we assume `labs` holds
                # one label per augmented image, nested like `imgs` was, and flatten
                # it so it lines up with the rows of emb_array.
                print('Training Started')
                labels = np.asarray(labs).reshape(-1)
                model = SVC(kernel='linear', probability=True)
                model.fit(emb_array, labels)

                # Class names in the order the SVC saw them (assumes the labels
                # are the class names themselves).
                class_names = [str(cls).replace('_', ' ') for cls in model.classes_]

                # Saving model
                with open(classifier_file_name, 'wb') as outfile:
                    pickle.dump((model, class_names), outfile)
                return classifier_file_name
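For context, here is a minimal sketch of how the (model, class_names) pair that main_train pickles could be used at prediction time. The function name predict_name and the single-embedding argument emb are illustrative assumptions, not part of the original code; emb is expected to be produced the same way as a row of emb_array above.

import pickle

import numpy as np


def predict_name(classifier_file_name, emb):
    # Load the pickled SVC and the class names saved alongside it.
    with open(classifier_file_name, 'rb') as infile:
        model, class_names = pickle.load(infile)
    # predict_proba columns follow model.classes_, which is also the order
    # class_names was built in above.
    probs = model.predict_proba(np.asarray(emb).reshape(1, -1))[0]
    best = int(np.argmax(probs))
    return class_names[best], probs[best]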
Example No. 2
import imgaug as ia
import cv2
import augmentation as aug

label_faces, aug_faces = aug.Make_Data("avengers")

for i in range(len(aug_faces)):
    ia.imshow(ia.draw_grid(aug_faces[i], cols=8))

# cv2.imshow("img", aug_faces[0][0])
# cv2.waitKey(0)
# cv2.destroyAllWindows()
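The augmentation module itself is not shown here. As a rough, hedged sketch of the interface these examples rely on (one list of labels and one list of augmented 200x200 images per person), a Make_Data-style helper built on imgaug could look like the following; the directory-per-person layout, image size, and augmenter choices are all assumptions.

import os

import cv2
import imgaug.augmenters as iaa


def Make_Data(datadir, copies=8):
    # Assumed augmentation pipeline: flips, small rotations, mild noise.
    seq = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.Affine(rotate=(-15, 15)),
        iaa.AdditiveGaussianNoise(scale=(0, 10)),
    ])
    labs, imgs = [], []
    for person in sorted(os.listdir(datadir)):
        person_dir = os.path.join(datadir, person)
        if not os.path.isdir(person_dir):
            continue
        originals = []
        for fname in os.listdir(person_dir):
            img = cv2.imread(os.path.join(person_dir, fname))
            if img is None:
                continue
            originals.append(cv2.resize(img, (200, 200)))
        # One label list and one image list per person, nested the way
        # Example No. 1 expects ([class][image]).
        augmented = seq(images=originals * copies)
        labs.append([person] * len(augmented))
        imgs.append(augmented)
    return labs, imgs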
Example No. 3
import imgaug as ia
import cv2
import augmentation as aug

name_faces, aug_faces = aug.Make_Data("avengers")

for i in range(len(aug_faces)):
    # draw_grid expects images, not the labels in name_faces.
    ia.imshow(ia.draw_grid(aug_faces[i], cols=8))

# cv2.imshow("img", aug_faces[0][0])
# cv2.waitKey(0)
# cv2.destroyAllWindows()