Example No. 1
    def classifier_dataset(self, data_path, modelName):
        init_value.init_value.init(self)
        with tf.Graph().as_default():
            with tf.Session() as sess:
                dataset = facenet.get_dataset(data_path)
                paths, labels = facenet.get_image_paths_and_labels(dataset)
                print('Number of classes: %d' % len(dataset))
                print('Number of images: %d' % len(paths))

                print('Loading feature extraction model')
                # get Model Path
                facenet.get_pre_model_path(self.pre_model_url,
                                           self.pre_model_zip, self.model_path,
                                           self.pre_model_name)
                facenet.load_model(self.pre_model_name)

                images_placeholder = tf.get_default_graph().get_tensor_by_name(
                    "input:0")
                embeddings = tf.get_default_graph().get_tensor_by_name(
                    "embeddings:0")
                phase_train_placeholder = tf.get_default_graph(
                ).get_tensor_by_name("phase_train:0")
                embedding_size = embeddings.get_shape()[1]

                # Run forward pass to calculate embeddings
                nrof_images = len(paths)
                nrof_batches_per_epoch = int(
                    math.ceil(1.0 * nrof_images / self.batch_size))
                emb_array = np.zeros((nrof_images, embedding_size))
                print('Calculating features for images:(' +
                      str(nrof_batches_per_epoch) + ')')
                for i in range(nrof_batches_per_epoch):
                    print('features :' + str(i))
                    start_index = i * self.batch_size
                    end_index = min((i + 1) * self.batch_size, nrof_images)
                    paths_batch = paths[start_index:end_index]
                    images = facenet.load_data(paths_batch, False, False,
                                               self.image_size)
                    feed_dict = {
                        images_placeholder: images,
                        phase_train_placeholder: False
                    }
                    emb_array[start_index:end_index, :] = sess.run(
                        embeddings, feed_dict=feed_dict)

                classifier_filename = modelName
                classifier_filename_exp = os.path.expanduser(
                    classifier_filename)

                # Train classifier
                print('Training classifier')
                model = SVC(kernel='linear', probability=True)
                model.fit(emb_array, labels)

                # Create a list of class names
                class_names = [cls.name.replace('_', ' ') for cls in dataset]

                # Saving classifier model
                with open(classifier_filename_exp, 'wb') as outfile:
                    pickle.dump((model, class_names), outfile)
                print('Saved classifier model to file "%s"' %
                      classifier_filename_exp)
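
The method above belongs to a wrapper class whose batch size, image size, and pretrained-model paths are filled in by init_value.init_value.init. A minimal, hypothetical driver (the class name FacenetTrainer, the dataset folder, and the output path are assumptions, not part of the original project) might look like this:

    # Hypothetical usage sketch (assumed class and paths): train an SVM on
    # FaceNet embeddings extracted from a folder-per-person dataset and pickle it.
    trainer = FacenetTrainer()  # assumed wrapper class providing this method
    trainer.classifier_dataset(data_path='~/data/train_aligned',
                               modelName='~/models/face_classifier.pkl')

    # The pickle written above holds the (model, class_names) pair and can be
    # reloaded later for prediction.
    import os
    import pickle
    with open(os.path.expanduser('~/models/face_classifier.pkl'), 'rb') as f:
        model, class_names = pickle.load(f)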
Example No. 2
    def roc(self, data_path, modelName, eval_path):
        init_value.init_value.init(self)
        with tf.Graph().as_default():
            with tf.Session() as sess:
                dataset = facenet.get_dataset(data_path)
                paths, labels = facenet.get_image_paths_and_labels(dataset)
                print('Number of classes: %d' % len(dataset))
                print('Number of images: %d' % len(paths))

                eval_dataset = facenet.get_dataset(eval_path)
                eval_paths, eval_labels = facenet.get_image_paths_and_labels(
                    eval_dataset)
                print('Number of classes: %d' % len(eval_dataset))
                print('Number of images: %d' % len(eval_paths))

                eval_labels = np.asarray(eval_labels)
                classes = list(range(len(dataset)))
                eval_labels = label_binarize(eval_labels, classes=classes)

                print('Loading feature extraction model')
                # get Model Path
                facenet.get_pre_model_path(self.pre_model_url,
                                           self.pre_model_zip, self.model_path,
                                           self.pre_model_name)
                facenet.load_model(self.pre_model_name)

                images_placeholder = tf.get_default_graph().get_tensor_by_name(
                    "input:0")
                embeddings = tf.get_default_graph().get_tensor_by_name(
                    "embeddings:0")
                phase_train_placeholder = tf.get_default_graph(
                ).get_tensor_by_name("phase_train:0")
                embedding_size = embeddings.get_shape()[1]

                # Run forward pass to calculate embeddings
                nrof_images = len(paths)
                nrof_batches_per_epoch = int(
                    math.ceil(1.0 * nrof_images / self.batch_size))
                emb_array = np.zeros((nrof_images, embedding_size))
                print('Calculating features for images:(' +
                      str(nrof_batches_per_epoch) + ')')
                for i in range(nrof_batches_per_epoch):
                    print('features :' + str(i))
                    start_index = i * self.batch_size
                    end_index = min((i + 1) * self.batch_size, nrof_images)
                    paths_batch = paths[start_index:end_index]
                    images = facenet.load_data(paths_batch, False, False,
                                               self.image_size)
                    feed_dict = {
                        images_placeholder: images,
                        phase_train_placeholder: False
                    }
                    emb_array[start_index:end_index, :] = sess.run(
                        embeddings, feed_dict=feed_dict)

                # Run forward pass to calculate embeddings for the evaluation set
                eval_nrof_images = len(eval_paths)
                eval_nrof_batches_per_epoch = int(
                    math.ceil(1.0 * eval_nrof_images / self.batch_size))
                eval_emb_array = np.zeros((eval_nrof_images, embedding_size))
                print('Calculating features for images:(' +
                      str(eval_nrof_batches_per_epoch) + ')')
                for i in range(eval_nrof_batches_per_epoch):
                    print('features :' + str(i))
                    start_index = i * self.batch_size
                    end_index = min((i + 1) * self.batch_size, eval_nrof_images)
                    paths_batch = eval_paths[start_index:end_index]
                    images = facenet.load_data(paths_batch, False, False,
                                               self.image_size)
                    feed_dict = {
                        images_placeholder: images,
                        phase_train_placeholder: False
                    }
                    eval_emb_array[start_index:end_index, :] = sess.run(
                        embeddings, feed_dict=feed_dict)

                classifier_filename = modelName
                classifier_filename_exp = os.path.expanduser(
                    classifier_filename)

                # Train classifier
                print('Training classifier')
                model = SVC(kernel='linear', probability=True)
                model.fit(emb_array, labels)

                # Learn to predict each class against the other
                random_state = np.random.RandomState(0)
                classifier = OneVsRestClassifier(
                    svm.SVC(kernel='linear',
                            probability=True,
                            random_state=random_state))
                # Score the held-out evaluation embeddings so eval_labels and
                # y_score refer to the same samples.
                y_score = classifier.fit(
                    emb_array, labels).decision_function(eval_emb_array)

                # Compute ROC curve and ROC area (only for the last class here)
                fpr = dict()
                tpr = dict()
                roc_auc = dict()
                fpr[len(dataset) - 1], tpr[len(dataset) - 1], _ = roc_curve(
                    eval_labels[:, len(dataset) - 1],
                    y_score[:, len(dataset) - 1])
                roc_auc[len(dataset) - 1] = auc(fpr[len(dataset) - 1],
                                                tpr[len(dataset) - 1])

                # Compute micro-average ROC curve and ROC area
                fpr["micro"], tpr["micro"], _ = roc_curve(
                    eval_labels.ravel(), y_score.ravel())
                roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

                plt.figure()
                lw = 2
                plt.plot(fpr[len(dataset) - 1],
                         tpr[len(dataset) - 1],
                         color='darkorange',
                         lw=lw,
                         label='ROC curve (area = %0.2f)' %
                         roc_auc[len(dataset) - 1])
                plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
                plt.xlim([0.0, 1.0])
                plt.ylim([0.0, 1.05])
                plt.xlabel('False Positive Rate')
                plt.ylabel('True Positive Rate')
                plt.title('Receiver operating characteristic example')
                plt.legend(loc="lower right")
                plt.show()
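
The ROC bookkeeping in this example follows the standard scikit-learn one-vs-rest pattern. The sketch below reproduces that pattern on synthetic data (make_classification stands in for the FaceNet embeddings, and all sizes are arbitrary assumptions) so the roc_curve / auc calls can be exercised without TensorFlow:

    import numpy as np
    from sklearn import svm
    from sklearn.datasets import make_classification
    from sklearn.metrics import auc, roc_curve
    from sklearn.model_selection import train_test_split
    from sklearn.multiclass import OneVsRestClassifier
    from sklearn.preprocessing import label_binarize

    # Synthetic stand-in for the embedding matrix and labels produced above.
    X, y = make_classification(n_samples=300, n_features=20, n_informative=4,
                               n_classes=3, random_state=0)
    X_train, X_eval, y_train, y_eval = train_test_split(X, y, test_size=0.3,
                                                        random_state=0)
    classes = list(range(3))
    y_eval_bin = label_binarize(y_eval, classes=classes)

    # One-vs-rest linear SVM, scored on the held-out split.
    clf = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
                                      random_state=0))
    y_score = clf.fit(X_train, y_train).decision_function(X_eval)

    # Micro-average ROC: flatten the indicator matrix and the score matrix.
    fpr, tpr, _ = roc_curve(y_eval_bin.ravel(), y_score.ravel())
    print('micro-average AUC: %0.2f' % auc(fpr, tpr))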
Example No. 3
    def realtime_run(self, modelName, detectType=None, evalType=None):
        '''
        :param modelName: your own classifier model to use
                           self.model_name_detect : a model trained with detection only.
                           self.model_name_rotdet : a model trained with rotate -> detect preprocessing.
        :param detectType: image preprocessing applied to the frames being predicted
                           detect : run face detection only on the prediction frames.
                           rotdet : run rotate -> detect on the prediction frames.
        :param evalType: selects how the output is produced.
                         eval : fetch all data under eval_data and run a full evaluation.
                         test : predict and display only the first image of each folder in eval_data.
                         real : predict and display a live video stream.
        :return:
        '''
        self.detectType = detectType
        self.evalType = evalType

        with tf.Graph().as_default():
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
            sess = tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options, log_device_placement=False))
            with sess.as_default():
                self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(
                    sess, self.dets_path)

                # get Model Path
                facenet.get_pre_model_path(self.pre_model_url,
                                           self.pre_model_zip, self.model_path,
                                           self.pre_model_name)
                facenet.load_model(self.pre_model_name)

                self.images_placeholder = tf.get_default_graph(
                ).get_tensor_by_name("input:0")
                self.embeddings = tf.get_default_graph().get_tensor_by_name(
                    "embeddings:0")
                self.embedding_size = self.embeddings.get_shape()[1]
                self.phase_train_placeholder = tf.get_default_graph(
                ).get_tensor_by_name("phase_train:0")

                classifier_filename = modelName
                classifier_filename_exp = os.path.expanduser(
                    classifier_filename)
                with open(classifier_filename_exp, 'rb') as infile:
                    (self.model, class_names) = pickle.load(infile)
                    print('load classifier file-> %s' %
                          classifier_filename_exp)
                    print('')
                # The sorted directory names still carry the sequence prefix added earlier (e.g. L00001_); it is stripped off below.
                self.HumanNamesSort = sorted(os.listdir(self.train_data_path))
                self.HumanNames = []
                for h in self.HumanNamesSort:
                    h_split = h.split('_')
                    self.HumanNames.append(h_split[1])

                self.predictor, self.detector = AlignDatasetRotation(
                ).face_rotation_predictor_download()

                if evalType == "eval":
                    self.facenet_eval(sess)
                else:
                    self.facenet_capture(sess)
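
As the docstring describes, the method is driven entirely by the modelName, detectType, and evalType arguments. A hypothetical invocation (the class name FacenetRealtime and the classifier path are assumptions, not part of the original project) could be:

    # Hypothetical usage sketch: live prediction with rotate->detect preprocessing.
    runner = FacenetRealtime()  # assumed wrapper class providing this method
    runner.realtime_run(modelName='~/models/face_classifier.pkl',
                        detectType='rotdet',
                        evalType='real')

    # Evaluate every image under eval_data with the same classifier instead:
    runner.realtime_run(modelName='~/models/face_classifier.pkl',
                        detectType='detect',
                        evalType='eval')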