def main(argv):
    dataset_path = os.path.expanduser(argv.dataset_path)
    dataset_path = os.path.abspath(dataset_path)
    model_file = os.path.expanduser(argv.model)
    model_file = os.path.abspath(model_file)
    classifier_filename_exp = os.path.expanduser(argv.output_classifier)
    classifier_filename_exp = os.path.abspath(classifier_filename_exp)
    assert os.path.exists(dataset_path), 'please correct path...'

    np.random.seed(seed=777)

    # TF session with a small GPU memory fraction; also builds the MTCNN detector nets
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                            log_device_placement=False))
    pnet, rnet, onet = facenet.align.detect_face.create_mtcnn(sess, None)

    # Load the FaceNet model and grab its input/output tensors
    fn.load_model(model_file)
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
    embedding_size = embeddings.get_shape()[1]

    dataset = fn.get_dataset(dataset_path)
    for cls in dataset:
        assert len(cls.image_paths) > 0, 'there must be at least one image for each class in the dataset'
    paths, labels = fn.get_image_paths_and_labels(dataset)
    print('Number of classes: %d' % len(dataset))
    print('Number of images: %d' % len(paths))

    # Forward pass in batches of 90 images to compute embeddings
    nrof_images = len(paths)
    nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images / 90))
    emb_array = np.zeros((nrof_images, embedding_size))
    for i in range(nrof_batches_per_epoch):
        start_index = i * 90
        end_index = min((i + 1) * 90, nrof_images)
        paths_batch = paths[start_index:end_index]
        images = fn.load_data(paths_batch, False, False, 160)
        feed_dict = {images_placeholder: images, phase_train_placeholder: False}
        emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

    # Train a linear SVM on the embeddings and pickle it together with the class names
    model = SVC(kernel='linear', probability=True)
    model.fit(emb_array, labels)
    class_names = [cls.name.replace('_', ' ') for cls in dataset]
    with open(classifier_filename_exp, 'wb') as outfile:
        pickle.dump((model, class_names), outfile)
    print('Saved classifier model to file "%s"' % classifier_filename_exp)
    exit(0)
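# Hedged sketch (not from the original source): one plausible way to wire main()
# above to the command line. The attribute names dataset_path, model and
# output_classifier are the ones main() actually reads; the argparse wiring
# itself is an assumption.
if __name__ == '__main__':
    import argparse
    import sys

    parser = argparse.ArgumentParser(
        description='Train an SVM classifier on FaceNet embeddings.')
    parser.add_argument('dataset_path', help='Directory with one sub-folder per class.')
    parser.add_argument('model', help='Path to the FaceNet model file or directory.')
    parser.add_argument('output_classifier', help='Where to pickle the (SVC, class_names) tuple.')
    main(parser.parse_args(sys.argv[1:]))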
def index_faces(self, paths, paths_to_pk, output_dir, video_pk):
    with tf.Graph().as_default():
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.15
        with tf.Session(config=config) as sess:
            output_dir = os.path.expanduser(output_dir)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)
            logging.info("Loading trained model...\n")
            meta_file, ckpt_file, model_dir = facenet.get_model_filenames()
            facenet.load_model(model_dir, meta_file, ckpt_file)

            # Input and output tensors of the FaceNet graph
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            logging.info('Generating embeddings from images...\n')
            start_time = time.time()
            batch_size = 25
            nrof_images = len(paths)
            nrof_batches = int(np.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            count = 0
            path_count = {}
            entries = []
            for i in xrange(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                # One metadata entry per detection image in the batch
                for eindex, fname in enumerate(paths_batch):
                    count += 1
                    entry = {
                        'path': fname,
                        'detection_primary_key': paths_to_pk[fname],
                        'index': eindex,
                        'type': 'detection',
                        'video_primary_key': video_pk
                    }
                    entries.append(entry)
                images = facenet.load_data(paths_batch, do_random_crop=False, do_random_flip=False,
                                           image_size=image_size, do_prewhiten=True)
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)
            if nrof_images:
                time_avg_forward_pass = (time.time() - start_time) / float(nrof_images)
                logging.info("Forward pass took avg of %.3f[seconds/image] for %d images\n" % (
                    time_avg_forward_pass, nrof_images))

            # Persist the embeddings and the per-detection metadata gallery
            logging.info("Finally saving embeddings and gallery to: %s" % output_dir)
            feat_fname = os.path.join(output_dir, "facenet.npy")
            entries_fname = os.path.join(output_dir, "facenet.json")
            np.save(feat_fname, emb_array)
            with open(entries_fname, 'w') as fh:
                json.dump(entries, fh)
            return path_count, emb_array, entries, feat_fname, entries_fname
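# Hedged sketch (illustration only): reading back the two artifacts that
# index_faces() writes. The file names facenet.npy / facenet.json are the ones
# used above; the output_dir value and the consuming code are assumptions.
import json
import os
import numpy as np

output_dir = os.path.expanduser('~/indexes/video_42')   # assumed location
emb_array = np.load(os.path.join(output_dir, 'facenet.npy'))
with open(os.path.join(output_dir, 'facenet.json')) as fh:
    entries = json.load(fh)
assert len(entries) == emb_array.shape[0]                # one metadata entry per embedding row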
def main():
    with tf.Graph().as_default() as _:
        with tf.Session() as sess:
            if get_num_of_files_in_dir(data_dir) == 0:
                print('No train data!')
                return

            dataset = facenet.get_dataset(data_dir)
            for cls in dataset:
                assert len(cls.image_paths) > 0, 'There must be at least one image for each class in the dataset'
            paths, labels = facenet.get_image_paths_and_labels(dataset)
            print('Number of classes: %d' % len(dataset))
            print('Number of images: %d' % len(paths))

            # Load the model
            print('Loading feature extraction model')
            facenet.load_model(model_path)

            # Get input and output tensors
            images_placeholder = sess.graph.get_tensor_by_name("input:0")
            embeddings = sess.graph.get_tensor_by_name("embeddings:0")
            phase_train_placeholder = sess.graph.get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Calculating features for images')
            nrof_images = len(paths)
            nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_epoch):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            classifier_filename_exp = os.path.expanduser(classifier_filename)

            # Balance the classes before fitting the SVM
            x, y = SMOTETomek(random_state=4).fit_sample(emb_array, labels)

            print('Training classifier')
            model = SVC(kernel='linear', probability=True)
            model.fit(x, y)

            # Create a list of class names
            class_names = [cls.name.replace('_', ' ') for cls in dataset]

            # Saving classifier model
            with open(classifier_filename_exp, 'wb') as outfile:
                pickle.dump((model, class_names), outfile)
            print('Saved classifier model to file "%s"' % classifier_filename_exp)
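# Hedged sketch (illustration only): the SMOTETomek call above rebalances the
# embedding classes before the SVM is trained. A minimal standalone version on
# made-up data, assuming imbalanced-learn is installed; recent releases spell
# the method fit_resample, while fit_sample is the older name used above.
import numpy as np
from imblearn.combine import SMOTETomek

rng = np.random.RandomState(0)
emb = np.vstack([rng.randn(40, 128), rng.randn(8, 128) + 2.0])   # 40 vs 8 samples
lbl = np.array([0] * 40 + [1] * 8)                               # imbalanced labels
x_bal, y_bal = SMOTETomek(random_state=4).fit_resample(emb, lbl)
print(np.bincount(y_bal))                                        # roughly balanced class counts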
def main(args, self):
    self.update_state(state='RUNNING')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25, allow_growth=True)
        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            np.random.seed(seed=args.seed)

            if args.use_split_dataset:
                dataset_tmp = facenet.get_dataset(args.data_dir)
                train_set, test_set = split_dataset(dataset_tmp, args.min_nrof_images_per_class,
                                                    args.nrof_train_images_per_class)
                if args.mode == 'TRAIN':
                    dataset = train_set
                elif args.mode == 'CLASSIFY':
                    dataset = test_set
            else:
                dataset = facenet.get_dataset(args.data_dir)

            # Check that there is at least one training image per class
            for cls in dataset:
                assert len(cls.image_paths) > 0, 'There must be at least one image for each class in the dataset'

            paths, labels = facenet.get_image_paths_and_labels(dataset)
            print('Number of classes: %d' % len(dataset))
            print('Number of images: %d' % len(paths))

            # Load the model
            print('Loading feature extraction model')
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Calculating features for images')
            nrof_images = len(paths)
            nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images / args.batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_epoch):
                start_index = i * args.batch_size
                end_index = min((i + 1) * args.batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, args.image_size)
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            classifier_filename_exp = os.path.expanduser(args.classifier_filename)

            if args.mode == 'TRAIN':
                # Train classifier
                print('Training classifier')
                model = SVC(kernel='linear', probability=True)
                model.fit(emb_array, labels)

                # Create a list of class names
                class_names = [cls.name.replace('_', ' ') for cls in dataset]

                # Saving classifier model
                with open(classifier_filename_exp, 'wb') as outfile:
                    pickle.dump((model, class_names), outfile)
                print('Saved classifier model to file "%s"' % classifier_filename_exp)

            elif args.mode == 'CLASSIFY':
                # Classify images
                print('Testing classifier')
                with open(classifier_filename_exp, 'rb') as infile:
                    (model, class_names) = pickle.load(infile)
                print('Loaded classifier model from file "%s"' % classifier_filename_exp)

                predictions = model.predict_proba(emb_array)
                best_class_indices = np.argmax(predictions, axis=1)
                best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]

                for i in range(len(best_class_indices)):
                    print('%4d %s: %.3f' % (i, class_names[best_class_indices[i]], best_class_probabilities[i]))

                accuracy = np.mean(np.equal(best_class_indices, labels))
                print('Accuracy: %.3f' % accuracy)

            sess.close()
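# Hedged sketch (not from the original source): one plausible way a caller
# might build the args object for the task above in TRAIN mode. The field names
# mirror the attributes main() reads; every value below is an assumption, and
# the second argument would be the task-like object whose update_state() is
# called at the top of main().
from types import SimpleNamespace

train_args = SimpleNamespace(
    mode='TRAIN',
    data_dir='~/datasets/faces_aligned',        # assumed: one sub-folder per person
    model='~/models/facenet_model.pb',          # assumed model path
    classifier_filename='~/models/classifier.pkl',
    use_split_dataset=False,
    batch_size=90,
    image_size=160,
    seed=666,
    min_nrof_images_per_class=20,
    nrof_train_images_per_class=10,
)
# main(train_args, task)  # task: object exposing update_state(), e.g. a bound Celery task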
def classifier(data_dir, model_path, classifier_path):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            if get_num_of_files_in_dir(data_dir) == 0:
                print('There is no train data')
                return 0

            dataset = facenet.get_dataset(data_dir)
            paths, labels = facenet.get_image_paths_and_labels(dataset)

            # Load the pretrained model's trained parameters
            facenet.load_model(model_path)
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Preprocess images and compute embeddings in batches
            nrof_images = len(paths)
            nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_epoch):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            # Load classifier model and class-label dict
            with open(classifier_path, 'rb') as infile:
                (model, class_names) = pickle.load(infile)
            print(class_names)

            predictions = model.predict_proba(emb_array)
            best_class_indices = np.argmax(predictions, axis=1)
            best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]

            label_to_class_map = create_label_to_class_map(class_names)
            print(label_to_class_map)

            # Map each input directory to the recognized class name
            data_dir_result_label_map = create_data_dir_result_label_map(
                paths, best_class_indices, best_class_probabilities)
            data_dir_result_class_map = {}
            for dir_name in data_dir_result_label_map:
                label = data_dir_result_label_map[dir_name]
                data_dir_result_class_map[dir_name] = label_to_class_map[label]
                print('dir_name : ' + dir_name + '\t recognition : ' + label_to_class_map[label])

            rename_all_files_by_details(best_class_indices, best_class_probabilities,
                                        paths, label_to_class_map)
            return data_dir_result_class_map
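# Hedged sketch (illustration only): the argmax / fancy-indexing pattern used
# above to turn predict_proba output into a best class index and its
# probability, shown on a tiny made-up prediction matrix.
import numpy as np

predictions = np.array([[0.1, 0.7, 0.2],
                        [0.6, 0.3, 0.1]])
best_class_indices = np.argmax(predictions, axis=1)          # -> [1, 0]
best_class_probabilities = predictions[np.arange(len(best_class_indices)),
                                        best_class_indices]  # -> [0.7, 0.6]
print(best_class_indices, best_class_probabilities)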
def classify_process(sess_name, sess, graph, model, class_names):
    start = time.time()
    sess_dir = 'tmp/' + sess_name

    # Detect and align faces, then track them across frames
    align_result = align_dataset_mtcnn(sess_name)
    if align_result is None:
        print('Face not detected!')
        shutil.rmtree(sess_dir)
        return
    print(align_result)
    tracking(align_result[0], align_result[1], sess_name)

    try:
        with graph.as_default():
            dataset = facenet.get_dataset(sess_dir + '_t')
            paths, labels = facenet.get_image_paths_and_labels(dataset)

            # Get input and output tensors
            images_placeholder = graph.get_tensor_by_name("input:0")
            embeddings = graph.get_tensor_by_name("embeddings:0")
            phase_train_placeholder = graph.get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Preprocess images and compute embeddings in batches
            nrof_images = len(paths)
            nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_epoch):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            predictions = model.predict_proba(emb_array)
            best_class_indices = np.argmax(predictions, axis=1)
            best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]

            label_to_class_map = create_label_to_class_map(class_names)
            print(label_to_class_map)

            data_dir_result_label_map = create_data_dir_result_label_map(
                paths, best_class_indices, best_class_probabilities)
            data_dir_result_class_map = {}
            for dir_name in data_dir_result_label_map:
                label = data_dir_result_label_map[dir_name]
                data_dir_result_class_map[dir_name] = label_to_class_map[label]
            print(data_dir_result_class_map)

            end = time.time()
            print('Total time elapsed:', (end - start))

            # Mark attendance once per recognized person and notify the client
            processed_names = []
            for name in data_dir_result_class_map.values():
                if name == 'unknown' or name in processed_names:
                    continue
                processed_names.append(name)
                try:
                    update_attendance(name)
                except Exception as e:
                    print(e)
                sio.emit('attend', name)

            # Move confidently classified images into the recognized person's dataset folder
            for i in range(len(paths)):
                filename = paths[i].split('/')[-1]
                path = paths[i][:-len(filename)]
                acc = round(best_class_probabilities[i] * 50, 4)
                acc = pow(acc, 3)
                acc = math.sqrt(acc)
                acc = round(acc)
                if acc >= 200:
                    for imgname in os.listdir(path):
                        t = str(time.time())
                        shutil.move(path + '/' + imgname,
                                    'dataset/{}/{}.jpg'.format(
                                        label_to_class_map[best_class_indices[i]], t))
    except Exception as e:
        print(e)
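# Hedged note on the acc >= 200 gate above: ignoring the intermediate rounding,
# the chain multiply-by-50 -> cube -> square-root is just (50 * p) ** 1.5, so
# the threshold corresponds to a prediction probability of roughly
# 200 ** (2 / 3) / 50 ≈ 34.2 / 50 ≈ 0.68. The helper below is hypothetical and
# only illustrates the arithmetic.
def _acc_from_probability(p):
    return (50.0 * p) ** 1.5   # rounding-free version of the steps above

assert _acc_from_probability(0.69) >= 200
assert _acc_from_probability(0.67) < 200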