Code Example #1
def evaluate_accuracy(sess, images_placeholder, phase_train_placeholder,
                      image_size, embeddings, paths, actual_issame,
                      augment_images, aug_value, batch_size, orig_image_size,
                      seed):
    nrof_images = len(paths)
    nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
    emb_list = []
    for i in range(nrof_batches):
        start_index = i * batch_size
        end_index = min((i + 1) * batch_size, nrof_images)
        paths_batch = paths[start_index:end_index]
        images = facenet.load_data(paths_batch, False, False, orig_image_size)
        images_aug = augment_images(images, aug_value, image_size)
        feed_dict = {
            images_placeholder: images_aug,
            phase_train_placeholder: False
        }
        emb_list.append(sess.run(embeddings, feed_dict=feed_dict))
    emb_array = np.vstack(
        emb_list
    )  # Stack the embeddings to a nrof_examples_per_epoch x 128 matrix

    thresholds = np.arange(0, 4, 0.01)
    embeddings1 = emb_array[0::2]
    embeddings2 = emb_array[1::2]
    _, _, accuracy = facenet.calculate_roc(thresholds, embeddings1,
                                           embeddings2,
                                           np.asarray(actual_issame), seed)
    return accuracy
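The function above expects an augment_images callable that maps a batch loaded at orig_image_size down to image_size before it is fed to the network. A minimal sketch of such a callable (hypothetical, assuming a plain center crop; the aug_value knob is accepted but ignored here):

import numpy as np

def augment_images(images, aug_value, image_size):
    # Hypothetical helper: center-crop every image in the 4-D batch
    # (nrof_images, height, width, channels) down to image_size x image_size.
    # aug_value is kept only for signature compatibility with the caller above.
    _, height, width, _ = images.shape
    top = (height - image_size) // 2
    left = (width - image_size) // 2
    return images[:, top:top + image_size, left:left + image_size, :]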
Code Example #2
File: clustering.py  Project: wenh06/facenet
def compute_facial_encodings(sess, images_placeholder, embeddings,
                             phase_train_placeholder, image_size,
                             embedding_size, nrof_images, nrof_batches,
                             emb_array, batch_size, paths):
    """ Compute Facial Encodings

        Given a set of images, compute the facial encodings of each face detected in the images and
        return them. If no faces, or more than one face found, return nothing for that image.

        Inputs:
            image_paths: a list of image paths

        Outputs:
            facial_encodings: (image_path, facial_encoding) dictionary of facial encodings

    """

    for i in range(nrof_batches):
        start_index = i * batch_size
        end_index = min((i + 1) * batch_size, nrof_images)
        paths_batch = paths[start_index:end_index]
        images = facenet.load_data(paths_batch, False, False, image_size)
        feed_dict = {
            images_placeholder: images,
            phase_train_placeholder: False
        }
        emb_array[start_index:end_index, :] = sess.run(embeddings,
                                                       feed_dict=feed_dict)

    facial_encodings = {}
    for x in range(nrof_images):
        facial_encodings[paths[x]] = emb_array[x, :]

    return facial_encodings
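A minimal usage sketch for the function above, following the session/placeholder setup pattern used elsewhere on this page (the image paths and model file are hypothetical; the caller preallocates emb_array and the batch bookkeeping, as the signature requires):

import math
import numpy as np
import tensorflow as tf
import facenet

paths = ['faces/a.png', 'faces/b.png']  # hypothetical, pre-aligned face crops
image_size = 160
batch_size = 2

with tf.Graph().as_default():
    with tf.Session() as sess:
        facenet.load_model('20170512-110547.pb')  # hypothetical frozen model file
        images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

        embedding_size = embeddings.get_shape()[1]
        nrof_images = len(paths)
        nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
        emb_array = np.zeros((nrof_images, embedding_size))

        facial_encodings = compute_facial_encodings(
            sess, images_placeholder, embeddings, phase_train_placeholder,
            image_size, embedding_size, nrof_images, nrof_batches,
            emb_array, batch_size, paths)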
Code Example #3
def main():
  
    with tf.Graph().as_default():
        args={
            'lfw_dir':'/Users/unclewang/Downloads/lfw_mtcnnpy_60',
            'lfw_batch_size':128,
            'model':'/Users/unclewang/PycharmProjects/facenet/models/facenet/20170512-110547/20170512-110547.pb',
            'image_size':160,
            'lfw_pairs':'/Users/unclewang/PycharmProjects/facenet/data/pairs.txt',
            'lfw_file_ext':'png',
            'lfw_nrof_folds':10
        }
        with tf.Session() as sess:
            
            # Read the file containing the pairs used for testing
            pairs = lfw.read_pairs(os.path.expanduser(args['lfw_pairs']))

            # Get the paths for the corresponding images
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args['lfw_dir']), pairs, args['lfw_file_ext'])

            # Load the model
            facenet.load_model(args['model'])
            
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            
            #image_size = images_placeholder.get_shape()[1]  # For some reason this doesn't work for frozen graphs
            image_size = args['image_size']
            embedding_size = embeddings.get_shape()[1]
        
            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args['lfw_batch_size']
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0*nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i*batch_size
                end_index = min((i+1)*batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = { images_placeholder:images, phase_train_placeholder:False }
                emb_array[start_index:end_index,:] = sess.run(embeddings, feed_dict=feed_dict)
        
            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(emb_array, 
                actual_issame, nrof_folds=args['lfw_nrof_folds'])

            print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
Code Example #4
def main(args):

    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Get the paths for the images
            paths = []
            for imagePath in os.listdir(args.face_dir):
                imagePath = os.path.join(args.face_dir, imagePath)
                if os.path.isfile(imagePath):
                    print(imagePath)
                    paths.append(imagePath)

            # Load the model
            print('Model directory: %s' % args.model_dir)
            meta_file, ckpt_file = facenet.get_model_filenames(
                os.path.expanduser(args.model_dir))

            print('Metagraph file: %s' % meta_file)
            print('Checkpoint file: %s' % ckpt_file)
            facenet.load_model(args.model_dir, meta_file, ckpt_file)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            image_size = images_placeholder.get_shape()[1]
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Running forward pass on images')
            batch_size = args.batch_size
            nrof_images = len(paths)
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, True, True, image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            # Save embeddings to disk
            for i in range(nrof_images):
                np.save(paths[i], emb_array[i, :], allow_pickle=False)
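np.save appends a '.npy' suffix when the target name does not already end in one, so the loop above writes each embedding next to its source image. A small sketch for reading one back (the path is hypothetical):

import numpy as np

# Hypothetical image path; the loop above wrote '<image path>.npy' beside it
emb = np.load('/data/faces/person_001.png.npy')
print(emb.shape)  # (embedding_size,)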
Code Example #5
def main(args):
	"""
	"""
	with tf.Graph().as_default():

		with tf.Session() as sess:

			# create output directory if it doesn't exist
			output_dir = os.path.expanduser(args.output_dir)
			if not os.path.isdir(output_dir):
				os.makedirs(output_dir)

			# load the model
			print("Loading trained model...\n")
			facenet.load_model(args.trained_model_dir)

			# grab all image paths and labels
			print("Finding image paths and targets...\n")
			data = load_files(args.data_dir, load_content=False, shuffle=False)
			labels_array = data['target']
			paths = data['filenames']

			# Get input and output tensors
			images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
			embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
			phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

			image_size = images_placeholder.get_shape()[1]
			embedding_size = embeddings.get_shape()[1]

			# Run forward pass to calculate embeddings
			print('Generating embeddings from images...\n')
			start_time = time.time()
			batch_size = args.batch_size
			nrof_images = len(paths)
			nrof_batches = int(np.ceil(1.0*nrof_images / batch_size))
			emb_array = np.zeros((nrof_images, embedding_size))
			for i in range(nrof_batches):
				start_index = i*batch_size
				end_index = min((i+1)*batch_size, nrof_images)
				paths_batch = paths[start_index:end_index]
				images = facenet.load_data(paths_batch, do_random_crop=False, do_random_flip=False, image_size=image_size, do_prewhiten=True)
				feed_dict = { images_placeholder:images, phase_train_placeholder:False}
				emb_array[start_index:end_index,:] = sess.run(embeddings, feed_dict=feed_dict)

			time_avg_forward_pass = (time.time() - start_time) / float(nrof_images)
			print("Forward pass took avg of %.3f[seconds/image] for %d images\n" % (time_avg_forward_pass, nrof_images))

			print("Finally saving embeddings and gallery to: %s" % (output_dir))
			# save the gallery and embeddings (signatures) as numpy arrays to disk
			np.save(os.path.join(output_dir, "gallery.npy"), labels_array)
			np.save(os.path.join(output_dir, "signatures.npy"), emb_array)
Code Example #6
File: test_align.py  Project: wenh06/facenet
def main():
    image_size = 96
    old_dataset = '/home/david/datasets/facescrub/fs_aligned_new_oean/'
    new_dataset = '/home/david/datasets/facescrub/facescrub_110_96/'
    eq = 0
    num = 0
    l = []
    dataset = facenet.get_dataset(old_dataset)
    for cls in dataset:
        new_class_dir = os.path.join(new_dataset, cls.name)
        for image_path in cls.image_paths:
            try:
                filename = os.path.splitext(os.path.split(image_path)[1])[0]
                new_filename = os.path.join(new_class_dir, filename + '.png')
                #print(image_path)
                if os.path.exists(new_filename):
                    a = facenet.load_data([image_path, new_filename],
                                          False,
                                          False,
                                          image_size,
                                          do_prewhiten=False)
                    if np.array_equal(a[0], a[1]):
                        eq += 1
                    num += 1
                    err = np.sum(np.square(np.subtract(a[0], a[1])))
                    #print(err)
                    l.append(err)
                    if err > 2000:
                        fig = plt.figure(1)
                        p1 = fig.add_subplot(121)
                        p1.imshow(a[0])
                        p2 = fig.add_subplot(122)
                        p2.imshow(a[1])
                        print('%6.1f: %s\n' % (err, new_filename))
                        pass
                else:
                    pass
                    #print('File not found: %s' % new_filename)
            except:
                pass
Code Example #7
def main(args):
    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Read the file containing the pairs used for testing
            # 1. Read in the pairs.txt file prepared earlier
            # After reading, each entry looks like ['Abel_Pacheco', '1', '4']
            pairs = lfw.read_pairs(os.path.expanduser(
                args.lfw_pairs))  # images have already been cropped

            # Get the paths for the corresponding images
            # Get the file paths and the same/different flag for each pair
            all_paths, actual_issame = lfw.get_paths(
                os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
            paths = all_paths[0::2]
            print("paths shape =", len(paths))  # 12000
            print("paths=", paths[0:200])
            print('actual_issame shape=', len(actual_issame))  # 6000
            print('actual_issame', actual_issame[0:200])

            # Load the model
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # image_size = images_placeholder.get_shape()[1]  # For some reason this doesn't work for frozen graphs
            image_size = args.image_size
            embedding_size = embeddings.get_shape()[1]  # 128

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)  # 12000
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))  # 12000* 128
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False,
                                           image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            # # Print debug information
            # embeddings1 = emb_array[0::2]  # 6000 images: the first image of each pair
            # embeddings2 = emb_array[1::2]  # 6000 images: the second image of each pair
            # diff = np.subtract(embeddings1, embeddings2)  # subtract the two vectors of each pair
            # dist = np.sum(np.square(diff), 1)  # squared L2 norm, i.e. the squared Euclidean distance between the two vectors of each pair
            # # # this can also be read as a (dis)similarity between the two images' feature vectors
            # print('------------------------------------------------------------')
            # print('dist.len=', len(dist))
            need_embeddings = emb_array
            # Compute similarity using the embeddings.
            f = open(r'E:\MyPythonProjects\captchaOneChineseTrain/result.txt',
                     'w')
            for number_of_tests in range(10000):
                f.write(str(number_of_tests).zfill(4) + ',')
                # each group contains 13 images; images 9, 10, 11, 12 are processed here
                for i in range(9, 13):
                    emb_i = need_embeddings[(number_of_tests * 13) + i]
                    min_index = 0
                    min_dist = 999999
                    for j in range(9):
                        emb_j = need_embeddings[(number_of_tests * 13) + j]
                        # squared Euclidean distance is used to measure similarity here
                        diff = np.subtract(emb_i, emb_j)
                        dist = np.sum(np.square(diff))
                        # cosine similarity could be used instead:
                        # dist=np.sum(emb_i*emb_j)/(np.linalg.norm(emb_i)*np.linalg.norm(emb_j))
                        if dist < min_dist:
                            min_dist = dist
                            min_index = j
                    f.write(str(min_index))
                f.write('\n')
            f.close()
Code Example #8
def main(args):
    with tf.Graph().as_default():

        with tf.Session() as sess:

            np.random.seed(seed=args["seed"])

            if args["use_split_dataset"]:
                dataset_tmp = facenet.get_dataset(args["data_dir"])
                train_set, test_set = split_dataset(
                    dataset_tmp, args["min_nrof_images_per_class"], args["nrof_train_images_per_class"])
                if (args["mode"] == 'TRAIN'):
                    dataset = train_set
                elif (args["mode"] == 'CLASSIFY'):
                    dataset = test_set
            else:
                dataset = facenet.get_dataset(args["data_dir"])

            # Check that there is at least one training image per class
            for cls in dataset:
                assert len(cls.image_paths) > 0, \
                    'There must be at least one image for each class in the dataset'

            paths, labels = facenet.get_image_paths_and_labels(dataset)

            print('Number of classes: %d' % len(dataset))
            print('Number of images: %d' % len(paths))

            # Load the model
            print('Loading feature extraction model')
            facenet.load_model(args["model"])

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Calculating features for images')
            nrof_images = len(paths)
            nrof_batches_per_epoch = int(
                math.ceil(1.0*nrof_images / args["batch_size"]))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_epoch):
                start_index = i*args["batch_size"]
                end_index = min((i+1)*args["batch_size"], nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(
                    paths_batch, False, False, args["image_size"])
                feed_dict = {images_placeholder: images,
                             phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            classifier_filename_exp = os.path.expanduser(
                args["classifier_filename"])

            if (args["mode"] == 'TRAIN'):
                # Train classifier
                print('Training classifier')
                model = SVC(kernel='linear', probability=True)
                model.fit(emb_array, labels)

                # Create a list of class names
                class_names = [cls.name.replace('_', ' ') for cls in dataset]

                # Saving classifier model
                with open(classifier_filename_exp, 'wb') as outfile:
                    pickle.dump((model, class_names), outfile)
                print('Saved classifier model to file "%s"' %
                      classifier_filename_exp)

            elif (args["mode"] == 'CLASSIFY'):
                # Classify images
                print('Testing classifier')
                with open(classifier_filename_exp, 'rb') as infile:
                    (model, class_names) = pickle.load(infile)

                print('Loaded classifier model from file "%s"' %
                      classifier_filename_exp)

                predictions = model.predict_proba(emb_array)
                best_class_indices = np.argmax(predictions, axis=1)
                best_class_probabilities = predictions[np.arange(
                    len(best_class_indices)), best_class_indices]

                for i in range(len(best_class_indices)):
                    print('%4d  %s: %.3f' % (
                        i, class_names[best_class_indices[i]], best_class_probabilities[i]))

                accuracy = np.mean(np.equal(best_class_indices, labels))
                print('Accuracy: %.3f' % accuracy)
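The example above calls a split_dataset helper that is not shown here. A minimal sketch of what such a helper could look like, assuming facenet.ImageClass(name, image_paths) is available: classes with fewer than min_nrof_images_per_class images are skipped, the first nrof_train_images_per_class images of each remaining class go to the train split and the rest to the test split.

import numpy as np
import facenet

def split_dataset(dataset, min_nrof_images_per_class, nrof_train_images_per_class):
    # Sketch only: partition each sufficiently large class into train/test subsets.
    train_set = []
    test_set = []
    for cls in dataset:
        paths = cls.image_paths[:]
        if len(paths) < min_nrof_images_per_class:
            continue  # skip classes that are too small to split
        np.random.shuffle(paths)
        train_set.append(facenet.ImageClass(cls.name, paths[:nrof_train_images_per_class]))
        test_set.append(facenet.ImageClass(cls.name, paths[nrof_train_images_per_class:]))
    return train_set, test_set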
Code Example #9
def main(args):

    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Read the file containing the pairs used for testing
            # 1. Read in the pairs.txt file prepared earlier
            # After reading, each entry looks like ['Abel_Pacheco', '1', '4']
            pairs = lfw.read_pairs(os.path.expanduser(
                args.lfw_pairs))  # images have already been cropped

            # Get the paths for the corresponding images
            # Get the file paths and the same/different flag for each pair
            paths, actual_issame = lfw.get_paths(
                os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
            print("paths shape =", len(paths))  # 12000
            print("paths=", paths[0:200])
            print('actual_issame shape=', len(actual_issame))  # 6000
            print('actual_issame', actual_issame[0:200])

            # Load the model
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            #image_size = images_placeholder.get_shape()[1]  # For some reason this doesn't work for frozen graphs
            image_size = args.image_size
            embedding_size = embeddings.get_shape()[1]  # 128

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)  # 12000
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))  # 12000* 128
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False,
                                           image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            # Print debug information
            embeddings1 = emb_array[0::2]  # 6000 images: the first image of each pair
            embeddings2 = emb_array[1::2]  # 6000 images: the second image of each pair
            diff = np.subtract(embeddings1, embeddings2)  # subtract the two vectors of each pair
            dist = np.sum(np.square(diff),
                          1)  # squared L2 norm, i.e. the squared Euclidean distance between the two vectors of each pair
            # # this can also be read as a (dis)similarity between the two images' feature vectors
            print(
                '------------------------------------------------------------')
            print('dist.len=', len(dist))

            # Save the feature vectors to a file. The data handled here is not very reliable, so the exported features cannot be used directly for clustering.
            f = open(
                r'E:\MyPythonProjects\HWDB1\train1\pairs/embeddingsOfChinesepairs.txt',
                'w')
            for embedding in emb_array:
                for data in embedding:
                    f.write(str(data) + ' ')
                f.write('\n')
            f.close()

            # Save the distances to a file
            f = open(
                r'E:\MyPythonProjects\HWDB1\train1\pairs/distInChinesePairs.txt',
                'w')
            for d in dist:
                f.write(str(d) + '\n')
            f.close()
            # ***************************************************************************

            tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
                emb_array, actual_issame, nrof_folds=args.lfw_nrof_folds)

            print('Accuracy: %1.3f+-%1.3f' %
                  (np.mean(accuracy), np.std(accuracy)))
            print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' %
                  (val, val_std, far))

            auc = metrics.auc(fpr, tpr)
            print('Area Under Curve (AUC): %1.3f' % auc)
            eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x),
                         0., 1.)
            print('Equal Error Rate (EER): %1.3f' % eer)
Code Example #10
File: ForCluster.py  Project: shuyuandeqipa/mycaptcha
def main(args):
    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Read the file containing the pairs used for testing
            # 1. Read in the pairs.txt file prepared earlier
            # After reading, each entry looks like ['Abel_Pacheco', '1', '4']
            pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))  # images have already been cropped

            # Get the paths for the corresponding images
            # Get the file paths and the same/different flag for each pair
            paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
            print("paths shape =", len(paths))  # 12000
            print("paths=", paths[0:200])
            print('actual_issame shape=', len(actual_issame))  # 6000
            print('actual_issame', actual_issame[0:200])

            # Load the model
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # image_size = images_placeholder.get_shape()[1]  # For some reason this doesn't work for frozen graphs
            image_size = args.image_size
            embedding_size = embeddings.get_shape()[1]  # 128

            # Run forward pass to calculate embeddings
            print('Running forward pass on LFW images')
            batch_size = args.lfw_batch_size
            nrof_images = len(paths)  # 12000
            nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))  # 12000* 128
            for i in range(nrof_batches):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, image_size)
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            # Print debug information
            embeddings1 = emb_array[0::2]  # 6000 images: the first image of each pair
            embeddings2 = emb_array[1::2]  # 6000 images: the second image of each pair
            diff = np.subtract(embeddings1, embeddings2)  # subtract the two vectors of each pair
            dist = np.sum(np.square(diff), 1)  # squared L2 norm, i.e. the squared Euclidean distance between the two vectors of each pair
            # # this can also be read as a (dis)similarity between the two images' feature vectors
            print('------------------------------------------------------------')
            print('dist.len=', len(dist))

            # Save the feature vectors to a file
            f = open(r'E:\MyPythonProjects\HWDB1\train1\pairs/embeddingsOfCluster.txt', 'w')
            for embedding in emb_array:
                for data in embedding:
                    f.write(str(data) + ' ')
                f.write('\n')
            f.close()

            # Save the distances to a file
            f = open(r'E:\MyPythonProjects\HWDB1\train1\pairs/distForCluster.txt', 'w')
            for d in dist:
                f.write(str(d) + '\n')
            f.close()

            # Cluster the data with k-means into 10 clusters
            from sklearn.cluster import KMeans
            kmeans = KMeans(n_clusters=10, random_state=0).fit(emb_array)
            # cluster_centers_  ->  cluster_centers_kmeans.txt
            f = open(r'E:\MyPythonProjects\HWDB1\train1\pairs/cluster_centers_kmeans.txt', 'w')
            for centers in kmeans.cluster_centers_:
                for i in centers:
                    f.write(str(i) + ' ')
                f.write('\n')
            f.close()

            f = open(r'E:\MyPythonProjects\HWDB1\train1\pairs/labelsForKmeansCluster.txt', 'w')
            for label in kmeans.labels_:
                f.write(str(label) + '\n')  # the labels show some clustering effect, though the result is not particularly good
            f.close()

            print("Done")

            # # Plot a 3D scatter of the first three embedding dimensions
            #
            # import matplotlib.pyplot as plt
            # from mpl_toolkits.mplot3d import Axes3D
            # data =emb_array[:,0:3]
            #
            # x, y, z = data[:, 0], data[:, 1], data[:, 2]
            #
            # fig = plt.figure()
            # ax = fig.add_subplot(111, projection='3d')  # create a 3D plot
            # #  draw the data points in several groups, distinguished by color
            # # b: blue
            # # g: green
            # # r: red
            # # c: cyan
            # # m: magenta
            # # y: yellow
            # # k: black
            # # w: white
            #
            # ax.scatter(x[0:200], y[0:200], z[0:200], c='b')  # 绘制数据点
            # ax.scatter(x[200:400], y[200:400], z[200:400], c='r')
            # ax.scatter(x[400:600], y[400:600], z[400:600], c='g')
            # ax.scatter(x[600:800], y[600:800], z[600:800], c='k')
            #
            # ax.set_zlabel('Z')  # axis labels
            # ax.set_ylabel('Y')
            # ax.set_xlabel('X')
            # plt.show()

Code Example #11
		# Get input and output tensors
		images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
		embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
		phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
		embedding_size = embeddings.get_shape()[1]
		
		# Run forward pass to calculate embeddings
		print('Calculating features for images')
		nrof_images = len(paths)
		nrof_batches_per_epoch = int(math.ceil(1.0*nrof_images / batch_size))
		emb_array = np.zeros((nrof_images, embedding_size))
		for i in range(nrof_batches_per_epoch):
			start_index = i*batch_size
			end_index = min((i+1)*batch_size, nrof_images)
			paths_batch = paths[start_index:end_index]
			images = facenet.load_data(paths_batch, False, False, image_size)
			feed_dict = { images_placeholder:images, phase_train_placeholder:False }
			emb_array[start_index:end_index,:] = sess.run(embeddings, feed_dict=feed_dict)

		classifier_filename_exp = os.path.expanduser(classifier_filename)

		if (mode=='TRAIN'):
			# Train classifier
			print('Training classifier')
			model = SVC(kernel='linear', probability=True)
			model.fit(emb_array, labels)
		
			# Create a list of class names
			class_names = [ cls.name.replace('_', ' ') for cls in dataset]

			# Saving classifier model
Code Example #12
File: export_embeddings.py  Project: wenh06/facenet
def main(args):
    train_set = facenet.get_dataset(args.data_dir)
    image_list, label_list = facenet.get_image_paths_and_labels(train_set)
    # fetch the classes (labels as strings) exactly as it's done in get_dataset
    path_exp = os.path.expanduser(args.data_dir)
    classes = [path for path in os.listdir(path_exp) \
               if os.path.isdir(os.path.join(path_exp, path))]
    classes.sort()
    # get the label strings
    label_strings = [name for name in classes if \
       os.path.isdir(os.path.join(path_exp, name))]

    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Load the model
            facenet.load_model(args.model_dir)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            nrof_images = len(image_list)
            print('Number of images: ', nrof_images)
            batch_size = args.image_batch
            if nrof_images % batch_size == 0:
                nrof_batches = nrof_images // batch_size
            else:
                nrof_batches = (nrof_images // batch_size) + 1
            print('Number of batches: ', nrof_batches)
            embedding_size = embeddings.get_shape()[1]
            emb_array = np.zeros((nrof_images, embedding_size))
            start_time = time.time()

            for i in range(nrof_batches):
                if i == nrof_batches - 1:
                    n = nrof_images
                else:
                    n = i * batch_size + batch_size
                # Get images for the batch
                if args.is_aligned is True:
                    images = facenet.load_data(image_list[i * batch_size:n],
                                               False, False, args.image_size)
                else:
                    images = load_and_align_data(image_list[i * batch_size:n],
                                                 args.image_size, args.margin,
                                                 args.gpu_memory_fraction)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                # Use the facenet model to calculate embeddings
                embed = sess.run(embeddings, feed_dict=feed_dict)
                emb_array[i * batch_size:n, :] = embed
                print('Completed batch', i + 1, 'of', nrof_batches)

            run_time = time.time() - start_time
            print('Run time: ', run_time)

            # Export embeddings and labels
            label_list = np.array(label_list)

            np.save(args.embeddings_name, emb_array)
            np.save(args.labels_name, label_list)
            label_strings = np.array(label_strings)
            np.save(args.labels_strings_name, label_strings[label_list])