Example #1
def main(args):

    dataset = facenet.get_dataset(args.dir)
    paths, _ = facenet.get_image_paths_and_labels(dataset)
    t = np.zeros((len(paths)))
    x = time.time()
    for i, path in enumerate(paths):
        start_time = time.time()
        with open(path, mode='rb') as f:
            _ = f.read()
        duration = time.time() - start_time
        t[i] = duration
        if i % 1000 == 0 or i == len(paths) - 1:
            print('File %d/%d  Total time: %.2f  Avg (ms): %.3f  Std (ms): %.3f' %
                  (i, len(paths), time.time() - x, np.mean(t[0:i + 1]) * 1000,
                   np.std(t[0:i + 1]) * 1000))
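This snippet only needs an args object with a dir attribute. A minimal companion parser in the usual facenet script style (parse_arguments and the __main__ guard below are assumptions, not part of the original):

import argparse
import sys

def parse_arguments(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('dir', type=str,
                        help='Directory containing the image dataset.')
    return parser.parse_args(argv)

if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))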
Example #2
def main(args):
    train_set = facenet.get_dataset(args.data_dir)
    image_list, label_list = facenet.get_image_paths_and_labels(train_set)

    # fetch the classes (labels as strings) exactly as it's done in get_dataset
    path_exp = os.path.expanduser(args.data_dir)
    classes = [path for path in os.listdir(path_exp) \
               if os.path.isdir(os.path.join(path_exp, path))]
    classes.sort()
    # get the label strings
    label_strings = [name for name in classes if \
       os.path.isdir(os.path.join(path_exp, name))]

    images = load_and_align_data(image_list, args.image_size, args.margin,
                                 args.gpu_memory_fraction)
    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Load the model
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            feed_dict = {
                images_placeholder: images,
                phase_train_placeholder: False
            }
            emb = sess.run(embeddings, feed_dict=feed_dict)

            nrof_images = len(image_list)

            # Export embeddings and labels
            label_list = np.array(label_list)

            np.save(args.embeddings_name, emb)
            label_strings = np.array(label_strings)
            np.save(args.labels_strings_name, label_strings[label_list])
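Since label_strings[label_list] maps each integer label back to its class name, the two saved arrays line up one-to-one. A short sketch of reloading them (the file names stand in for whatever was passed as args.embeddings_name and args.labels_strings_name):

import numpy as np

emb = np.load('embeddings.npy')        # shape: (nrof_images, embedding_size)
names = np.load('label_strings.npy')   # one class-name string per image
print(emb.shape, names[:5])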
Example #3
def main():
    image_size = 96
    old_dataset = '/home/david/datasets/facescrub/fs_aligned_new_oean/'
    new_dataset = '/home/david/datasets/facescrub/facescrub_110_96/'
    eq = 0   # number of bit-identical image pairs
    num = 0  # number of compared pairs
    l = []   # per-pair squared errors
    dataset = facenet.get_dataset(old_dataset)
    for cls in dataset:
        new_class_dir = os.path.join(new_dataset, cls.name)
        for image_path in cls.image_paths:
            try:
                filename = os.path.splitext(os.path.split(image_path)[1])[0]
                new_filename = os.path.join(new_class_dir, filename + '.png')
                #print(image_path)
                if os.path.exists(new_filename):
                    a = facenet.load_data([image_path, new_filename],
                                          False,
                                          False,
                                          image_size,
                                          do_prewhiten=False)
                    if np.array_equal(a[0], a[1]):
                        eq += 1
                    num += 1
                    err = np.sum(np.square(np.subtract(a[0], a[1])))
                    #print(err)
                    l.append(err)
                    if err > 2000:
                        fig = plt.figure(1)
                        p1 = fig.add_subplot(121)
                        p1.imshow(a[0])
                        p2 = fig.add_subplot(122)
                        p2.imshow(a[1])
                        print('%6.1f: %s\n' % (err, new_filename))
                else:
                    pass
                    #print('File not found: %s' % new_filename)
            except:
                # Skip images that fail to load or compare
                pass
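The per-pair squared errors accumulate in l, so a natural follow-up (not in the original) is to summarize them once the loop finishes:

print('%d of %d image pairs are bit-identical' % (eq, num))
plt.figure(2)
plt.hist(l, bins=100)  # distribution of per-pair squared errors
plt.show()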
Example #4
def train(server, cluster_spec, args, ctx):
  task_index = ctx.task_index
  num_workers = len(cluster_spec['worker'])
  is_chief = task_index == 0

  local_data_path = args.local_data_path
  data_dir = os.path.join(local_data_path, "train")
  val_dir = os.path.join(local_data_path, "val")
  val_pairs = os.path.join(local_data_path, "pairs.txt")

  subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
  log_dir = args.workspace + "/logs/" + subdir

  checkpoint_dir = args.checkpoint_dir if args.checkpoint_dir else (args.workspace + "/models/" + subdir)

  if task_index == 0:
    if not tf.gfile.Exists(args.workspace):
      tf.gfile.MakeDirs(args.workspace)
    if not tf.gfile.Exists(checkpoint_dir):
      tf.gfile.MakeDirs(checkpoint_dir)
    if not tf.gfile.Exists(log_dir):
        tf.gfile.MakeDirs(log_dir)

  seed = random.SystemRandom().randint(0, 10240)
  print("Random seed: " + str(seed))
  np.random.seed(seed=seed)
  train_set = facenet.get_dataset(data_dir)

  print('Model directory: %s' % checkpoint_dir)
  print('Log directory: %s' % log_dir)

  # Read the file containing the pairs used for testing
  pairs = read_pairs(val_pairs)
  # Get the paths for the corresponding images
  val_image_paths, actual_issame = get_paths(val_dir, pairs)

  with tf.device(tf.train.replica_device_setter(worker_device="/job:worker/task:%d" % task_index, cluster=cluster_spec)):
    tf.set_random_seed(seed)

    # Placeholder for the learning rate
    learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
    batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
    phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
    image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 3), name='image_paths')
    labels_placeholder = tf.placeholder(tf.int64, shape=(None, 3), name='labels')

    input_queue = data_flow_ops.FIFOQueue(capacity=10000,
                                          dtypes=[tf.string, tf.int64],
                                          shapes=[(3,), (3,)],
                                          shared_name=None, name=None)
    enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder])

    nrof_preprocess_threads = 4
    images_and_labels = []
    for _ in range(nrof_preprocess_threads):
      filenames, label = input_queue.dequeue()
      images = []
      for filename in tf.unstack(filenames):
        file_contents = tf.read_file(filename)
        image = tf.image.decode_image(file_contents, channels=3)
        processed_image = vgg_preprocessing.preprocess_image(image, args.image_size, args.image_size, is_training=False, bgr=True)
        if args.random_flip:
          processed_image = tf.image.random_flip_left_right(processed_image)

        images.append(processed_image)
      images_and_labels.append([images, label])

    image_batch, labels_batch = tf.train.batch_join(
      images_and_labels, batch_size=batch_size_placeholder,
      shapes=[(args.image_size, args.image_size, 3), ()], enqueue_many=True,
      capacity=4 * nrof_preprocess_threads * args.batch_size,
      allow_smaller_final_batch=True)
    image_batch = tf.identity(image_batch, 'image_batch')
    image_batch = tf.identity(image_batch, 'input')
    labels_batch = tf.identity(labels_batch, 'label_batch')

    with slim.arg_scope(resnet_v1.resnet_arg_scope(weight_decay=args.weight_decay)):
      val_logits, _ = resnet_v1.resnet_v1_101_triplet(image_batch, embedding_size=args.embedding_size, is_training=phase_train_placeholder)

    loader = tf.train.Saver()
    global_step = tf.train.get_or_create_global_step()

    embeddings = tf.squeeze(val_logits['triplet_pre_embeddings'], [1, 2], name='feat_embeddings/squeezed')
    embeddings = tf.nn.l2_normalize(embeddings, 1, 1e-10, name='embeddings')
    # Split embeddings into anchor, positive and negative and calculate triplet loss
    anchor, positive, negative = tf.unstack(tf.reshape(embeddings, [-1, 3, args.embedding_size]), 3, 1)
    triplet_loss = facenet.triplet_loss(anchor, positive, negative, args.alpha)

    learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
                                               args.learning_rate_decay_epochs * args.epoch_size,
                                               args.learning_rate_decay_factor, staircase=True)
    # Calculate the total losses
    regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    total_loss = tf.add_n([triplet_loss] + regularization_losses, name='total_loss')

    tf.summary.scalar('learning_rate', learning_rate)
    tf.summary.scalar('triplet_loss', triplet_loss)
    tf.summary.scalar('total_losses', total_loss)

    train_layers = ['logits', 'mutli_task']
    var_list = []
    for v in tf.global_variables():
      splits = v.name.split("/")
      if len(splits) > 2 and splits[1] in train_layers:
        var_list.append(v)
    
    train_op, opt = facenet.train(total_loss, global_step, args.optimizer,
       learning_rate, args.moving_average_decay, var_list, sync_replicas=args.sync_replicas, replicas_to_aggregate=num_workers)

    summary_op = tf.summary.merge_all()
    saver = tf.train.Saver()
    
    hooks = []
    if args.sync_replicas:
      hooks += [opt.make_session_run_hook(is_chief)]

    sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False,
                                 device_filters=['/job:ps', '/job:worker/task:%d' % task_index])
    #sess_config.operation_timeout_in_ms=80000
    save_path = os.path.join(checkpoint_dir, "model.ckpt")
    
    with tf.train.MonitoredTrainingSession(master=server.target,
                                           is_chief=is_chief,
                                           config=sess_config,
                                           hooks=hooks,
                                           save_summaries_steps=None,
                                           save_summaries_secs=None,
                                           stop_grace_period_secs=30) as sess:
      # Training and validation loop
      summary_writer = tf.summary.FileWriter(log_dir, sess.graph) if is_chief else None
      if is_chief:
        loader.restore(sess, args.pretrained_ckpt)

      step = 0
      while True:
        if is_chief:
          # checkpoint_path = os.path.join(checkpoint_dir, 'model-%s.ckpt' % "test")
          # saver.save(sess._sess._sess._sess._sess, checkpoint_path, global_step=step, write_meta_graph=False)
          evaluate(sess, val_image_paths, embeddings, labels_batch, image_paths_placeholder, labels_placeholder,
                   batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder, enqueue_op,
                   actual_issame, args.batch_size, args.lfw_nrof_folds, step, summary_writer, args.embedding_size)
          # Unwrap MonitoredTrainingSession to reach the raw tf.Session for checkpointing
          saver.save(sess._sess._sess._sess._sess, save_path, global_step=step)
        # Train for one epoch
        step = _train(args, sess, train_set, image_paths_placeholder, labels_placeholder, labels_batch,
               batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder, enqueue_op, input_queue,
               global_step, embeddings, total_loss, train_op, args.embedding_size, triplet_loss, summary_op, summary_writer)

  return checkpoint_dir
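facenet.triplet_loss, called above, is not shown in this excerpt. Assuming the standard FaceNet formulation max(||a-p||^2 - ||a-n||^2 + alpha, 0), a sketch would be:

import tensorflow as tf

def triplet_loss(anchor, positive, negative, alpha):
    # Squared L2 distances from the anchor to the positive and negative embeddings
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
    # Hinge on the margin alpha, averaged over the batch
    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    return tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)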
Example #5
def main(args):
  
    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    print(args.data_dir)
    train_set = facenet.get_dataset(args.data_dir)
    if args.filter_filename:
        train_set = filter_dataset(train_set, os.path.expanduser(args.filter_filename), 
            args.filter_percentile, args.filter_min_nrof_images_per_class)
    nrof_classes = len(train_set)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        
        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list)>0, 'The dataset should not be empty'
        
        # Create a queue that produces indices into the image_list and label_list 
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
                             shuffle=True, seed=None, capacity=32)
        
        index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
        
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')

        labels_placeholder = tf.placeholder(tf.int64, shape=(None,1), name='labels')
        
        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                    dtypes=[tf.string, tf.int64],
                                    shapes=[(1,), (1,)],
                                    shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder], name='enqueue_op')
        
        nrof_preprocess_threads = 4
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_image(file_contents)
                if args.random_rotate:
                    image = tf.py_func(facenet.random_rotate_image, [image], tf.uint8)
                if args.random_crop:
                    image = tf.random_crop(image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)
    
                #pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])
    
        image_batch, label_batch = tf.train.batch_join(
            images_and_labels, batch_size=batch_size_placeholder, 
            shapes=[(args.image_size, args.image_size, 3), ()], enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)
        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')
        
        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))
        
        print('Building training graph')
        
        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability, 
            phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size, 
            weight_decay=args.weight_decay)
        logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None, 
                weights_initializer=tf.truncated_normal_initializer(stddev=0.1), 
                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                scope='Logits', reuse=False)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Add center loss
        if args.center_loss_factor>0.0:
            prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)
        
        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
        
        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step, 
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, 
                        embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer)
    sess.close()
    return model_dir
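facenet.center_loss above follows Wen et al.'s center loss. A sketch consistent with how it is called here (one running center per class, updated with rate alfa; the internals are an assumption, not the verbatim library code):

import tensorflow as tf

def center_loss(features, label, alfa, nrof_classes):
    # Non-trainable variable holding one center per class
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    # Move each used center toward the features of its class
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers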
Example #6
def main(args):
    dataset = facenet.get_dataset(args.dataset_dir)

    with tf.Graph().as_default():

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(dataset)
        nrof_images = len(image_list)
        image_indices = range(nrof_images)

        image_batch, label_batch = facenet.read_and_augment_data(
            image_list,
            image_indices,
            args.image_size,
            args.batch_size,
            None,
            False,
            False,
            False,
            nrof_preprocess_threads=4,
            shuffle=False)

        model_exp = os.path.expanduser(args.model_file)
        with gfile.FastGFile(model_exp, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            input_map = {'input': image_batch, 'phase_train': False}
            tf.import_graph_def(graph_def, input_map=input_map, name='net')

        embeddings = tf.get_default_graph().get_tensor_by_name(
            "net/embeddings:0")

        with tf.Session() as sess:
            tf.train.start_queue_runners(sess=sess)

            embedding_size = int(embeddings.get_shape()[1])
            nrof_batches = int(math.ceil(1.0 * nrof_images / args.batch_size))
            nrof_classes = len(dataset)
            label_array = np.array(label_list)
            class_names = [cls.name for cls in dataset]
            nrof_examples_per_class = [len(cls.image_paths) for cls in dataset]
            class_variance = np.zeros((nrof_classes, ))
            class_center = np.zeros((nrof_classes, embedding_size))
            distance_to_center = np.ones((len(label_list), )) * np.NaN
            emb_array = np.zeros((0, embedding_size))
            idx_array = np.zeros((0, ), dtype=np.int32)
            lab_array = np.zeros((0, ), dtype=np.int32)
            index_arr = np.append(0, np.cumsum(nrof_examples_per_class))
            for i in range(nrof_batches):
                t = time.time()
                emb, idx = sess.run([embeddings, label_batch])
                emb_array = np.append(emb_array, emb, axis=0)
                idx_array = np.append(idx_array, idx, axis=0)
                lab_array = np.append(lab_array, label_array[idx], axis=0)
                for cls in set(lab_array):
                    cls_idx = np.where(lab_array == cls)[0]
                    if cls_idx.shape[0] == nrof_examples_per_class[cls]:
                        # We have calculated all the embeddings for this class
                        i2 = np.argsort(idx_array[cls_idx])
                        emb_class = emb_array[cls_idx, :]
                        emb_sort = emb_class[i2, :]
                        center = np.mean(emb_sort, axis=0)
                        diffs = emb_sort - center
                        dists_sqr = np.sum(np.square(diffs), axis=1)
                        class_variance[cls] = np.mean(dists_sqr)
                        class_center[cls, :] = center
                        distance_to_center[
                            index_arr[cls]:index_arr[cls +
                                                     1]] = np.sqrt(dists_sqr)
                        emb_array = np.delete(emb_array, cls_idx, axis=0)
                        idx_array = np.delete(idx_array, cls_idx, axis=0)
                        lab_array = np.delete(lab_array, cls_idx, axis=0)

                print('Batch %d in %.3f seconds' % (i, time.time() - t))

            print('Writing filtering data to %s' % args.data_file_name)
            mdict = {
                'class_names': class_names,
                'image_list': image_list,
                'label_list': label_list,
                'distance_to_center': distance_to_center
            }
            with h5py.File(args.data_file_name, 'w') as f:
                for key, value in iteritems(mdict):
                    f.create_dataset(key, data=value)
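The HDF5 file written above can be read back when filtering the dataset; a minimal sketch (the file name stands in for args.data_file_name):

import h5py

with h5py.File('filter_data.h5', 'r') as f:
    distance_to_center = f['distance_to_center'][:]
    label_list = f['label_list'][:]
print(distance_to_center.shape)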
Example #7
def split_dataset(dataset, min_nrof_images_per_class, nrof_train_images_per_class):
	train_set = []
	test_set = []
	for cls in dataset:
		paths = cls.image_paths
		# Remove classes with less than min_nrof_images_per_class
		if len(paths)>=min_nrof_images_per_class:
			np.random.shuffle(paths)
			train_set.append(facenet.ImageClass(cls.name, paths[:nrof_train_images_per_class]))
			test_set.append(facenet.ImageClass(cls.name, paths[nrof_train_images_per_class:]))
	return train_set, test_set

with tf.Graph().as_default():
  
	with tf.Session() as sess:
		
		np.random.seed(seed=seed)
		
		if use_split_dataset:
			dataset_tmp = facenet.get_dataset(data_dir)

			train_set, test_set = split_dataset(dataset_tmp, min_nrof_images_per_class, nrof_train_images_per_class)
			if (mode=='TRAIN'):
				dataset = train_set
			elif (mode=='CLASSIFY'):
				dataset = test_set
		else:
			dataset = facenet.get_dataset(data_dir)

		# Check that there is at least one training image per class
		for cls in dataset:
			assert len(cls.image_paths) > 0, 'There must be at least one image for each class in the dataset'

		paths, labels = facenet.get_image_paths_and_labels(dataset)
Example #8
def main(args):
    sleep(random.random())
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir)

    print('Creating networks and loading parameters')

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
    factor = 0.709  # scale factor

    # Add a random key to the filename to allow alignment using multiple processes
    random_key = np.random.randint(0, high=99999)
    bounding_boxes_filename = os.path.join(
        output_dir, 'bounding_boxes_%05d.txt' % random_key)

    with open(bounding_boxes_filename, "w") as text_file:
        nrof_images_total = 0
        nrof_successfully_aligned = 0
        if args.random_order:
            random.shuffle(dataset)
        for cls in dataset:
            output_class_dir = os.path.join(output_dir, cls.name)
            if not os.path.exists(output_class_dir):
                os.makedirs(output_class_dir)
                if args.random_order:
                    random.shuffle(cls.image_paths)
            for image_path in cls.image_paths:
                nrof_images_total += 1
                filename = os.path.splitext(os.path.split(image_path)[1])[0]
                output_filename = os.path.join(output_class_dir,
                                               filename + '.png')
                print(image_path)
                if not os.path.exists(output_filename):
                    try:
                        img = misc.imread(image_path)
                    except (IOError, ValueError, IndexError) as e:
                        errorMessage = '{}: {}'.format(image_path, e)
                        print(errorMessage)
                    else:
                        if img.ndim < 2:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))
                            continue
                        if img.ndim == 2:
                            img = facenet.to_rgb(img)
                        img = img[:, :, 0:3]

                        bounding_boxes, _ = align.detect_face.detect_face(
                            img, minsize, pnet, rnet, onet, threshold, factor)
                        nrof_faces = bounding_boxes.shape[0]
                        if nrof_faces > 0:
                            det = bounding_boxes[:, 0:4]
                            det_arr = []
                            img_size = np.asarray(img.shape)[0:2]
                            if nrof_faces > 1:
                                if args.detect_multiple_faces:
                                    for i in range(nrof_faces):
                                        det_arr.append(np.squeeze(det[i]))
                                else:
                                    bounding_box_size = (
                                        det[:, 2] - det[:, 0]) * (det[:, 3] -
                                                                  det[:, 1])
                                    img_center = img_size / 2
                                    offsets = np.vstack([
                                        (det[:, 0] + det[:, 2]) / 2 -
                                        img_center[1],
                                        (det[:, 1] + det[:, 3]) / 2 -
                                        img_center[0]
                                    ])
                                    offset_dist_squared = np.sum(
                                        np.power(offsets, 2.0), 0)
                                    index = np.argmax(
                                        bounding_box_size -
                                        offset_dist_squared * 2.0
                                    )  # some extra weight on the centering
                                    det_arr.append(det[index, :])
                            else:
                                det_arr.append(np.squeeze(det))

                            for i, det in enumerate(det_arr):
                                det = np.squeeze(det)
                                bb = np.zeros(4, dtype=np.int32)
                                bb[0] = np.maximum(det[0] - args.margin / 2, 0)
                                bb[1] = np.maximum(det[1] - args.margin / 2, 0)
                                bb[2] = np.minimum(det[2] + args.margin / 2,
                                                   img_size[1])
                                bb[3] = np.minimum(det[3] + args.margin / 2,
                                                   img_size[0])
                                cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                                scaled = misc.imresize(
                                    cropped,
                                    (args.image_size, args.image_size),
                                    interp='bilinear')
                                nrof_successfully_aligned += 1
                                filename_base, file_extension = os.path.splitext(
                                    output_filename)
                                if args.detect_multiple_faces:
                                    output_filename_n = "{}_{}{}".format(
                                        filename_base, i, file_extension)
                                else:
                                    output_filename_n = "{}{}".format(
                                        filename_base, file_extension)
                                misc.imsave(output_filename_n, scaled)
                                text_file.write('%s %d %d %d %d\n' %
                                                (output_filename_n, bb[0],
                                                 bb[1], bb[2], bb[3]))
                        else:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))

    print('Total number of images: %d' % nrof_images_total)
    print('Number of successfully aligned images: %d' %
          nrof_successfully_aligned)
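When several faces are found and detect_multiple_faces is off, the code keeps the detection that maximizes bounding_box_size - 2 * offset_dist_squared, i.e. large faces near the image center win. A toy check of that scoring (the boxes are made up):

import numpy as np

det = np.array([[10, 10, 60, 60],      # small face near a corner
                [80, 70, 180, 190]])   # large face near the center
img_size = np.array([256, 256])
box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
center = img_size / 2
offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - center[1],
                     (det[:, 1] + det[:, 3]) / 2 - center[0]])
score = box_size - np.sum(offsets ** 2, axis=0) * 2.0
print(np.argmax(score))  # 1: the large, central face is selected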
Example #9
def main(args):

    img_mean = np.array([134.10714722, 102.52040863, 87.15436554])
    img_stddev = np.sqrt(
        np.array([3941.30175781, 2856.94287109, 2519.35791016]))

    vae_def = importlib.import_module(args.vae_def)
    vae = vae_def.Vae(args.latent_var_size)
    gen_image_size = vae.get_image_size()

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)
    log_file_name = os.path.join(model_dir, 'logs.h5')

    # Write arguments to a text file
    facenet.write_arguments_to_file(args,
                                    os.path.join(model_dir, 'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, model_dir, ' '.join(sys.argv))

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        train_set = facenet.get_dataset(args.data_dir)
        image_list, _ = facenet.get_image_paths_and_labels(train_set)

        # Create the input queue
        input_queue = tf.train.string_input_producer(image_list, shuffle=True)

        nrof_preprocess_threads = 4
        image_per_thread = []
        for _ in range(nrof_preprocess_threads):
            file_contents = tf.read_file(input_queue.dequeue())
            image = tf.image.decode_image(file_contents, channels=3)
            image = tf.image.resize_image_with_crop_or_pad(
                image, args.input_image_size, args.input_image_size)
            image.set_shape((args.input_image_size, args.input_image_size, 3))
            image = tf.cast(image, tf.float32)
            #pylint: disable=no-member
            image_per_thread.append([image])

        images = tf.train.batch_join(image_per_thread,
                                     batch_size=args.batch_size,
                                     capacity=4 * nrof_preprocess_threads *
                                     args.batch_size,
                                     allow_smaller_final_batch=False)

        # Normalize
        images_norm = (images - img_mean) / img_stddev

        # Resize to appropriate size for the encoder
        images_norm_resize = tf.image.resize_images(
            images_norm, (gen_image_size, gen_image_size))

        # Create encoder network
        mean, log_variance = vae.encoder(images_norm_resize, True)

        epsilon = tf.random_normal((tf.shape(mean)[0], args.latent_var_size))
        std = tf.exp(log_variance / 2)
        latent_var = mean + epsilon * std

        # Create decoder network
        reconstructed_norm = vae.decoder(latent_var, True)

        # Un-normalize
        reconstructed = (reconstructed_norm * img_stddev) + img_mean

        # Create reconstruction loss
        if args.reconstruction_loss_type == 'PLAIN':
            images_resize = tf.image.resize_images(
                images, (gen_image_size, gen_image_size))
            reconstruction_loss = tf.reduce_mean(
                tf.reduce_sum(tf.pow(images_resize - reconstructed, 2)))
        elif args.reconstruction_loss_type == 'PERCEPTUAL':
            network = importlib.import_module(args.model_def)

            reconstructed_norm_resize = tf.image.resize_images(
                reconstructed_norm,
                (args.input_image_size, args.input_image_size))

            # Stack images from both the input batch and the reconstructed batch in a new tensor
            shp = [-1] + images_norm.get_shape().as_list()[1:]
            input_images = tf.reshape(
                tf.stack([images_norm, reconstructed_norm_resize], axis=0),
                shp)
            _, end_points = network.inference(input_images,
                                              1.0,
                                              phase_train=False,
                                              bottleneck_layer_size=128,
                                              weight_decay=0.0)

            # Get a list of feature names to use for loss terms
            feature_names = args.loss_features.replace(' ', '').split(',')

            # Calculate L2 loss between original and reconstructed images in feature space
            reconstruction_loss_list = []
            for feature_name in feature_names:
                feature_flat = slim.flatten(end_points[feature_name])
                image_feature, reconstructed_feature = tf.unstack(tf.reshape(
                    feature_flat, [2, args.batch_size, -1]),
                                                                  num=2,
                                                                  axis=0)
                reconstruction_loss = tf.reduce_mean(tf.reduce_sum(
                    tf.pow(image_feature - reconstructed_feature, 2)),
                                                     name=feature_name +
                                                     '_loss')
                reconstruction_loss_list.append(reconstruction_loss)
            # Sum up the losses for the different features
            reconstruction_loss = tf.add_n(reconstruction_loss_list,
                                           'reconstruction_loss')
        else:
            raise ValueError('Invalid reconstruction loss type: %s' %
                             args.reconstruction_loss_type)

        # Create KL divergence loss
        kl_loss = kl_divergence_loss(mean, log_variance)
        kl_loss_mean = tf.reduce_mean(kl_loss)

        total_loss = args.alfa * kl_loss_mean + args.beta * reconstruction_loss

        learning_rate = tf.train.exponential_decay(
            args.initial_learning_rate,
            global_step,
            args.learning_rate_decay_steps,
            args.learning_rate_decay_factor,
            staircase=True)

        # Calculate gradients and make sure not to include parameters for the perceptual loss model
        opt = tf.train.AdamOptimizer(learning_rate)
        grads = opt.compute_gradients(total_loss,
                                      var_list=get_variables_to_train())

        # Apply gradients
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        with tf.control_dependencies([apply_gradient_op]):
            train_op = tf.no_op(name='train')

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        facenet_saver = tf.train.Saver(get_facenet_variables_to_restore())

        # Start running operations on the Graph
        gpu_memory_fraction = 1.0
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if args.reconstruction_loss_type == 'PERCEPTUAL':
                if not args.pretrained_model:
                    raise ValueError(
                        'A pretrained model must be specified when using perceptual loss'
                    )
                pretrained_model_exp = os.path.expanduser(
                    args.pretrained_model)
                print('Restoring pretrained model: %s' % pretrained_model_exp)
                facenet_saver.restore(sess, pretrained_model_exp)

            log = {
                'total_loss': np.zeros((0, ), np.float),
                'reconstruction_loss': np.zeros((0, ), np.float),
                'kl_loss': np.zeros((0, ), np.float),
                'learning_rate': np.zeros((0, ), np.float),
            }

            step = 0
            print('Running training')
            while step < args.max_nrof_steps:
                start_time = time.time()
                step += 1
                save_state = step > 0 and (step % args.save_every_n_steps == 0
                                           or step == args.max_nrof_steps)
                if save_state:
                    _, reconstruction_loss_, kl_loss_mean_, total_loss_, learning_rate_, rec_ = sess.run(
                        [
                            train_op, reconstruction_loss, kl_loss_mean,
                            total_loss, learning_rate, reconstructed
                        ])
                    img = facenet.put_images_on_grid(rec_, shape=(16, 8))
                    misc.imsave(
                        os.path.join(model_dir,
                                     'reconstructed_%06d.png' % step), img)
                else:
                    _, reconstruction_loss_, kl_loss_mean_, total_loss_, learning_rate_ = sess.run(
                        [
                            train_op, reconstruction_loss, kl_loss_mean,
                            total_loss, learning_rate
                        ])
                log['total_loss'] = np.append(log['total_loss'], total_loss_)
                log['reconstruction_loss'] = np.append(
                    log['reconstruction_loss'], reconstruction_loss_)
                log['kl_loss'] = np.append(log['kl_loss'], kl_loss_mean_)
                log['learning_rate'] = np.append(log['learning_rate'],
                                                 learning_rate_)

                duration = time.time() - start_time
                print(
                    'Step: %d \tTime: %.3f \trec_loss: %.3f \tkl_loss: %.3f \ttotal_loss: %.3f'
                    % (step, duration, reconstruction_loss_, kl_loss_mean_,
                       total_loss_))

                if save_state:
                    print('Saving checkpoint file')
                    checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                    saver.save(sess,
                               checkpoint_path,
                               global_step=step,
                               write_meta_graph=False)
                    print('Saving log')
                    with h5py.File(log_file_name, 'w') as f:
                        for key, value in iteritems(log):
                            f.create_dataset(key, data=value)
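kl_divergence_loss is not defined in the excerpt. Assuming the standard closed-form KL divergence between N(mean, exp(log_variance)) and a unit Gaussian, a sketch is:

import tensorflow as tf

def kl_divergence_loss(mean, log_variance):
    # KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    return -0.5 * tf.reduce_sum(
        1 + log_variance - tf.square(mean) - tf.exp(log_variance), axis=1)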
Example #10
def main(args):

    # Import the network module named by args.model_def, e.g. models.inception_resnet_v1
    network = importlib.import_module(args.model_def)
    # Name the model after the current date and time
    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    # Logs go under the logs base dir, e.g. C:\Users\Administrator\logs\facenet\
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write the arguments to a text file in the log directory
    facenet.write_arguments_to_file(args, os.path.join(log_dir,
                                                       'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    # e.g. arg_string: 'E:/facenet/train_tripletloss.py', output_dir: 'C:\\Users\\Administrator/logs/facenet\\20180314-181556'
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    # Get the dataset: given a parent path, e.g. 'E:/facenet/data/lfw_160', each
    # subdirectory is scanned. Returns a list of ImageClass objects, each holding
    # a list of image paths (image_paths) and the person's name (name).
    train_set = facenet.get_dataset(args.data_dir)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    if args.pretrained_model:  # if a pretrained model was given, it is restored later via saver.restore
        print('Pre-trained model: %s' %
              os.path.expanduser(args.pretrained_model))

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    # Build the graph

    # The with statement ensures the necessary cleanup happens whether or not an exception occurs
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')
        # Batch size
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        # Whether we are training or evaluating
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        # Image paths
        image_paths_placeholder = tf.placeholder(tf.string,
                                                 shape=(None, 3),
                                                 name='image_paths')
        # Image labels
        labels_placeholder = tf.placeholder(tf.int64,
                                            shape=(None, 3),
                                            name='labels')
        # Create a first-in, first-out queue for the input data
        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                              dtypes=[tf.string, tf.int64],
                                              shapes=[(3, ), (3, )],
                                              shared_name=None,
                                              name=None)
        # enqueue_many returns an op that enqueues all the given elements
        enqueue_op = input_queue.enqueue_many(
            [image_paths_placeholder, labels_placeholder])

        nrof_preprocess_threads = 4
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_image(file_contents, channels=3)

                if args.random_crop:
                    image = tf.random_crop(
                        image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(
                        image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)

                #pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])

        image_batch, labels_batch = tf.train.batch_join(
            images_and_labels,
            batch_size=batch_size_placeholder,
            shapes=[(args.image_size, args.image_size, 3), ()],
            enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)
        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        labels_batch = tf.identity(labels_batch, 'label_batch')

        # Build the inference graph
        # prelogits is the output of the last layer
        prelogits, _ = network.inference(
            image_batch,
            args.keep_probability,
            phase_train=phase_train_placeholder,
            bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)

        # L2-normalize the embeddings:
        # tf.nn.l2_normalize(input, axis (0 = per column, 1 = per row), epsilon, name)
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
        # Split embeddings into anchor, positive and negative and calculate triplet loss
        anchor, positive, negative = tf.unstack(
            tf.reshape(embeddings, [-1, 3, args.embedding_size]), 3, 1)
        triplet_loss = facenet.triplet_loss(anchor, positive, negative,
                                            args.alpha)
        # Apply exponential decay to the learning rate
        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Compute the losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        # Total loss = triplet loss + regularization losses
        total_loss = tf.add_n([triplet_loss] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        # Choose the optimizer and apply the gradients; global_step is incremented once per update
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables())

        # Create a saver to save and restore model parameters
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph; cap the fraction of GPU memory that can be allocated
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

        # Initialize variables
        sess.run(tf.global_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})
        sess.run(tf.local_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})

        # Write the log files
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        # Create a coordinator for the queue-runner threads
        coord = tf.train.Coordinator()
        # Start the queue runners
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if args.pretrained_model:
                print('Restoring pretrained model: %s' % args.pretrained_model)
                saver.restore(sess, os.path.expanduser(args.pretrained_model))

            # Training and validation loop
            epoch = 0
            # Number of full passes over the data (default 500)
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                # epoch_size is the number of batches per epoch; the epoch index derived from the global step drives the learning-rate schedule
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, train_set, epoch, image_paths_placeholder,
                      labels_placeholder, labels_batch, batch_size_placeholder,
                      learning_rate_placeholder, phase_train_placeholder,
                      enqueue_op, input_queue, global_step, embeddings,
                      total_loss, train_op, summary_op, summary_writer,
                      args.learning_rate_schedule_file, args.embedding_size,
                      anchor, positive, negative, triplet_loss)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, lfw_paths, embeddings, labels_batch,
                             image_paths_placeholder, labels_placeholder,
                             batch_size_placeholder, learning_rate_placeholder,
                             phase_train_placeholder, enqueue_op,
                             actual_issame, args.batch_size,
                             args.lfw_nrof_folds, log_dir, step,
                             summary_writer, args.embedding_size)

    return model_dir
Example #11
def main(args):

    with tf.Graph().as_default():
      
        with tf.Session() as sess:
            
            np.random.seed(seed=args.seed)

            # Split the dataset from the input folder
            if args.use_split_dataset:
                dataset_tmp = facenet.get_dataset(args.data_dir)
                train_set, test_set = split_dataset(dataset_tmp, args.min_nrof_images_per_class, args.nrof_train_images_per_class)
                if (args.mode=='TRAIN'):
                    dataset = train_set
                elif (args.mode=='CLASSIFY'):
                    dataset = test_set
            # Otherwise use the input folder as-is
            else:
                dataset = facenet.get_dataset(args.data_dir)

            # Check that there is at least one training image per class
            for cls in dataset:
                assert len(cls.image_paths) > 0, 'There must be at least one image for each class in the dataset'

            paths, labels = facenet.get_image_paths_and_labels(dataset)  # e.g. paths: ['datasets\\test\\yeojingoo\\2019010302124_0.jpg', ...], labels: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
            
            print('Number of classes: %d' % len(dataset))
            print('Number of images: %d' % len(paths))
            
            # Load the model
            print('Loading feature extraction model')
            facenet.load_model(args.model)
            
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]
            
            # Run forward pass to calculate embeddings
            print('Calculating features for images')
            nrof_images = len(paths)
            nrof_batches_per_epoch = int(math.ceil(1.0*nrof_images / args.batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_epoch):
                start_index = i*args.batch_size
                end_index = min((i+1)*args.batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data_npy(paths_batch, False, False, args.image_size)
                feed_dict = { images_placeholder:images, phase_train_placeholder:False }
                emb_array[start_index:end_index,:] = sess.run(embeddings, feed_dict=feed_dict)
                # Save the embedding values computed so far
                np.save("emb.npy", emb_array)

            classifier_filename_exp = os.path.expanduser(args.classifier_filename)  # expand to an absolute path

            if (args.mode=='TRAIN'):
                # Train classifier
                print('Training classifier')
                model = GaussianNB()
                #model = SVC(kernel='linear', probability=True)
                model.fit(emb_array, labels)
                # Inspect the fitted model
                print("Analysis")
                print(model.classes_)
                print(model.class_count_)
                print(model.class_prior_)

                # Create a list of class names
                class_names = [ cls.name.replace('_', ' ') for cls in dataset]

                # Saving classifier model
                with open(classifier_filename_exp, 'wb') as outfile:
                    pickle.dump((model, class_names), outfile)
                print('Saved classifier model to file "%s"' % classifier_filename_exp)
                
            elif (args.mode=='CLASSIFY'):
                # Classify images
                print('===Testing classifier===')
                with open(classifier_filename_exp, 'rb') as infile:
                    (model, class_names) = pickle.load(infile)

                with open('models/datasets_classifier.pkl', 'rb') as file:
                    data_list = []

                    while True:
                        try:
                            data = pickle.load(file)
                        except EOFError:
                            print("end")
                            break
                        data_list.append(data)
                print(data_list)

                print('Loaded classifier model from file "%s"' % classifier_filename_exp)
                print("model: ", model)
                # Prediction
                predictions = model.predict_proba(emb_array)
                best_class_indices = np.argmax(predictions, axis=1)
                print(predictions)
                best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]  # for each test image: probability of its best predicted class
                print("np.arange(len(best_class_indices)): ", np.arange(len(best_class_indices)))
                print("best_class_probabilities: ", best_class_probabilities)

                for i in range(len(best_class_indices)):
                    print('%4d  %s: %.3f' % (i, class_names[best_class_indices[i]], best_class_probabilities[i]))
                    
                accuracy = np.mean(np.equal(best_class_indices, labels))
                print('Accuracy: %.3f' % accuracy)
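                # A quick sanity check of the fancy indexing above, with
                # hypothetical values:
                #   predictions = [[0.1, 0.9], [0.8, 0.2]]
                #   best_class_indices = [1, 0]
                #   predictions[np.arange(2), best_class_indices] -> [0.9, 0.8]
                # Note the accuracy is only meaningful when the classified
                # images carry ground-truth labels in the same folder layout
                # used for training.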
Example #12
def main(args):
  
    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    subdirmaxlin = subdir + '_lin_max'  # fbtian_max
    maxlin_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdirmaxlin)  # fbtian_max
    if not os.path.exists(maxlin_dir):  # fbtian_max
        os.makedirs(maxlin_dir)  # fbtian_max

    subdirmax = subdir + '_max'  # fbtian_max
    modelmax_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdirmax)  # fbtian_max
    if not os.path.exists(modelmax_dir):  # fbtian_max
        os.makedirs(modelmax_dir)  # fbtian_max

    # Store some git revision info in a text file in the log directory
    if not args.no_store_revision_info:
        src_path,_ = os.path.split(os.path.realpath(__file__))
        facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    if args.pretrained_model:
        print('Pre-trained model: %s' % os.path.expanduser(args.pretrained_model))
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
        
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,3), name='image_paths')
        labels_placeholder = tf.placeholder(tf.int64, shape=(None,3), name='labels')
        
        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                    dtypes=[tf.string, tf.int64],
                                    shapes=[(3,), (3,)],
                                    shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder])
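        # Each queue element is a triplet: three image paths and three labels,
        # matching the (anchor, positive, negative) structure that the
        # embedding reshape below relies on.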
        
        nrof_preprocess_threads = 4
        images_and_labels = []
        
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            fb_count=0
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                print('filename: %s' % filename)  # note: this prints the string tensor, not its runtime value
                image = tf.image.decode_png(file_contents)
                
                if args.random_crop:
                    print('args.random_crop') #fbtian_add
                    image = tf.random_crop(image, [args.image_size, args.image_size, 3])
                else:
                    #print('else not args.random_crop') #come in fbtian_add
                    image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
                if args.random_flip:
                    print('args.random_flip')
                    image = tf.image.random_flip_left_right(image)
                # Always apply random brightness and contrast augmentation
                image = tf.image.random_brightness(image, max_delta=0.2)
                image = tf.image.random_contrast(image, lower=0.2, upper=1.0)
                fb_count+=1
                #pylint: disable=no-member# fbtian_add
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
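                # per_image_standardization rescales each image to zero mean
                # and unit variance: (x - mean) / max(stddev, 1/sqrt(num_elements)).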
            images_and_labels.append([images, label])
            print('fb_count:%d'%fb_count)
    
        image_batch, labels_batch = tf.train.batch_join(
            images_and_labels, batch_size=batch_size_placeholder, 
            shapes=[(args.image_size, args.image_size, 3), ()], enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)
        image_batch = tf.identity(image_batch, 'input') ##fbtian
        batch_norm_params = {
            # Decay for the moving averages
            'decay': 0.995,
            # epsilon to prevent 0s in variance
            'epsilon': 0.001,
            # force in-place updates of mean and variance estimates
            'updates_collections': None,
            # Moving averages ends up in the trainable variables collection
            'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
            # Only update statistics during training mode
            'is_training': phase_train_placeholder
        }
        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability, 
            phase_train=phase_train_placeholder, weight_decay=args.weight_decay)
        pre_embeddings = slim.fully_connected(prelogits, args.embedding_size, activation_fn=None, 
                weights_initializer=tf.truncated_normal_initializer(stddev=0.1), 
                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params,
                scope='Bottleneck', reuse=False)
        
        embeddings = tf.nn.l2_normalize(pre_embeddings, 1, 1e-10, name='embeddings')
        # Split embeddings into anchor, positive and negative and calculate triplet loss
        anchor, positive, negative = tf.unstack(tf.reshape(embeddings, [-1,3,args.embedding_size]), 3, 1)
        triplet_loss = facenet.triplet_loss(anchor, positive, negative, args.alpha)
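        # The triplet loss (as in the FaceNet paper) penalizes, per
        # (anchor, positive, negative) triplet,
        #   max(||f(a) - f(p)||^2 - ||f(a) - f(n)||^2 + alpha, 0)
        # so a positive must sit at least `alpha` closer to the anchor than a
        # negative before its triplet stops contributing to the loss.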
        
        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
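        # With staircase=True the rate is multiplied by
        # learning_rate_decay_factor once every
        # learning_rate_decay_epochs * epoch_size global steps.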
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([triplet_loss] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, tf.global_variables())
        
        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
        config = tf.ConfigProto(gpu_options=gpu_options,
                                allow_soft_placement=True,
                                intra_op_parallelism_threads=8)
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)

        # Initialize variables
        sess.run(tf.global_variables_initializer(), feed_dict={phase_train_placeholder:True})
        sess.run(tf.local_variables_initializer(), feed_dict={phase_train_placeholder:True})

        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if args.pretrained_model:
                print('Restoring pretrained model: %s' % args.pretrained_model)
                saver.restore(sess, os.path.expanduser(args.pretrained_model))

            # Training and validation loop
            epoch = 0
            acc_tmp = 0
            val_tmp = 0
            while epoch < args.max_nrof_epochs:
                
                step = sess.run(global_step, feed_dict=None)
                print('Global step: %d' % step)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, train_set, epoch, image_paths_placeholder, labels_placeholder, labels_batch,
                    batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder, enqueue_op, input_queue, global_step, 
                    embeddings, total_loss, train_op, summary_op, summary_writer, args.learning_rate_schedule_file,
                    args.embedding_size, anchor, positive, negative, triplet_loss)

                # Save variables and the metagraph if it doesn't exist already


                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    acc,val=evaluate(sess, lfw_paths, embeddings, labels_batch, image_paths_placeholder, labels_placeholder, 
                            batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder, enqueue_op, actual_issame, args.batch_size, 
                            args.lfw_nrof_folds, log_dir, step, summary_writer, args.embedding_size)

                    print('starting to save the max-accuracy and max-val models')  # fbtian_max
                    if acc > acc_tmp:  # fbtian_max
                        maxmodel_path = os.path.join(maxlin_dir, 'model-%s.ckpt_accmax' % subdir)  # fbtian_max
                        saver.save(sess, maxmodel_path, write_meta_graph=False)  # fbtian_max
                        shutil.copy(maxmodel_path + '.data-00000-of-00001', modelmax_dir)
                        shutil.copy(maxmodel_path + '.index', modelmax_dir)
                        acc_tmp = acc  # fbtian_max
                    if val > val_tmp:  # fbtian_max
                        maxmodel_path = os.path.join(maxlin_dir, 'model-%s.ckpt_valmax' % subdir)  # fbtian_max
                        saver.save(sess, maxmodel_path, write_meta_graph=False)  # fbtian_max
                        shutil.copy(maxmodel_path + '.data-00000-of-00001', modelmax_dir)
                        shutil.copy(maxmodel_path + '.index', modelmax_dir)
                        val_tmp = val  # fbtian
                    print('finished saving the max-accuracy and max-val models')  # fbtian


    sess.close()
    return model_dir
Example #13
def main(args):
    print(args.model_def)
    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    if args.pretrained_model:
        print('Pre-trained model: %s' %
              os.path.expanduser(args.pretrained_model))

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        image_paths_placeholder = tf.placeholder(tf.string,
                                                 shape=(None, 3),
                                                 name='image_paths')
        labels_placeholder = tf.placeholder(tf.int64,
                                            shape=(None, 3),
                                            name='labels')

        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                              dtypes=[tf.string, tf.int64],
                                              shapes=[(3, ), (3, )],
                                              shared_name=None,
                                              name=None)
        enqueue_op = input_queue.enqueue_many(
            [image_paths_placeholder, labels_placeholder])

        nrof_preprocess_threads = 4
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_png(file_contents)

                if args.random_crop:
                    image = tf.random_crop(
                        image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(
                        image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)

                #pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])

        image_batch, labels_batch = tf.train.batch_join(
            images_and_labels,
            batch_size=batch_size_placeholder,
            shapes=[(args.image_size, args.image_size, 3), ()],
            enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)

        # Build the inference graph
        prelogits, _ = network.inference(
            image_batch,
            args.keep_probability,
            phase_train=phase_train_placeholder,
            bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
        # Split embeddings into anchor, positive and negative and calculate triplet loss
        anchor, positive, negative = tf.unstack(
            tf.reshape(embeddings, [-1, 3, args.embedding_size]), 3, 1)
        triplet_loss = facenet.triplet_loss(anchor, positive, negative,
                                            args.alpha)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([triplet_loss] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables())

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

        # Initialize variables
        sess.run(tf.global_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})
        sess.run(tf.local_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})

        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if args.pretrained_model:
                print('Restoring pretrained model: %s' % args.pretrained_model)
                saver.restore(sess, os.path.expanduser(args.pretrained_model))

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, train_set, epoch, image_paths_placeholder,
                      labels_placeholder, labels_batch, batch_size_placeholder,
                      learning_rate_placeholder, phase_train_placeholder,
                      enqueue_op, input_queue, global_step, embeddings,
                      total_loss, train_op, summary_op, summary_writer,
                      args.learning_rate_schedule_file, args.embedding_size,
                      anchor, positive, negative, triplet_loss)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, lfw_paths, embeddings, labels_batch,
                             image_paths_placeholder, labels_placeholder,
                             batch_size_placeholder, learning_rate_placeholder,
                             phase_train_placeholder, enqueue_op,
                             actual_issame, args.batch_size,
                             args.lfw_nrof_folds, log_dir, step,
                             summary_writer, args.embedding_size)

    sess.close()
    return model_dir
Example #14
def main(args):
    align = align_dlib.AlignDlib(os.path.expanduser(args.dlib_face_predictor))
    landmarkIndices = align_dlib.AlignDlib.OUTER_EYES_AND_NOSE
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir)
    random.shuffle(dataset)
    # Scale the image such that the face fills the frame when cropped to crop_size
    scale = float(args.face_size) / args.image_size
    nrof_images_total = 0
    nrof_prealigned_images = 0
    nrof_successfully_aligned = 0
    for cls in dataset:
        output_class_dir = os.path.join(output_dir, cls.name)
        if not os.path.exists(output_class_dir):
            os.makedirs(output_class_dir)
        random.shuffle(cls.image_paths)
        for image_path in cls.image_paths:
            nrof_images_total += 1
            filename = os.path.splitext(os.path.split(image_path)[1])[0]
            output_filename = os.path.join(output_class_dir, filename+'.png')
            if not os.path.exists(output_filename):
                try:
                    img = misc.imread(image_path)
                except (IOError, ValueError, IndexError) as e:
                    errorMessage = '{}: {}'.format(image_path, e)
                    print(errorMessage)
                else:
                    if img.ndim == 2:
                        img = facenet.to_rgb(img)
                    if args.use_center_crop:
                        scaled = misc.imresize(img, args.prealigned_scale, interp='bilinear')
                        sz1 = scaled.shape[1]/2
                        sz2 = args.image_size/2
                        aligned = scaled[int(sz1-sz2):int(sz1+sz2),int(sz1-sz2):int(sz1+sz2),:]
                    else:
                        aligned = align.align(args.image_size, img, landmarkIndices=landmarkIndices, 
                                              skipMulti=False, scale=scale)
                    if aligned is not None:
                        print(image_path)
                        nrof_successfully_aligned += 1
                        misc.imsave(output_filename, aligned)
                    elif args.prealigned_dir:
                        # Face detection failed. Use center crop from pre-aligned dataset
                        class_name = os.path.split(output_class_dir)[1]
                        image_path_without_ext = os.path.join(os.path.expanduser(args.prealigned_dir), 
                                                              class_name, filename)
                        # Find the extension of the image
                        exts = ('jpg', 'png')
                        image_path = ''
                        for ext in exts:
                            temp_path = image_path_without_ext + '.' + ext
                            if os.path.exists(temp_path):
                                image_path = temp_path
                                break
                        try:
                            img = misc.imread(image_path)
                        except (IOError, ValueError, IndexError) as e:
                            errorMessage = '{}: {}'.format(image_path, e)
                            print(errorMessage)
                        else:
                            scaled = misc.imresize(img, args.prealigned_scale, interp='bilinear')
                            sz1 = int(scaled.shape[1] / 2)
                            sz2 = int(args.image_size / 2)
                            cropped = scaled[(sz1 - sz2):(sz1 + sz2), (sz1 - sz2):(sz1 + sz2), :]
                            print(image_path)
                            nrof_prealigned_images += 1
                            misc.imsave(output_filename, cropped)
                    else:
                        print('Unable to align "%s"' % image_path)

    print('Total number of images: %d' % nrof_images_total)
    print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
    print('Number of pre-aligned images: %d' % nrof_prealigned_images)
Example #15
def main(args):
    with tf.Graph().as_default():

        with tf.Session() as sess:

            np.random.seed(seed=args["seed"])

            if args["use_split_dataset"]:
                dataset_tmp = facenet.get_dataset(args["data_dir"])
                train_set, test_set = split_dataset(
                    dataset_tmp, args["min_nrof_images_per_class"], args["nrof_train_images_per_class"])
                if (args["mode"] == 'TRAIN'):
                    dataset = train_set
                elif (args["mode"] == 'CLASSIFY'):
                    dataset = test_set
            else:
                dataset = facenet.get_dataset(args["data_dir"])

            # Check that there is at least one training image per class
            for cls in dataset:
                assert len(cls.image_paths) > 0, \
                    'There must be at least one image for each class in the dataset'

            paths, labels = facenet.get_image_paths_and_labels(dataset)

            print('Number of classes: %d' % len(dataset))
            print('Number of images: %d' % len(paths))

            # Load the model
            print('Loading feature extraction model')
            facenet.load_model(args["model"])

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Calculating features for images')
            nrof_images = len(paths)
            nrof_batches_per_epoch = int(
                math.ceil(1.0*nrof_images / args["batch_size"]))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_epoch):
                start_index = i*args["batch_size"]
                end_index = min((i+1)*args["batch_size"], nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(
                    paths_batch, False, False, args["image_size"])
                feed_dict = {images_placeholder: images,
                             phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            classifier_filename_exp = os.path.expanduser(
                args["classifier_filename"])

            if (args["mode"] == 'TRAIN'):
                # Train classifier
                print('Training classifier')
                model = SVC(kernel='linear', probability=True)
                model.fit(emb_array, labels)

                # Create a list of class names
                class_names = [cls.name.replace('_', ' ') for cls in dataset]

                # Saving classifier model
                with open(classifier_filename_exp, 'wb') as outfile:
                    pickle.dump((model, class_names), outfile)
                print('Saved classifier model to file "%s"' %
                      classifier_filename_exp)

            elif (args["mode"] == 'CLASSIFY'):
                # Classify images
                print('Testing classifier')
                with open(classifier_filename_exp, 'rb') as infile:
                    (model, class_names) = pickle.load(infile)

                print('Loaded classifier model from file "%s"' %
                      classifier_filename_exp)

                predictions = model.predict_proba(emb_array)
                best_class_indices = np.argmax(predictions, axis=1)
                best_class_probabilities = predictions[np.arange(
                    len(best_class_indices)), best_class_indices]

                for i in range(len(best_class_indices)):
                    print('%4d  %s: %.3f' % (
                        i, class_names[best_class_indices[i]], best_class_probabilities[i]))

                accuracy = np.mean(np.equal(best_class_indices, labels))
                print('Accuracy: %.3f' % accuracy)
Example #16
def main(args):
    network = importlib.import_module(args.model_def)
    image_size = (args.image_size, args.image_size)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')  # use the current time as the run's directory name
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir),
                           subdir)  # directory where logs are saved
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir),
                             subdir)  # directory where the trained model is saved
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    stat_file_name = os.path.join(log_dir, 'stat.h5')

    # Write arguments to a text file; it records the hyperparameters used for this training run
    facenet.write_arguments_to_file(args, os.path.join(log_dir,
                                                       'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    dataset = facenet.get_dataset(args.data_dir)  # path to the training data
    if args.filter_filename:
        dataset = filter_dataset(dataset,
                                 os.path.expanduser(args.filter_filename),
                                 args.filter_percentile,
                                 args.filter_min_nrof_images_per_class)
    # Split the dataset into a training set and a validation set
    if args.validation_set_split_ratio > 0.0:
        train_set, val_set = facenet.split_dataset(
            dataset, args.validation_set_split_ratio,
            args.min_nrof_val_images_per_class, 'SPLIT_IMAGES')
    else:
        train_set, val_set = dataset, []
    # Number of classes, i.e. the number of subdirectories in the training set
    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    # Pre-trained model, if any
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)
    # Path to the LFW data used for testing
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Get a list of training image paths and their corresponding labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list) > 0, 'The training set should not be empty'
        # Image paths and corresponding labels used for validation
        val_image_list, val_label_list = facenet.get_image_paths_and_labels(
            val_set)

        # Create a queue that produces indices into the image_list and label_list
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size,
                                                    num_epochs=None,
                                                    shuffle=True,
                                                    seed=None,
                                                    capacity=32)

        index_dequeue_op = index_queue.dequeue_many(
            args.batch_size * args.epoch_size, 'index_dequeue')
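        # A single dequeue yields batch_size * epoch_size shuffled indices,
        # i.e. exactly the samples consumed by one training epoch.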

        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        image_paths_placeholder = tf.placeholder(tf.string,
                                                 shape=(None, 1),
                                                 name='image_paths')
        labels_placeholder = tf.placeholder(tf.int32,
                                            shape=(None, 1),
                                            name='labels')
        control_placeholder = tf.placeholder(tf.int32,
                                             shape=(None, 1),
                                             name='control')

        nrof_preprocess_threads = 4
        # First-in, first-out input queue
        input_queue = data_flow_ops.FIFOQueue(
            capacity=2000000,
            dtypes=[tf.string, tf.int32, tf.int32],
            shapes=[(1, ), (1, ), (1, )],
            shared_name=None,
            name=None)
        enqueue_op = input_queue.enqueue_many(
            [image_paths_placeholder, labels_placeholder, control_placeholder],
            name='enqueue_op')
        image_batch, label_batch = facenet.create_input_pipeline(
            input_queue, image_size, nrof_preprocess_threads,
            batch_size_placeholder)

        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')

        print('Number of classes in training set: %d' % nrof_classes)
        print('Number of examples in training set: %d' % len(image_list))

        print('Number of classes in validation set: %d' % len(val_set))
        print('Number of examples in validation set: %d' % len(val_image_list))

        print('Building training graph')

        # Build the inference graph
        prelogits, _ = network.inference(
            image_batch,
            args.keep_probability,
            phase_train=phase_train_placeholder,
            bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)
        logits = slim.fully_connected(
            prelogits,
            len(train_set),
            activation_fn=None,
            weights_initializer=slim.initializers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(args.weight_decay),
            scope='Logits',
            reuse=False)
        # L2 normalization
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
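        # Each embedding is the prelogits vector projected onto the unit
        # hypersphere: x / sqrt(max(sum(x^2), 1e-10)), so squared Euclidean
        # distances between embeddings are bounded in [0, 4].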

        # Norm for the prelogits
        eps = 1e-4
        prelogits_norm = tf.reduce_mean(
            tf.norm(tf.abs(prelogits) + eps, ord=args.prelogits_norm_p,
                    axis=1))
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                             prelogits_norm * args.prelogits_norm_loss_factor)

        # Add center loss
        prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch,
                                                       args.center_loss_alfa,
                                                       nrof_classes)
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                             prelogits_center_loss * args.center_loss_factor)
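        # Center loss keeps a running feature center per class and penalizes
        # the distance between each prelogits vector and its class center;
        # adding it to the regularization collection folds it (scaled by
        # center_loss_factor) into total_loss below.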

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch,
            logits=logits,
            name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        correct_prediction = tf.cast(
            tf.equal(tf.argmax(logits, 1), tf.cast(label_batch, tf.int64)),
            tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables(), args.log_histograms)

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(),
                               max_to_keep=1,
                               save_relative_paths=True)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if args.pretrained_model:
                print('Restoring pretrained model: %s' % args.pretrained_model)
                ckpt = tf.train.get_checkpoint_state(args.pretrained_model)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)

            # Training and validation loop
            print('Running training')
            nrof_steps = args.max_nrof_epochs * args.epoch_size
            nrof_val_samples = int(
                math.ceil(args.max_nrof_epochs / args.validate_every_n_epochs)
            )  # Validate every validate_every_n_epochs as well as in the last epoch
            stat = {
                'loss':
                np.zeros((nrof_steps, ), np.float32),
                'center_loss':
                np.zeros((nrof_steps, ), np.float32),
                'reg_loss':
                np.zeros((nrof_steps, ), np.float32),
                'xent_loss':
                np.zeros((nrof_steps, ), np.float32),
                'prelogits_norm':
                np.zeros((nrof_steps, ), np.float32),
                'accuracy':
                np.zeros((nrof_steps, ), np.float32),
                'val_loss':
                np.zeros((nrof_val_samples, ), np.float32),
                'val_xent_loss':
                np.zeros((nrof_val_samples, ), np.float32),
                'val_accuracy':
                np.zeros((nrof_val_samples, ), np.float32),
                'lfw_accuracy':
                np.zeros((args.max_nrof_epochs, ), np.float32),
                'lfw_valrate':
                np.zeros((args.max_nrof_epochs, ), np.float32),
                'learning_rate':
                np.zeros((args.max_nrof_epochs, ), np.float32),
                'time_train':
                np.zeros((args.max_nrof_epochs, ), np.float32),
                'time_validate':
                np.zeros((args.max_nrof_epochs, ), np.float32),
                'time_evaluate':
                np.zeros((args.max_nrof_epochs, ), np.float32),
                'prelogits_hist':
                np.zeros((args.max_nrof_epochs, 1000), np.float32),
            }
            for epoch in range(1, args.max_nrof_epochs + 1):
                step = sess.run(global_step, feed_dict=None)
                # Train for one epoch
                t = time.time()
                cont = train(
                    args, sess, epoch, image_list, label_list,
                    index_dequeue_op, enqueue_op, image_paths_placeholder,
                    labels_placeholder, learning_rate_placeholder,
                    phase_train_placeholder, batch_size_placeholder,
                    control_placeholder, global_step, total_loss, train_op,
                    summary_op, summary_writer, regularization_losses,
                    args.learning_rate_schedule_file, stat, cross_entropy_mean,
                    accuracy, learning_rate, prelogits, prelogits_center_loss,
                    args.random_rotate, args.random_crop, args.random_flip,
                    prelogits_norm, args.prelogits_hist_max,
                    args.use_fixed_image_standardization)
                stat['time_train'][epoch - 1] = time.time() - t

                if not cont:
                    break

                t = time.time()
                if len(val_image_list) > 0 and (
                    (epoch - 1) % args.validate_every_n_epochs
                        == args.validate_every_n_epochs - 1
                        or epoch == args.max_nrof_epochs):
                    validate(args, sess, epoch, val_image_list, val_label_list,
                             enqueue_op, image_paths_placeholder,
                             labels_placeholder, control_placeholder,
                             phase_train_placeholder, batch_size_placeholder,
                             stat, total_loss, regularization_losses,
                             cross_entropy_mean, accuracy,
                             args.validate_every_n_epochs,
                             args.use_fixed_image_standardization)
                stat['time_validate'][epoch - 1] = time.time() - t

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, epoch)

                # Evaluate on LFW
                t = time.time()
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder,
                             labels_placeholder, phase_train_placeholder,
                             batch_size_placeholder, control_placeholder,
                             embeddings, label_batch, lfw_paths, actual_issame,
                             args.lfw_batch_size, args.lfw_nrof_folds, log_dir,
                             step, summary_writer, stat, epoch,
                             args.lfw_distance_metric, args.lfw_subtract_mean,
                             args.lfw_use_flipped_images,
                             args.use_fixed_image_standardization)
                stat['time_evaluate'][epoch - 1] = time.time() - t

                print('Saving statistics')
                with h5py.File(stat_file_name, 'w') as f:
                    for key, value in stat.items():
                        f.create_dataset(key, data=value)

    return model_dir
Example #17
    def do_POST(self):
        if self.headers['content-type'] == 'application/json':
            global images_placeholder
            global embeddings
            global phase_train_placeholder
            global embedding_size
            global svm_model
            global class_names
            global schools

            global model
            length = int(self.headers['content-length'])
            request = json.loads(self.rfile.read(length))
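            # The JSON body selects an operation via its 'request' field:
            # Compare, AddFace, DelFace, GetFaceList, GetFaceImage, Train or
            # Predict, each handled by one branch below.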
            if request['request'] == 'Compare':
                request['face'][0] = BytesIO(
                    base64.b64decode(request['face'][0]))
                request['face'][1] = BytesIO(
                    base64.b64decode(request['face'][1]))
                img1 = misc.imread(request['face'][0], mode='RGB')
                img2 = misc.imread(request['face'][1], mode='RGB')

                img1, box1, _ = cut_face(img1)
                img2, box2, _ = cut_face(img2)
                if len(img1) > 1 or len(img2) > 1:
                    self.send_response(400)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response':
                            'error',
                            'description':
                            'There are more than one face in an image'
                        }).encode())
                    return  # don't fall through to the comparison below

                img1 = img1[0]
                img2 = img2[0]
                f1 = model.get_feature(img1)
                f2 = model.get_feature(img2)
                dist = np.sum(np.square(f1 - f2))
                #                sim = np.dot(f1, f2.T)
                #                print('dist,sim',dist,sim)
                sim = get_sim(dist)
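                # get_sim (defined elsewhere in this service) presumably maps
                # the squared L2 distance between the two embeddings to a
                # similarity score, with smaller distances giving higher
                # similarity.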

                self.send_response(200)
                self.send_header('Content-type', 'application/json')
                self.end_headers()
                self.wfile.write(
                    json.dumps({
                        'response': 'ok',
                        'similarity': str(sim),
                        'description': ''
                    }).encode())

            elif request['request'] == 'AddFace':
                pic = BytesIO(base64.b64decode(request['face']))
                img = misc.imread(pic, mode='RGB')
                images, box, bgr = cut_face(img)
                if len(images) == 1:
                    person_path = os.path.join(args.images_dir,
                                               request['SchoolId'],
                                               request['PersonId'])
                    if not os.path.exists(person_path):
                        os.makedirs(person_path)
                    files = os.listdir(person_path)
                    pic_name = request['PersonId'] + str(len(files) +
                                                         1) + '.png'
                    pic_path = os.path.join(person_path, pic_name)
                    cv2.imwrite(pic_path, bgr[0])
                    self.send_response(200)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': 'ok',
                            'FileId': os.path.basename(pic_path),
                            'description': ''
                        }).encode())
                elif len(images) == 0:
                    self.send_response(400)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': 'error',
                            'FileId': '',
                            'description': 'no face detected'
                        }).encode())
                    return
                else:
                    self.send_response(400)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': 'error',
                            'FileId': '',
                            'description': 'more than one face detected'
                        }).encode())
                    return

            elif request['request'] == 'DelFace':
                response = 'ok'
                school_path = os.path.join(args.images_dir,
                                           request['SchoolId'])
                if os.path.isdir(school_path):
                    for person in request['PersonId']:
                        person_path = os.path.join(school_path, person[0])
                        if os.path.isdir(person_path):
                            if person[1] == '*':
                                for file_name in os.listdir(person_path):
                                    os.remove(
                                        os.path.join(person_path, file_name))
                                os.rmdir(person_path)
                            else:
                                for i in range(1, len(person)):
                                    file_path = os.path.join(
                                        person_path, person[i])
                                    if os.path.isfile(file_path):
                                        os.remove(file_path)
                                    else:
                                        response = 'error'
                        else:
                            response = 'error'
                else:
                    response = 'error'
                if response == 'ok':
                    self.send_response(200)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': response,
                            'description': ''
                        }).encode())
                else:
                    self.send_response(400)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': 'error',
                            'description': 'some or all deletion failed'
                        }).encode())

            elif request['request'] == 'GetFaceList':
                if request['SchoolId'] == '*':
                    self.send_response(200)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': 'ok',
                            'list': os.listdir(args.images_dir),
                            'description': ''
                        }).encode())
                elif request['PersonId'] == '*':
                    path = os.path.join(args.images_dir, request['SchoolId'])
                    self.send_response(200)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': 'ok',
                            'list': os.listdir(path),
                            'description': ''
                        }).encode())
                elif request['FileId'] == '*':
                    path = os.path.join(args.images_dir, request['SchoolId'],
                                        request['PersonId'])
                    self.send_response(200)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': 'ok',
                            'list': os.listdir(path),
                            'description': ''
                        }).encode())
                else:
                    self.send_response(400)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': 'error',
                            'list': {},
                            'description': 'no such file'
                        }).encode())

            elif request['request'] == 'GetFaceImage':
                path = os.path.join(args.images_dir, request['SchoolId'],
                                    request['PersonId'], request['FileId'])
                if os.path.isfile(path):
                    with open(path, 'rb') as f:
                        image = str(base64.b64encode(f.read()))
                    self.send_response(200)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': 'ok',
                            'face': image,
                            'description': ''
                        }).encode())
                else:
                    self.send_response(400)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': 'error',
                            'face': '',
                            'description': 'no such file'
                        }).encode())

            elif request['request'] == 'Train':
                path = os.path.join(args.images_dir, request['SchoolId'])
                if os.path.isdir(path):
                    dataset = facenet.get_dataset(path)
                    if len(dataset) < 2:
                        self.send_response(400)
                        self.send_header('Content-type', 'application/json')
                        self.end_headers()
                        self.wfile.write(
                            json.dumps({
                                'response':
                                'error',
                                'description':
                                'number of person less than 2'
                            }).encode())
                        return
                    for cls in dataset:
                        assert len(cls.image_paths) > 0, \
                            'There must be at least one image for each class in the dataset'
                    paths, labels = facenet.get_image_paths_and_labels(dataset)
                    print('Number of classes: %d' % len(dataset))
                    print('Number of images: %d' % len(paths))
                    print('Calculating features for images')
                    emb_array = []
                    num_of_images = len(paths)
                    ind = 0
                    for path in paths:
                        img_tem = misc.imread(path)
                        img, box, _ = cut_face(img_tem)
                        if len(img) != 1:
                            # Skip images without exactly one detected face,
                            # dropping the corresponding label
                            labels.pop(ind)
                            continue
                        ind += 1

                        img = img[0]
                        img_emb = model.get_feature(img)
                        emb_array.append(img_emb)

                    emb_array = np.array(emb_array)

                    print('Training classifier')
                    SGDmodel = SGDClassifier(loss="log",
                                             max_iter=100,
                                             penalty="l2",
                                             n_jobs=cpu_count() - 1,
                                             shuffle=True)
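                    # loss="log" selects logistic regression, which is what makes
                    # predict_proba() available to the Predict branch below
                    # (hinge-loss SGD exposes no probability estimates).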
                    SGDmodel.fit(emb_array, labels)
                    new_emb = []
                    lastidx = -1
                    # Create a list of class names
                    new_class_names = [cls.name for cls in dataset]
                    for i in range(len(labels)):
                        if labels[i] == lastidx:
                            # Append to the most recent class bucket (labels arrive sorted)
                            new_emb[-1].append(emb_array[i])
                        else:
                            new_emb.append([emb_array[i]])
                            lastidx = labels[i]
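                    # new_emb now holds one list of embeddings per class; the
                    # Predict branch below averages squared distances against these
                    # lists to decide whether a query face is a stranger.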
                    svm_model_path = '{}/{}.pkl'.format(
                        args.classifier_dir, request['SchoolId'])
                    # Saving classifier model
                    with open(svm_model_path, 'wb') as outfile:
                        pickle.dump((SGDmodel, new_class_names, new_emb),
                                    outfile,
                                    protocol=2)
                    print('Saved classifier to file ', svm_model_path)

                    schools[request['SchoolId']] = school()
                    schools[request['SchoolId']].svm_model = SGDmodel
                    schools[request['SchoolId']].class_names = new_class_names
                    schools[request['SchoolId']].emb = new_emb

                    self.send_response(200)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': 'ok',
                            'description': ''
                        }).encode())
                else:
                    self.send_response(400)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': 'error',
                            'description': 'no such school'
                        }).encode())

            elif request['request'] == 'Predict':
                if request['SchoolId'] in schools:
                    request['face'] = BytesIO(base64.b64decode(
                        request['face']))
                    img1 = misc.imread(request['face'], mode='RGB')
                    images, boxes, _ = cut_face(img1)
                    if len(boxes) == 0:
                        self.send_response(400)
                        self.send_header('Content-type', 'application/json')
                        self.end_headers()
                        self.wfile.write(
                            json.dumps({
                                'response': 'error',
                                'predict': [],
                                'boxes': [],
                                'description': 'no face detected'
                            }).encode())
                        return

                    emb = []
                    for image in images:
                        emb.append(model.get_feature(image))
                    emb = np.array(emb)

                    predict = []

                    path = os.path.join(args.images_dir, request['SchoolId'])
                    num_of_people = len(os.listdir(path))

                    predictions = schools[
                        request['SchoolId']].svm_model.predict_proba(emb)
                    predict_index = np.argmax(predictions, axis=1)
                    best_class_indices = []
                    best_class_probabilities = []
                    count = min(num_of_people, 1)
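                    # Pick the top `count` classes per face by repeatedly taking the
                    # argmax and zeroing it so the next pass yields the runner-up;
                    # with count = min(num_of_people, 1) only the best class is kept.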
                    for i in range(count):
                        tmp_best_class_indices = np.argmax(predictions, axis=1)
                        tmp_best_class_probabilities = predictions[
                            np.arange(len(tmp_best_class_indices)),
                            tmp_best_class_indices]
                        best_class_indices.append(tmp_best_class_indices)
                        best_class_probabilities.append(
                            tmp_best_class_probabilities)
                        predictions[np.arange(len(tmp_best_class_indices)),
                                    tmp_best_class_indices] = 0

                    for i in range(len(best_class_indices[0])):
                        print('%4d' % i)
                        for j in range(len(best_class_indices)):
                            print('j:', j)
                            print('%s: %.3f' %
                                  (schools[request['SchoolId']].class_names[
                                      best_class_indices[j][i]],
                                   best_class_probabilities[j][i]))

                    print('best_class_indices[0]:', best_class_indices[0])
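                    # For each candidate class, average the squared L2 distance from
                    # the query embedding to every stored embedding of that class,
                    # then map the distance to a similarity score via get_sim().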
                    similarity = []
                    for i in range(len(best_class_indices)):
                        tmp = best_class_indices[i]
                        tmp_sim = []
                        for j in range(len(tmp)):
                            dist = 0
                            for k in range(
                                    len(schools[request['SchoolId']].emb[
                                        tmp[j]])):
                                dist += np.sum(
                                    np.square(
                                        np.subtract(
                                            emb[j],
                                            schools[request['SchoolId']].emb[
                                                tmp[j]][k])))
                            dist /= len(
                                schools[request['SchoolId']].emb[tmp[j]])
                            tmp_sim.append(get_sim(dist))
                        similarity.append(tmp_sim)

                    for i in range(len(predict_index)):
                        dist = 0
                        for j in range(
                                len(schools[request['SchoolId']].emb[
                                    predict_index[i]])):
                            dist += np.sum(
                                np.square(
                                    np.subtract(
                                        emb[i],
                                        schools[request['SchoolId']].emb[
                                            predict_index[i]][j])))
                        dist /= len(
                            schools[request['SchoolId']].emb[predict_index[i]])

                        if dist > args.same_person_threshold:
                            predict.append('stranger')

                        else:
                            for k in range(len(best_class_indices)):
                                predict.append([
                                    schools[request['SchoolId']].class_names[
                                        best_class_indices[k][i]],
                                    similarity[k][i]
                                ])

                    print('predict', predict)

                    self.send_response(200)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': 'ok',
                            'predict': predict,
                            'boxes': boxes,
                            'description': ''
                        }).encode())
                else:
                    self.send_response(400)
                    self.send_header('Content-type', 'application/json')
                    self.end_headers()
                    self.wfile.write(
                        json.dumps({
                            'response': 'error',
                            'predict': [],
                            'boxes': [],
                            'description': 'no such school'
                        }).encode())
        else:
            self.send_error(404, 'Invalid JSON request: %s' % self.path)
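
# A minimal client sketch for the handler above, assuming the server listens on
# http://localhost:8080 and accepts the JSON bodies handled in do_POST; the
# host, port and the predict_face() helper are illustrative, not part of the server.
import base64
import json
from urllib import request as urlrequest

def predict_face(image_path, school_id, url='http://localhost:8080'):
    # Encode the query image the same way the handler decodes it
    with open(image_path, 'rb') as f:
        face = base64.b64encode(f.read()).decode('ascii')
    body = json.dumps({
        'request': 'Predict',
        'SchoolId': school_id,
        'face': face
    }).encode()
    req = urlrequest.Request(url, data=body,
                             headers={'Content-type': 'application/json'})
    with urlrequest.urlopen(req) as resp:
        return json.loads(resp.read().decode())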
Example #18
0
def main(args):

    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir,
                                                       'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    # src_path,_ = os.path.split(os.path.realpath(__file__))
    # facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)

    # Fetch datasets
    # VGGface2
    print('Fetch VGGface2 as source dataset at {}'.format(
        args.vggface2_train_dir))
    src_train_set = facenet.get_dataset(args.vggface2_train_dir)
    # validation_set = facenet.get_dataset(args.vggface2_val_dir)

    # COX S2V
    print('Fetch COX-S2V as target dataset at {}'.format(args.cox_video_dir))

    cox_dataset = cox.cox_data(args.cox_still_dir, args.cox_video_dir,
                               args.cox_pairs)

    train_folds = [0, 1, 2]
    evaluation_folds = [3, 4, 5, 6, 7, 8, 9]

    # cox_train_list = cox_dataset.get_dataset(train_folds)
    # cox_val_list = list(itertools.chain.from_iterable(fold_list[3:10]))
    tgt_train_set = cox_dataset.get_dataset(train_folds, video_only=True)
    # tgt_val_set = cox.get_video_dataset(args.cox_video_dir, cox_val_list)

    cox_paths, cox_issame = cox_dataset.get_pairs(evaluation_folds)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    if args.pretrained_model:
        print('Pre-trained model: %s' %
              os.path.expanduser(args.pretrained_model))

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs)
    # Get the paths for embeddings projection

    lfw_proj_paths, lfw_proj_labels = lfw.get_paths_from_file(
        args.lfw_dir, args.lfw_projection)

    cox_proj_paths, cox_proj_labels = cox_dataset.get_paths_from_file(
        args.cox_projection)

    # Combine projection paths
    projection_paths = lfw_proj_paths + cox_proj_paths
    proj_labels = lfw_proj_labels + cox_proj_labels

    # Write the label metadata for the embedding projection
    metadata_filename = 'meta.tsv'
    emb_dir = os.path.join(os.path.expanduser(log_dir), 'emb')
    if not os.path.isdir(
            emb_dir):  # Create the embeddings directory if it doesn't exist
        os.makedirs(emb_dir)
    with open(os.path.join(emb_dir, metadata_filename), "w") as meta_file:
        csvWriter = csv.writer(meta_file, delimiter='\t')
        csvWriter.writerows(np.array([proj_labels]).T)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        image_paths_placeholder = tf.placeholder(tf.string,
                                                 shape=(None, 4),
                                                 name='image_paths')
        labels_placeholder = tf.placeholder(tf.int64,
                                            shape=(None, 4),
                                            name='labels')

        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                              dtypes=[tf.string, tf.int64],
                                              shapes=[(4, ), (4, )],
                                              shared_name=None,
                                              name=None)
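        # Each queue element carries 4 image paths and 4 labels: one quadruplet
        # (anchor, positive, negative, target), matching the unstack further below.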
        enqueue_op = input_queue.enqueue_many(
            [image_paths_placeholder, labels_placeholder])

        nrof_preprocess_threads = 4
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_image(file_contents, channels=3)

                if args.random_crop:
                    image = tf.random_crop(
                        image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(
                        image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)

                #pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])

        image_batch, labels_batch = tf.train.batch_join(
            images_and_labels,
            batch_size=batch_size_placeholder,
            shapes=[(args.image_size, args.image_size, 3), ()],
            enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)
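        # enqueue_many=True because each images_and_labels entry already holds a
        # quadruplet's worth of decoded images; batch_join interleaves the four
        # preprocessing threads into batches of batch_size_placeholder.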
        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        labels_batch = tf.identity(labels_batch, 'label_batch')

        # Build the inference graph
        prelogits, _ = network.inference(
            image_batch,
            args.keep_probability,
            phase_train=phase_train_placeholder,
            bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
        # Split embeddings into anchor, positive and negative and calculate triplet loss
        anchor, positive, negative, target = tf.unstack(
            tf.reshape(embeddings, [-1, 4, args.embedding_size]), 4, 1)
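        # quadruplets_loss evidently combines a triplet margin term with an
        # adversarial term on the target-domain samples: it returns
        # (loss, adv_loss, triplet_loss), and the commented-out lines below show
        # the two terms computed separately.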
        loss, adv_loss, triplet_loss = adversarialloss.quadruplets_loss(
            anchor, positive, negative, target, args.alpha, args.lamb,
            args.zeta)
        # triplet_loss = tripletloss.triplet_loss(anchor, positive, negative, args.alpha)
        # adv_loss = adversarialloss.adversarial_loss(anchor, target, args.alpha)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([loss] + regularization_losses,
                              name='total_loss')
        reg_loss = tf.add_n(regularization_losses, name='reg_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables())

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        # summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

        # Initialize variables
        sess.run(tf.global_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})
        sess.run(tf.local_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})

        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if args.pretrained_model:
                print('Restoring pretrained model: %s' % args.pretrained_model)
                saver.restore(sess, os.path.expanduser(args.pretrained_model))

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size

                # save_embeddings(sess, projection_paths, epoch, embeddings, labels_batch, image_paths_placeholder,
                #                 labels_placeholder,
                #                 batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder, enqueue_op,
                #                 args.batch_size, emb_dir, args.embedding_size, tag='lfw')
                #
                # # Evaluate on COX
                # evaluate(sess, cox_paths, embeddings, labels_batch, image_paths_placeholder,
                #          labels_placeholder,
                #          batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder,
                #          enqueue_op,
                #          cox_issame, args.batch_size,
                #          args.lfw_nrof_folds, log_dir, step, summary_writer, args.embedding_size,
                #          tag='cox')
                #
                # if args.lfw_dir:
                #     evaluate(sess, lfw_paths, embeddings, labels_batch, image_paths_placeholder, labels_placeholder,
                #             batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder, enqueue_op, actual_issame, args.batch_size,
                #             args.lfw_nrof_folds, log_dir, step, summary_writer, args.embedding_size)

                # Train for one epoch
                train(args, sess, src_train_set, tgt_train_set, epoch,
                      image_paths_placeholder, labels_placeholder,
                      labels_batch, batch_size_placeholder,
                      learning_rate_placeholder, phase_train_placeholder,
                      enqueue_op, input_queue, global_step, embeddings,
                      total_loss, triplet_loss, adv_loss, reg_loss, train_op,
                      summary_writer, args.learning_rate_schedule_file,
                      args.embedding_size)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

                # Evaluate on LFW

    return model_dir
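
# A back-of-the-envelope sketch (not part of the script above) of the staircase
# schedule configured with tf.train.exponential_decay: the learning rate drops
# by learning_rate_decay_factor once every
# learning_rate_decay_epochs * epoch_size global steps.
def staircase_lr(base_lr, step, decay_steps, decay_factor):
    # staircase=True floors the exponent, so the rate changes in discrete jumps
    return base_lr * decay_factor ** (step // decay_steps)

# e.g. staircase_lr(0.1, 2500, 1000, 0.9) == 0.1 * 0.9 ** 2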
Example #19
0
def main(args):
    train_set = facenet.get_dataset(args.data_dir)
    image_list, label_list = facenet.get_image_paths_and_labels(train_set)
    # fetch the classes (labels as strings) exactly as it's done in get_dataset
    path_exp = os.path.expanduser(args.data_dir)
    classes = [path for path in os.listdir(path_exp) \
               if os.path.isdir(os.path.join(path_exp, path))]
    classes.sort()
    # get the label strings
    label_strings = [name for name in classes if \
       os.path.isdir(os.path.join(path_exp, name))]

    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Load the model
            facenet.load_model(args.model_dir)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            nrof_images = len(image_list)
            print('Number of images: ', nrof_images)
            batch_size = args.image_batch
            # Ceiling division: the final batch may be smaller than batch_size
            nrof_batches = (nrof_images + batch_size - 1) // batch_size
            print('Number of batches: ', nrof_batches)
            embedding_size = embeddings.get_shape()[1]
            emb_array = np.zeros((nrof_images, embedding_size))
            start_time = time.time()

            for i in range(nrof_batches):
                if i == nrof_batches - 1:
                    n = nrof_images
                else:
                    n = i * batch_size + batch_size
                # Get images for the batch
                if args.is_aligned:
                    images = facenet.load_data(image_list[i * batch_size:n],
                                               False, False, args.image_size)
                else:
                    images = load_and_align_data(image_list[i * batch_size:n],
                                                 args.image_size, args.margin,
                                                 args.gpu_memory_fraction)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                # Use the facenet model to calculate embeddings
                embed = sess.run(embeddings, feed_dict=feed_dict)
                emb_array[i * batch_size:n, :] = embed
                print('Completed batch', i + 1, 'of', nrof_batches)

            run_time = time.time() - start_time
            print('Run time: ', run_time)

            # Export embeddings and labels
            label_list = np.array(label_list)

            np.save(args.embeddings_name, emb_array)
            np.save(args.labels_name, label_list)
            label_strings = np.array(label_strings)
            np.save(args.labels_strings_name, label_strings[label_list])
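
# A minimal follow-up sketch, assuming the .npy files written above; the file
# names are illustrative (they are whatever was passed as --embeddings_name,
# --labels_name and --labels_strings_name).
import numpy as np

emb_array = np.load('embeddings.npy')          # (nrof_images, embedding_size)
labels = np.load('labels.npy')                 # integer class index per image
label_strings = np.load('label_strings.npy')   # class name string per image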