Example #1
def main(args):

    train_set = facenet.get_dataset(args.dataset_dir)
    train_set = train_set[0:10]

    with tf.Graph().as_default():

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        nrof_images = len(image_list)
        image_indices = range(nrof_images)

        image_batch, label_batch = facenet.read_and_augument_data(
            image_list,
            image_indices,
            args.image_size,
            args.batch_size,
            None,
            False,
            False,
            False,
            nrof_preprocess_threads=4,
            shuffle=False)
        with tf.Session() as sess:
            tf.train.start_queue_runners(sess=sess)
            print('Loading graphdef: %s' % args.model_file)
            with gfile.FastGFile(os.path.expanduser(args.model_file),
                                 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                embeddings = tf.import_graph_def(
                    graph_def,
                    input_map={'input': image_batch},
                    return_elements=['embeddings:0'],
                    name='import')[0]

            embedding_size = int(embeddings.get_shape()[1])
            nrof_batches = nrof_images // args.batch_size
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches):
                t = time.time()
                emb, lab = sess.run([embeddings, label_batch])
                emb_array[lab, :] = emb
                print('Batch %d in %.3f seconds' % (i, time.time() - t))

            mdict = {
                'image_list': image_list,
                'label_list': label_list,
                'embeddings': emb_array
            }
            sio.savemat(args.mat_file_name, mdict)
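
For reference, the .mat file written above can be inspected afterwards with scipy.io.loadmat; this is a minimal sketch, assuming the file name that was passed as args.mat_file_name (here a placeholder 'embeddings.mat'):

import numpy as np
import scipy.io as sio

# Minimal sketch: load the embeddings saved by the example above.
# 'embeddings.mat' stands in for whatever args.mat_file_name was.
data = sio.loadmat('embeddings.mat')
emb = np.asarray(data['embeddings'])   # shape: (nrof_images, embedding_size)
image_list = data['image_list']        # the image paths saved alongside the embeddings
print('Loaded %d embeddings of size %d' % (emb.shape[0], emb.shape[1]))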
Example #2
def main(args):

    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)

        # Read data and apply label preserving distortions
        image_batch, label_batch = facenet.read_and_augument_data(
            image_list, label_list, args.image_size, args.batch_size,
            args.max_nrof_epochs, args.random_crop, args.random_flip,
            args.nrof_preprocess_threads)
        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))

        print('Building training graph')

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        # Build the inference graph
        prelogits, _ = network.inference(image_batch,
                                         args.keep_probability,
                                         phase_train=True,
                                         weight_decay=args.weight_decay)
        logits = slim.fully_connected(
            prelogits,
            len(train_set),
            activation_fn=None,
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(args.weight_decay),
            scope='Logits',
            reuse=False)

        # Add DeCov regularization loss
        if args.decov_loss_factor > 0.0:
            logits_decov_loss = facenet.decov_loss(
                logits) * args.decov_loss_factor
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                 logits_decov_loss)

        # Add center loss
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, _ = facenet.center_loss(
                prelogits, label_batch, args.center_loss_alfa, nrof_classes)
            tf.add_to_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES,
                prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.scalar_summary('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits, label_batch, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.all_variables(), args.log_histograms)

        # Evaluation
        print('Building evaluation graph')
        lfw_label_list = range(0, len(lfw_paths))
        assert (
            len(lfw_paths) % args.lfw_batch_size == 0
        ), "The number of images in the LFW test set need to be divisible by the lfw_batch_size"
        eval_image_batch, eval_label_batch = facenet.read_and_augument_data(
            lfw_paths,
            lfw_label_list,
            args.image_size,
            args.lfw_batch_size,
            None,
            False,
            False,
            args.nrof_preprocess_threads,
            shuffle=False)
        # Node for input images
        eval_image_batch.set_shape((None, args.image_size, args.image_size, 3))
        eval_image_batch = tf.identity(eval_image_batch, name='input')
        eval_prelogits, _ = network.inference(eval_image_batch,
                                              1.0,
                                              phase_train=False,
                                              weight_decay=0.0,
                                              reuse=True)
        eval_embeddings = tf.nn.l2_normalize(eval_prelogits,
                                             1,
                                             1e-10,
                                             name='embeddings')

        # Create a saver
        saver = tf.train.Saver(tf.all_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        sess.run(tf.initialize_all_variables())
        sess.run(tf.initialize_local_variables())
        summary_writer = tf.train.SummaryWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, learning_rate_placeholder,
                      global_step, total_loss, train_op, summary_op,
                      summary_writer, regularization_losses,
                      args.learning_rate_schedule_file)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, eval_embeddings, eval_label_batch,
                             actual_issame, args.lfw_batch_size, args.seed,
                             args.lfw_nrof_folds, log_dir, step,
                             summary_writer)

    return model_dir
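
A side note on the learning-rate schedule built with tf.train.exponential_decay above: with staircase=True the documented formula is lr * decay_factor ** floor(global_step / decay_steps), so the rate drops in discrete steps every learning_rate_decay_epochs * epoch_size steps. A plain-Python sketch of that arithmetic, with made-up numbers:

# Plain-Python sketch of the staircase schedule used above (illustrative numbers only).
def staircase_decay(lr, global_step, decay_steps, decay_factor):
    # staircase=True floors the exponent, so the rate changes only at multiples of decay_steps
    return lr * decay_factor ** (global_step // decay_steps)

for step in (0, 999, 1000, 2500):
    print(step, staircase_decay(0.1, step, 1000, 0.9))  # 0.1, 0.1, 0.09, 0.081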
Example #3
def main(args):

    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)

        # Read data and apply label preserving distortions
        image_batch, label_batch = facenet.read_and_augument_data(
            image_list, label_list, args.image_size, args.batch_size,
            args.max_nrof_epochs, args.random_crop, args.random_flip,
            args.nrof_preprocess_threads)
        print('Total number of classes: %d' % len(train_set))
        print('Total number of examples: %d' % len(image_list))

        # Node for input images
        image_batch.set_shape((None, args.image_size, args.image_size, 3))
        image_batch = tf.identity(image_batch, name='input')

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        # Placeholder for phase_train
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        # Build the inference graph
        prelogits, _ = network.inference(image_batch,
                                         args.keep_probability,
                                         phase_train=phase_train_placeholder,
                                         weight_decay=args.weight_decay)
        with tf.variable_scope('Logits'):
            n = int(prelogits.get_shape()[1])
            m = len(train_set)
            w = tf.get_variable(
                'w',
                shape=[n, m],
                dtype=tf.float32,
                initializer=tf.truncated_normal_initializer(stddev=0.1),
                regularizer=slim.l2_regularizer(args.weight_decay),
                trainable=True)
            b = tf.get_variable('b', [m], initializer=None, trainable=True)
            logits = tf.matmul(prelogits, w) + b

        # Add DeCov regularization loss
        if args.decov_loss_factor > 0.0:
            logits_decov_loss = facenet.decov_loss(
                logits) * args.decov_loss_factor
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                 logits_decov_loss)

        # Add center loss
        update_centers = tf.no_op('update_centers')
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, update_centers = facenet.center_loss(
                prelogits, label_batch, args.center_loss_alfa)
            tf.add_to_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES,
                prelogits_center_loss * args.center_loss_factor)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.scalar_summary('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits, label_batch, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.all_variables(), args.log_histograms)

        # Create a saver
        save_variables = list(set(tf.all_variables()) - set([w]) - set([b]))
        saver = tf.train.Saver(save_variables, max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        sess.run(tf.initialize_all_variables())
        sess.run(tf.initialize_local_variables())
        summary_writer = tf.train.SummaryWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            if pretrained_model:
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, phase_train_placeholder,
                      learning_rate_placeholder, global_step, total_loss,
                      train_op, summary_op, summary_writer,
                      regularization_losses, args.learning_rate_schedule_file,
                      update_centers)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    start_time = time.time()
                    _, _, accuracy, val, val_std, far = lfw.validate(
                        sess,
                        lfw_paths,
                        actual_issame,
                        args.seed,
                        args.batch_size,
                        image_batch,
                        phase_train_placeholder,
                        embeddings,
                        nrof_folds=args.lfw_nrof_folds)
                    print('Accuracy: %1.3f+-%1.3f' %
                          (np.mean(accuracy), np.std(accuracy)))
                    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' %
                          (val, val_std, far))
                    lfw_time = time.time() - start_time
                    # Add validation loss and accuracy to summary
                    summary = tf.Summary()
                    #pylint: disable=maybe-no-member
                    summary.value.add(tag='lfw/accuracy',
                                      simple_value=np.mean(accuracy))
                    summary.value.add(tag='lfw/val_rate', simple_value=val)
                    summary.value.add(tag='time/lfw', simple_value=lfw_time)
                    summary_writer.add_summary(summary, step)
                    with open(os.path.join(log_dir, 'lfw_result.txt'),
                              'at') as f:
                        f.write('%d\t%.5f\t%.5f\n' %
                                (step, np.mean(accuracy), val))

    return model_dir
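
The center loss added via facenet.center_loss above (and its update_centers op) follows the idea of Wen et al.: keep one center per class, pull it toward the features of that class at rate alfa, and penalise the squared distance of each feature to its class center. A hedged numpy sketch of that update, not the repo's implementation:

import numpy as np

# Illustrative numpy sketch of the center-loss idea (Wen et al.); NOT facenet.center_loss itself.
def center_loss_step(features, labels, centers, alpha):
    diffs = features - centers[labels]                  # (batch_size, embedding_size)
    loss = 0.5 * np.mean(np.sum(diffs ** 2, axis=1))    # mean squared distance to the class centers
    for cls in np.unique(labels):
        mask = labels == cls
        # move the class center a fraction alpha toward the mean feature of that class in the batch
        centers[cls] += alpha * (features[mask].mean(axis=0) - centers[cls])
    return loss, centers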
Example #4
def main(args):
    network = importlib.import_module(args.model_def, 'inference')
  
    train_set = facenet.get_dataset(args.dataset_dir)
  
    with tf.Graph().as_default():
      
        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        nrof_images = len(image_list)
        image_indices = range(nrof_images)

        image_batch, label_batch = facenet.read_and_augument_data(image_list, image_indices, args.image_size, args.batch_size, None, 
            False, False, False, nrof_preprocess_threads=4, shuffle=False)
        prelogits, _ = network.inference(image_batch, 1.0, 
            phase_train=False, weight_decay=0.0, reuse=False)
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
        saver = tf.train.Saver(tf.global_variables())
        
        with tf.Session() as sess:
            saver.restore(sess, os.path.join(os.path.expanduser(args.model_file)))
            tf.train.start_queue_runners(sess=sess)
                
            embedding_size = int(embeddings.get_shape()[1])
            nrof_batches = int(math.ceil(nrof_images / args.batch_size))
            nrof_classes = len(train_set)
            label_array = np.array(label_list)
            class_names = [cls.name for cls in train_set]
            nrof_examples_per_class = [ len(cls.image_paths) for cls in train_set ]
            class_variance = np.zeros((nrof_classes,))
            class_center = np.zeros((nrof_classes,embedding_size))
            distance_to_center = np.ones((len(label_list),))*np.NaN
            emb_array = np.zeros((0,embedding_size))
            idx_array = np.zeros((0,), dtype=np.int32)
            lab_array = np.zeros((0,), dtype=np.int32)
            index_arr = np.append(0, np.cumsum(nrof_examples_per_class))
            for i in range(nrof_batches):
                t = time.time()
                emb, idx = sess.run([embeddings, label_batch])
                emb_array = np.append(emb_array, emb, axis=0)
                idx_array = np.append(idx_array, idx, axis=0)
                lab_array = np.append(lab_array, label_array[idx], axis=0)
                for cls in set(lab_array):
                    cls_idx = np.where(lab_array==cls)[0]
                    if cls_idx.shape[0]==nrof_examples_per_class[cls]:
                        # We have calculated all the embeddings for this class
                        i2 = np.argsort(idx_array[cls_idx])
                        emb_class = emb_array[cls_idx,:]
                        emb_sort = emb_class[i2,:]
                        center = np.mean(emb_sort, axis=0)
                        diffs = emb_sort - center
                        dists_sqr = np.sum(np.square(diffs), axis=1)
                        class_variance[cls] = np.mean(dists_sqr)
                        class_center[cls,:] = center
                        distance_to_center[index_arr[cls]:index_arr[cls+1]] = np.sqrt(dists_sqr)
                        emb_array = np.delete(emb_array, cls_idx, axis=0)
                        idx_array = np.delete(idx_array, cls_idx, axis=0)
                        lab_array = np.delete(lab_array, cls_idx, axis=0)

                        
                print('Batch %d in %.3f seconds' % (i, time.time()-t))
                
            print('Writing filtering data to %s' % args.data_file_name)
            mdict = {'class_names':class_names, 'image_list':image_list, 'label_list':label_list, 'distance_to_center':distance_to_center }
            with h5py.File(args.data_file_name, 'w') as f:
                for key, value in mdict.items():
                    f.create_dataset(key, data=value)
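
The filtering data written above can be read back with h5py; a minimal sketch, assuming the file that was passed as args.data_file_name (here a placeholder 'filter_data.h5'):

import h5py
import numpy as np

# Minimal sketch: read back the filtering data written by the example above.
# 'filter_data.h5' stands in for whatever args.data_file_name was.
with h5py.File('filter_data.h5', 'r') as f:
    distance_to_center = np.asarray(f['distance_to_center'])
    label_list = np.asarray(f['label_list'])
# NaN entries correspond to classes whose embeddings were never completed, hence nanmedian.
print('Median distance to class center: %.3f' % np.nanmedian(distance_to_center))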
Example #5
def main(args):

    #network = importlib.import_module(args.model_def, 'inception_v3')
    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')

    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)

    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    with open(os.path.join(model_dir, 'args.txt'), 'w') as f:
        for arg in vars(args):
            f.write(arg + ' ' + str(getattr(args, arg)) + '\n')

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    #train_set = facenet.get_dataset(args.data_dir)
    train_set = facenet.get_dataset_with_enhanced(args.data_dir)
    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)

        # Read data and apply label preserving distortions
        image_batch, label_batch = facenet.read_and_augument_data(
            image_list, label_list, args.image_size, args.batch_size,
            args.max_nrof_epochs, args.random_rotate, args.random_crop,
            args.random_flip, args.nrof_preprocess_threads, args.padding_size,
            args.patch_type)
        #print('Total number of classes: %d' % len(train_set))
        print('Total number of examples: %d' % len(image_list))

        # Node for input images
        image_batch = tf.identity(image_batch, name='input')

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        # Placeholder for phase_train
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        # Placeholder for keep probability
        keep_probability_placeholder = tf.placeholder(tf.float32,
                                                      name='keep_prob')

        # Build the inference graph
        # prelogits = network.inference(image_batch, keep_probability_placeholder,
        #    phase_train=phase_train_placeholder, weight_decay=args.weight_decay)
        batch_norm_params = {
            # Decay for the moving averages
            'decay': 0.995,
            # epsilon to prevent 0s in variance
            'epsilon': 0.001,
            # force in-place updates of mean and variance estimates
            'updates_collections': None,
            # Moving averages ends up in the trainable variables collection
            'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
            # Only update statistics during training mode
            'is_training': phase_train_placeholder
        }

        #prelogits, _ = network.inception_v3(image_batch, num_classes=len(train_set),is_training=True)
        prelogits, _ = network.inference(image_batch,
                                         args.keep_probability,
                                         phase_train=phase_train_placeholder,
                                         weight_decay=args.weight_decay)
        #prelogits = tf.identity(prelogits, name="prelogits")
        bottleneck = _fully_connected(prelogits,
                                      args.embedding_size,
                                      name='pre_embedding')
        #bottleneck = tf.nn.l2_normalize(bottleneck, dim=1,name='embedding')
        logits = _fully_connected_classifier(bottleneck,
                                             len(train_set),
                                             name='logits')
        """
        bottleneck = slim.fully_connected(prelogits, args.embedding_size, activation_fn=None, 
                weights_initializer=tf.truncated_normal_initializer(stddev=0.1), 
                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params,
                scope='Bottleneck', reuse=False)

        logits = slim.fully_connected(bottleneck, len(train_set), activation_fn=None,
                weights_initializer=tf.truncated_normal_initializer(stddev=0.1), 
                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                scope='Logits', reuse=False)

        logits = tf.identity(logits, name="logits")
        """
        # Add DeCov regularization loss
        if args.decov_loss_factor > 0.0:
            logits_decov_loss = facenet.decov_loss(
                logits) * args.decov_loss_factor
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                 logits_decov_loss)

        # Add center loss
        update_centers = tf.no_op('update_centers')
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, update_centers = facenet.center_loss(
                bottleneck, label_batch, args.center_loss_alfa, nrof_classes)
            tf.add_to_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES,
                prelogits_center_loss * args.center_loss_factor)

        #embeddings = tf.nn.l2_normalize(bottleneck, 1, 1e-10, name='embeddings')

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch,
            logits=logits,
            name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        """
        # Multi-label loss: sigmoid loss
        sigmoid_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=label_batch, logits=logits, name='sigmoid_loss_per_example')
        sigmoid_loss_mean = tf.reduce_mean(sigmoid_loss, name='sigmoid_loss')
        """
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')
        #total_loss = tf.add_n([cross_entropy_mean], name='total_loss')

        # prediction
        prediction = tf.argmax(logits, axis=1, name='prediction')
        acc = slim.metrics.accuracy(predictions=tf.cast(prediction,
                                                        dtype=tf.int32),
                                    labels=tf.cast(label_batch,
                                                   dtype=tf.int32))

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables())

        # Create a saver
        # save_variables = list(set(tf.all_variables())-set([w])-set([b]))
        save_variables = tf.trainable_variables()
        saver = tf.train.Saver(save_variables, max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        # sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            if pretrained_model:
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, phase_train_placeholder,
                      learning_rate_placeholder, keep_probability_placeholder,
                      global_step, total_loss, acc, train_op, summary_op,
                      summary_writer, regularization_losses,
                      args.learning_rate_schedule_file, update_centers)

                # Evaluate on LFW
                if args.lfw_dir:
                    start_time = time.time()
                    _, _, accuracy, val, val_std, far = lfw.validate(
                        sess,
                        lfw_paths,
                        actual_issame,
                        args.seed,
                        args.batch_size,
                        image_batch,
                        phase_train_placeholder,
                        keep_probability_placeholder,
                        embeddings,
                        nrof_folds=args.lfw_nrof_folds)
                    print('Accuracy: %1.3f+-%1.3f' %
                          (np.mean(accuracy), np.std(accuracy)))
                    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' %
                          (val, val_std, far))
                    lfw_time = time.time() - start_time
                    # Add validation loss and accuracy to summary
                    summary = tf.Summary()
                    #pylint: disable=maybe-no-member
                    summary.value.add(tag='lfw/accuracy',
                                      simple_value=np.mean(accuracy))
                    summary.value.add(tag='lfw/val_rate', simple_value=val)
                    summary.value.add(tag='time/lfw', simple_value=lfw_time)
                    summary_writer.add_summary(summary, step)
                    with open(os.path.join(log_dir, 'lfw_result.txt'),
                              'at') as f:
                        f.write('%d\t%.5f\t%.5f\n' %
                                (step, np.mean(accuracy), val))

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

    return model_dir
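
Several of these examples add a DeCov term via facenet.decov_loss; the idea (Cogswell et al.) is to penalise the off-diagonal entries of the batch covariance of the activations, discouraging redundant, correlated features. A hedged numpy sketch of that penalty, not the repo's implementation:

import numpy as np

# Illustrative sketch of the DeCov penalty (Cogswell et al.); NOT facenet.decov_loss itself.
def decov_penalty(activations):
    centered = activations - activations.mean(axis=0, keepdims=True)
    cov = centered.T.dot(centered) / activations.shape[0]
    # 0.5 * (||C||_F^2 - ||diag(C)||_2^2): only the off-diagonal terms contribute
    return 0.5 * (np.sum(cov ** 2) - np.sum(np.diag(cov) ** 2))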
Example #6
def main(args):
  
    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    nrof_classes = len(train_set)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        
        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)

        # Read data and apply label preserving distortions
        image_batch, label_batch = facenet.read_and_augument_data(image_list, label_list, args.image_size,
            args.batch_size, args.max_nrof_epochs, args.random_crop, args.random_flip, args.random_rotate, 
            args.nrof_preprocess_threads)
        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))
        
        print('Building training graph')
        
        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
        
        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability, 
            phase_train=True, weight_decay=args.weight_decay)
        logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None, 
                weights_initializer=tf.truncated_normal_initializer(stddev=0.1), 
                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                scope='Logits', reuse=False)

        # Add DeCov regularization loss
        if args.decov_loss_factor>0.0:
            logits_decov_loss = facenet.decov_loss(logits) * args.decov_loss_factor
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, logits_decov_loss)
            
        # Add center loss
        if args.center_loss_factor>0.0:
            prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits, label_batch, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)
        
        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
        
        # Evaluation
        print('Building evaluation graph')
        lfw_label_list = range(0,len(lfw_paths))
        assert (len(lfw_paths) % args.lfw_batch_size == 0), "The number of images in the LFW test set needs to be divisible by lfw_batch_size"
        eval_image_batch, eval_label_batch = facenet.read_and_augument_data(lfw_paths, lfw_label_list, args.image_size,
            args.lfw_batch_size, None, False, False, False, args.nrof_preprocess_threads, shuffle=False)
        # Node for input images
        eval_image_batch.set_shape((None, args.image_size, args.image_size, 3))
        eval_image_batch = tf.identity(eval_image_batch, name='input')
        eval_prelogits, _ = network.inference(eval_image_batch, 1.0, 
            phase_train=False, weight_decay=0.0, reuse=True)
        eval_embeddings = tf.nn.l2_normalize(eval_prelogits, 1, 1e-10, name='embeddings')

        # Create a saver
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, learning_rate_placeholder, global_step, 
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, eval_embeddings, eval_label_batch, actual_issame, args.lfw_batch_size, args.seed, 
                        args.lfw_nrof_folds, log_dir, step, summary_writer)
                
    return model_dir
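
The assert on the LFW set size above exists because the evaluation queue is consumed in fixed-size batches; if the image count were not a multiple of lfw_batch_size, a whole number of batches could not cover the test set and the embeddings would not line up with the pair labels. A quick sanity-check sketch with placeholder numbers:

# Placeholder numbers only; this mirrors the divisibility check used by the example above.
nrof_lfw_images, lfw_batch_size = 12000, 100
assert nrof_lfw_images % lfw_batch_size == 0, \
    'The number of LFW images (%d) must be divisible by the batch size (%d)' % (
        nrof_lfw_images, lfw_batch_size)
nrof_batches = nrof_lfw_images // lfw_batch_size
print('%d evaluation batches' % nrof_batches)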
Example #7
def main(argv=None):

    print("Debug")

    config = SafeConfigParser()
    config.read(cmd_args.config_path)

    with tf.Graph().as_default() as g:

        global_step = tf.Variable(0, trainable=False)

        #saver = tf.train.Saver(tf.global_variable())

        print("Now it is the time to load from the downloaded one")

        #############################
        image_size = config.getint("main", "image_size")
        batch_size = config.getint("main", "batch_size")
        # A batch is a subset of the training data.
        max_nrof_epochs = config.getint("main", "max_nrof_epochs")
        random_crop = True
        random_flip = False
        nrof_preprocess_threads = config.getint("main",
                                                "nrof_preprocess_threads")
        ############################

        # Get the directory
        train_set = facenet.get_dataset(config.get("data", "work_directory"))

        # Get image lists and label lists
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)

        # Read the images and their labels
        images_raw, labels_eval = facenet.read_and_augument_data(
            image_list, label_list, image_size, batch_size, max_nrof_epochs,
            random_crop, random_flip, nrof_preprocess_threads)

        print("tensor:%s" % images_raw)

        num_classes = len(os.listdir(config.get("data", "work_directory")))

        print("classes:%s" % num_classes)

        lfw_list = []

        lfw_classes = os.listdir(config.get("data", "work_directory"))
        lfw_classes.sort()

        for i in lfw_classes:
            lfw_list.append(i)

        keep_probability = 1.0

        # logits_eval is laid out like this:
        #
        #     1    [........]
        #     2    [........]
        #     3    [........]
        #     .        .
        #     .        .
        #     .        .
        #     128  [........]
        #
        #     128 x 10 (num_channel)
        #
        # This feeds into
        # cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, ...)

        prelogits, _ = inference(images_raw,
                                 keep_probability,
                                 phase_train=True,
                                 weight_decay=0.0)

        # logits_single is laid out like this:
        #
        #     1    [..................................]
        #
        #          |----------------------------------|
        #                           a
        #
        #     1 x 10 (num_channel), with whitening (reason unclear)
        #
        # This feeds into
        # pred_logit = sess.run(logits_single, feed_dict={x: raw_image_reshaped})
        # (12/24) Changed the shape of logits because of a problem in the model function; needs revisiting.

        print("Done")

        pre_embeddings = slim.fully_connected(prelogits,
                                              128,
                                              activation_fn=None,
                                              scope='Embeddings',
                                              reuse=False)
        logits_eval = tf.nn.l2_normalize(pre_embeddings,
                                         1,
                                         1e-10,
                                         name='embeddings')

    input_dict = {
        "graph": g,
        "x": images_raw,
        "x_raw": images_raw,
        "y_": labels_eval,
        "y_conv": logits_eval,
        #"y_conv_single": logits_single,
        #"adv_image_placeholder": x,
        #"keep_prob": None,
    }

    print("succees in generating images")

    output_dir = config.get("main", "image_output_path")

    #fastgradientsign_advgen = FastGradientSign_AdvGen(cmd_args, [1, 24, 24, 3], config)
    #fastgradientsign_advgen.run_queue_for_lfw(input_dict, num_classes, lfw_list)
    tripletloss_advgen = TripletLoss_AdvGen(cmd_args, [1, 24, 24, 3], config)
    tripletloss_advgen.run_queue_for_lfw(input_dict, num_classes, lfw_list,
                                         output_dir)