def main(args):
    output_dir = os.path.expanduser(args.output_dir)
  
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
  
    # Store some git revision info in a text file in the output directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    
    i = 0
    for f in args.tsv_files:
        for line in f:
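            # Each TSV row: column 0 is the class id, columns 1 and 4 form the image name, column 6 is the base64-encoded image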
            fields = line.split('\t')
            class_dir = fields[0]
            img_name = fields[1] + '-' + fields[4] + '.' + args.output_format
            img_string = fields[6]
            img_dec_string = base64.b64decode(img_string)
            img_data = np.frombuffer(img_dec_string, dtype=np.uint8)
            img = cv2.imdecode(img_data, cv2.IMREAD_COLOR) #pylint: disable=maybe-no-member
            if args.size>0:
                img = misc.imresize(img, (args.size, args.size), interp='bilinear')
            full_class_dir = os.path.join(output_dir, class_dir)
            if not os.path.exists(full_class_dir):
                os.mkdir(full_class_dir)
            full_path = os.path.join(full_class_dir, img_name)
            cv2.imwrite(full_path, img) #pylint: disable=maybe-no-member
            print('%8d: %s' % (i, full_path))
            i += 1
def main(args):
    funnel_cmd = 'funnelReal'
    funnel_model = 'people.train'

    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the output directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir)
    np.random.shuffle(dataset)
    # Scale the image such that the face fills the frame when cropped to crop_size
    #scale = float(args.face_size) / args.image_size
    with TemporaryDirectory() as tmp_dir:
        for cls in dataset:
            output_class_dir = os.path.join(output_dir, cls.name)
            tmp_output_class_dir = os.path.join(tmp_dir, cls.name)
            if not os.path.exists(output_class_dir) and not os.path.exists(tmp_output_class_dir):
                print('Aligning class %s:' % cls.name)
                tmp_filenames = []
                if not os.path.exists(tmp_output_class_dir):
                    os.makedirs(tmp_output_class_dir)
                input_list_filename = os.path.join(tmp_dir, 'input_list.txt')
                output_list_filename = os.path.join(tmp_dir, 'output_list.txt')
                with open(input_list_filename, 'w') as input_file, open(output_list_filename, 'w') as output_file:
                    for image_path in cls.image_paths:
                        filename = os.path.split(image_path)[1]
                        input_file.write(image_path+'\n')
                        output_filename = os.path.join(tmp_output_class_dir, filename)
                        output_file.write(output_filename+'\n')
                        tmp_filenames.append(output_filename)
                # Run the external funneling tool on the listed images
                cmd = os.path.join(args.funnel_dir, funnel_cmd) + ' ' + input_list_filename + ' ' + os.path.join(args.funnel_dir, funnel_model) + ' ' + output_list_filename
                subprocess.call(cmd, shell=True)
                
                # Resize and crop images
                if not os.path.exists(output_class_dir):
                    os.makedirs(output_class_dir)
                scale = 1.0
                for tmp_filename in tmp_filenames:
                    img = misc.imread(tmp_filename)
                    img_scale = misc.imresize(img, scale)
                    sz1 = img.shape[1]/2
                    sz2 = args.image_size/2
                    img_crop = img_scale[int(sz1-sz2):int(sz1+sz2),int(sz1-sz2):int(sz1+sz2),:]
                    filename = os.path.splitext(os.path.split(tmp_filename)[1])[0]
                    output_filename = os.path.join(output_class_dir, filename+'.png')
                    print('Saving image %s' % output_filename)
                    misc.imsave(output_filename, img_crop)
                    
                # Remove tmp directory with images
                shutil.rmtree(tmp_output_class_dir)
Example 3
def main(args):
    TEMPORAL_DIM = int(args.image_w / 5)
    SPATIAL_DIM = int(args.image_h / 5)
    seed_batch_idx = args.seed_batch_idx

    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir,
                                                       'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        action_list, label_list = get_image_paths_and_labels(
            dataPath=args.dataPath,
            s_ID=None,
            is_training=True,
            x_type=args.x_type)
        assert len(action_list) > 0, 'The dataset should not be empty'

        ## create input pipeline
        queue_input_data = tf.placeholder(
            tf.float32, shape=[None, args.image_h, args.image_w, 3])
        queue_input_label = tf.placeholder(tf.int64, shape=[None, 1])

        queue = tf.FIFOQueue(capacity=20000,
                             dtypes=[tf.float32, tf.int64],
                             shapes=[[args.image_h, args.image_w, 3], [1]])
        enqueue_op = queue.enqueue_many([queue_input_data, queue_input_label])

        dequeue_data, dequeue_lab = queue.dequeue()
        dequeue_image = tf.image.per_image_standardization(dequeue_data)

        # Create a queue that produces indices into the action_list and label_list
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size,
                                                    num_epochs=None,
                                                    shuffle=True,
                                                    seed=None,
                                                    capacity=32)

        #index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
        index_dequeue_op = index_queue.dequeue_many(args.batch_size * 100,
                                                    'index_dequeue')

        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        image_paths_placeholder = tf.placeholder(tf.string,
                                                 shape=(None, 1),
                                                 name='image_paths')

        labels_placeholder = tf.placeholder(tf.int64,
                                            shape=(None, 1),
                                            name='labels')

        #image_batch, label_batch = tf.train.batch([dequeue_image, dequeue_lab], batch_size=args.batch_size, capacity=100)
        image_batch, label_batch = tf.train.batch(
            [dequeue_image, dequeue_lab],
            batch_size=batch_size_placeholder,
            capacity=1000)

        label_batch = tf.squeeze(label_batch)

        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')

        tf.summary.image("input_image", image_batch, max_outputs=4)

        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(action_list))

        print('Building training graph')

        # Build the inference graph
        prelogits, _ = network.inference(
            image_batch,
            args.keep_probability,
            phase_train=phase_train_placeholder,
            bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)
        logits = slim.fully_connected(
            prelogits,
            len(train_set),
            activation_fn=None,
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(args.weight_decay),
            scope='Logits',
            reuse=False)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Add center loss
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, _ = facenet.center_loss(
                prelogits, label_batch, args.center_loss_alfa, nrof_classes)
            tf.add_to_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES,
                prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch,
            logits=logits,
            name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate batch accuracy
        correct_prediction = tf.equal(tf.argmax(logits, axis=1), label_batch)
        batch_accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                                tf.float32),
                                        name='batch_accuracy')
        tf.summary.scalar('batch_accuracy', batch_accuracy)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables(), args.log_histograms)

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=10)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord, sess=sess)

        enqueue_thread = threading.Thread(target=enqueue,
                                          args=[
                                              sess, index_dequeue_op,
                                              enqueue_op, action_list,
                                              label_list, queue_input_data,
                                              queue_input_label, TEMPORAL_DIM,
                                              SPATIAL_DIM, seed_batch_idx
                                          ])
        enqueue_thread.daemon = True  # run the enqueue worker as a daemon thread so it does not block shutdown
        enqueue_thread.start()

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(
                    args,
                    sess,
                    epoch,  #image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    learning_rate_placeholder,
                    phase_train_placeholder,
                    batch_size_placeholder,
                    global_step,
                    total_loss,
                    train_op,
                    summary_op,
                    summary_writer,
                    regularization_losses,
                    args.learning_rate_schedule_file)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder,
                             labels_placeholder, phase_train_placeholder,
                             batch_size_placeholder, embeddings, label_batch,
                             lfw_paths, actual_issame, args.lfw_batch_size,
                             args.lfw_nrof_folds, log_dir, step,
                             summary_writer)

        sess.run(queue.close(cancel_pending_enqueues=True))
        coord.request_stop()
        coord.join(threads)
        sess.close()

    return model_dir
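
The center loss added above (and again in Example 6 below) comes from facenet.center_loss, which is not included in this listing. A minimal sketch of the usual center-loss formulation (Wen et al.) it follows, using the same tensorflow import as the examples and not necessarily matching the exact implementation:

def center_loss_sketch(features, label, alfa, nrof_classes):
    # One running center per class, pulled towards the batch features at rate alfa
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alfa) * (centers_batch - features)
    centers_update = tf.scatter_sub(centers, label, diff)
    with tf.control_dependencies([centers_update]):
        # Penalize the squared distance between each feature vector and its class center
        loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers_update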
Example 4
def main(args):
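    # Brief random sleep so that several alignment processes launched in parallel do not collide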
    sleep(random.random())
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir)

    print('Creating networks and loading parameters')

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages (P-Net, R-Net, O-Net)
    factor = 0.709  # scale factor

    # Add a random key to the filename to allow alignment using multiple processes
    random_key = np.random.randint(0, high=99999)
    bounding_boxes_filename = os.path.join(
        output_dir, 'bounding_boxes_%05d.txt' % random_key)

    with open(bounding_boxes_filename, "w") as text_file:
        nrof_images_total = 0
        nrof_successfully_aligned = 0
        if args.random_order:
            random.shuffle(dataset)
        for cls in dataset:
            output_class_dir = os.path.join(output_dir, cls.name)
            if not os.path.exists(output_class_dir):
                os.makedirs(output_class_dir)
                if args.random_order:
                    random.shuffle(cls.image_paths)
            for image_path in cls.image_paths:
                nrof_images_total += 1
                filename = os.path.splitext(os.path.split(image_path)[1])[0]
                output_filename = os.path.join(output_class_dir,
                                               filename + '.png')
                print(image_path)
                if not os.path.exists(output_filename):
                    try:
                        img = misc.imread(image_path)
                    except (IOError, ValueError, IndexError) as e:
                        errorMessage = '{}: {}'.format(image_path, e)
                        print(errorMessage)
                    else:
                        if img.ndim < 2:
                            print('Unable to align "%s"' % image_path +
                                  ", original saved.")
                            misc.imsave(output_filename, img)
                            text_file.write('%s\n' % (output_filename))
                            continue
                        if img.ndim == 2:
                            img = facenet.to_rgb(img)
                        img = img[:, :, 0:3]

                        bounding_boxes, _ = align.detect_face.detect_face(
                            img, minsize, pnet, rnet, onet, threshold, factor)
                        nrof_faces = bounding_boxes.shape[0]
                        if nrof_faces > 0:
                            det = bounding_boxes[:, 0:4]
                            det_arr = []
                            img_size = np.asarray(img.shape)[0:2]
                            if nrof_faces > 1:
                                if args.detect_multiple_faces:
                                    for i in range(nrof_faces):
                                        det_arr.append(np.squeeze(det[i]))
                                else:
                                    bounding_box_size = (
                                        det[:, 2] - det[:, 0]) * (det[:, 3] -
                                                                  det[:, 1])
                                    img_center = img_size / 2
                                    offsets = np.vstack([
                                        (det[:, 0] + det[:, 2]) / 2 -
                                        img_center[1],
                                        (det[:, 1] + det[:, 3]) / 2 -
                                        img_center[0]
                                    ])
                                    offset_dist_squared = np.sum(
                                        np.power(offsets, 2.0), 0)
                                    index = np.argmax(
                                        bounding_box_size -
                                        offset_dist_squared * 2.0
                                    )  # some extra weight on the centering
                                    det_arr.append(det[index, :])
                            else:
                                det_arr.append(np.squeeze(det))

                            for i, det in enumerate(det_arr):
                                det = np.squeeze(det)
                                bb = np.zeros(4, dtype=np.int32)
                                bb[0] = np.maximum(det[0] - args.margin / 2, 0)
                                bb[1] = np.maximum(det[1] - args.margin / 2, 0)
                                bb[2] = np.minimum(det[2] + args.margin / 2,
                                                   img_size[1])
                                bb[3] = np.minimum(det[3] + args.margin / 2,
                                                   img_size[0])
                                cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                                scaled = misc.imresize(
                                    cropped,
                                    (args.image_size, args.image_size),
                                    interp='bilinear')
                                nrof_successfully_aligned += 1
                                filename_base, file_extension = os.path.splitext(
                                    output_filename)
                                if args.detect_multiple_faces:
                                    output_filename_n = "{}_{}{}".format(
                                        filename_base, i, file_extension)
                                else:
                                    output_filename_n = "{}{}".format(
                                        filename_base, file_extension)
                                misc.imsave(output_filename_n, scaled)
                                text_file.write('%s %d %d %d %d\n' %
                                                (output_filename_n, bb[0],
                                                 bb[1], bb[2], bb[3]))
                        else:
                            print('Unable to align "%s"' % image_path +
                                  ", original saved.")
                            misc.imsave(output_filename, img)
                            text_file.write('%s\n' % (output_filename))

    print('Total number of images: %d' % nrof_images_total)
    print('Number of successfully aligned images: %d' %
          nrof_successfully_aligned)
Example 5
def main(args):
  
    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
        
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)

    # Pre-trained model
    if args.pretrained_model:
        print('Pre-trained model: %s' % os.path.expanduser(args.pretrained_model))
    
    # # Location of the LFW dataset
    # if args.lfw_dir:
    #     print('LFW directory: %s' % args.lfw_dir)
    #     # Read the file containing the pairs used for testing
    #     pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
    #     # Get the paths for the corresponding images
    #     lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False, name='global_step')
        # Placeholders for the learning rate, batch_size, phase_train, image_path, labels
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 3), name='image_paths')
        labels_placeholder = tf.placeholder(tf.int64, shape=(None, 3), name='labels')

        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                    dtypes=[tf.string, tf.int64],
                                    shapes=[(3,), (3,)],
                                    shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder])

        # Number of preprocessing threads; the block below is adapted for multi-GPU execution
        nrof_preprocess_threads = 4
        
        images_and_labels_all = []
        for _ in range(nrof_preprocess_threads):
            for gpu in range(args.num_gpus):
                images_and_labels = []
                # Dequeue data from the queue on every iteration
                filenames, label = input_queue.dequeue()
                images = []
                for filename in tf.unstack(filenames):
                    file_contents = tf.read_file(filename)
                    image = tf.image.decode_image(file_contents, channels=3)

                    if args.random_crop:
                        image = tf.random_crop(image, [args.image_size, args.image_size, 3])
                    else:
                        image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
                    if args.random_flip:
                        image = tf.image.random_flip_left_right(image)

                    # pylint: disable=no-member
                    image.set_shape((args.image_size, args.image_size, 3))
                    images.append(tf.image.per_image_standardization(image))
                images_and_labels.append([images, label])
            images_and_labels_all.append(images_and_labels)
        # Arrange the data for the multi-GPU run below
        image_batch_split = []
        label_batch_split = []
        # label_extend = []        
        for i in range(args.num_gpus):
            image_batch, labels_batch = tf.train.batch_join(
                images_and_labels_all[i], batch_size=batch_size_placeholder,
                shapes=[(args.image_size, args.image_size, 3), ()], enqueue_many=True,
                capacity=4 * nrof_preprocess_threads * args.batch_size,
                allow_smaller_final_batch=True)
            image_batch = tf.identity(image_batch, 'image_batch')
            image_batch = tf.identity(image_batch, 'input')
            labels_batch = tf.identity(labels_batch, 'label_batch')
            image_batch_split.append(image_batch)
            label_batch_split.append(labels_batch)
            # label_extend.extend(labels_batch)

        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # print('Using optimizer: {}'.format(args.optimizer))
        # if args.optimizer == 'ADAGRAD':
        #     opt = tf.train.AdagradOptimizer(learning_rate)
        # elif args.optimizer == 'MOM':
        #     opt = tf.train.MomentumOptimizer(learning_rate, rho=0.9, epsilon=1e-6)
        # elif args.optimizer=='ADAM':
        #     opt = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1)
        # elif args.optimizer=='RMSPROP':
        #     opt = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)
        # elif args.optimizer=='MOM':
        #     opt = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
        # else:
        #     raise Exception('Not supported optimizer: {}'.format(args.optimizer))

        # The multi-GPU towers are built in this section
        print('Building training graph....')
        tower_losses = []
        tower_triplet = []
        tower_reg= []
        # embeddings_extend = []      
        embeddings_split = []
        for i in range(args.num_gpus):
            with tf.device("/gpu:" + str(i)):
                with tf.name_scope("tower_" + str(i)) as scope:
                    with tf.variable_scope(tf.get_variable_scope()) as var_scope:
                        # Build the inference graph
                        with tf.variable_scope(name_or_scope='', reuse=tf.AUTO_REUSE):
                            # # Dequeues one batch for one tower 
                            # image_batch_de, label_batch_de = batch_queue.dequeue()
                            prelogits, _ = network.inference(image_batch_split[i], args.keep_probability, 
                                phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size,
                                weight_decay=args.weight_decay)

                            embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
                            # embeddings_extend.extend(embeddings)
                            embeddings_split.append(embeddings)
                            # Split embeddings into anchor, positive and negative and calculate triplet loss
                            anchor, positive, negative = tf.unstack(tf.reshape(embeddings, [-1,3,args.embedding_size]), 3, 1)
                            triplet_loss_split = facenet.triplet_loss(anchor, positive, negative, args.alpha)
                            regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
                            tower_triplet.append(triplet_loss_split)
                            loss = triplet_loss_split + tf.add_n(regularization_losses)
                            tower_losses.append(loss)
                            tower_reg.append(regularization_losses)
                            # Variables with the same name can be reused across towers
                            tf.get_variable_scope().reuse_variables()
        total_loss = tf.reduce_mean(tower_losses)
        total_reg = tf.reduce_mean(tower_reg)
        losses = {}
        losses['total_loss'] = total_loss
        losses['total_reg'] = total_reg

        # # Compute the mean of the embeddings
        # tmp_embeddings = None
        # for j in range(arg.num_gpus):
        #     if j > 0:
        #         tmp_embeddings += embeddings_split[j]
        #     else:
        #         tmp_embeddings = embeddings_split[j]
        # embeddings = tmp_embeddings / args.num_gpus


        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, tf.global_variables())

        # grads = opt.compute_gradients(total_loss,tf.trainable_variables(),colocate_gradients_with_ops=True)
        # apply_gradient_op = opt.apply_gradients(grads,global_step=global_step)
        # update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        # with tf.control_dependencies(update_ops):
        #     train_op = tf.group(apply_gradient_op)

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))        

        # Initialize variables
        sess.run(tf.global_variables_initializer(), feed_dict={phase_train_placeholder:True})
        sess.run(tf.local_variables_initializer(), feed_dict={phase_train_placeholder:True})

        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)
        
        with sess.as_default():

            if args.pretrained_model:
                print('Restoring pretrained model: %s' % args.pretrained_model)
                saver.restore(sess, os.path.expanduser(args.pretrained_model))

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train multi gpus for one epoch
                epoch_start_time = time.time()
                # for i in range(args.num_gpus):
                train_multi_gpu(args, sess, train_set, epoch, image_paths_placeholder, labels_placeholder, 
                            label_batch_split[0], label_batch_split[1], batch_size_placeholder, learning_rate_placeholder, 
                            phase_train_placeholder, enqueue_op, input_queue, global_step, embeddings_split[0], embeddings_split[1], losses['total_loss'], losses['total_reg'], train_op, 
                            summary_op, summary_writer, args.learning_rate_schedule_file, args.embedding_size)
            
                print('Epoch %d running time: %.3f seconds' % (epoch, time.time()-epoch_start_time))
                # # Train for one epoch
                # train(args, sess, epoch, 
                #      learning_rate_placeholder, phase_train_placeholder, global_step, 
                #      losses, train_op, summary_op, summary_writer, args.learning_rate_schedule_file)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)

    return model_dir
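
This example (and Example 8 below) relies on facenet.triplet_loss, which is not shown in the listing. A sketch of the standard FaceNet triplet loss it implements, squared-L2 distances with margin alpha, using the same tensorflow import as the examples (the exact code may differ slightly):

def triplet_loss_sketch(anchor, positive, negative, alpha):
    # Squared L2 distance between anchor-positive and anchor-negative pairs
    pos_dist = tf.reduce_sum(tf.square(anchor - positive), 1)
    neg_dist = tf.reduce_sum(tf.square(anchor - negative), 1)
    # Hinge on the margin alpha, then average over the batch
    basic_loss = pos_dist - neg_dist + alpha
    return tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)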
Example 6
def main(args):

    network = importlib.import_module(args.model_def)
    image_size = (args.image_size, args.image_size)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    stat_file_name = os.path.join(log_dir, 'stat.h5')

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir,
                                                       'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    dataset = facenet.get_dataset(args.data_dir)
    if args.filter_filename:
        dataset = filter_dataset(dataset,
                                 os.path.expanduser(args.filter_filename),
                                 args.filter_percentile,
                                 args.filter_min_nrof_images_per_class)

    if args.validation_set_split_ratio > 0.0:
        train_set, val_set = facenet.split_dataset(
            dataset, args.validation_set_split_ratio,
            args.min_nrof_val_images_per_class, 'SPLIT_IMAGES')
    else:
        train_set, val_set = dataset, []

    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list) > 0, 'The training set should not be empty'

        val_image_list, val_label_list = facenet.get_image_paths_and_labels(
            val_set)

        # Create a queue that produces indices into the image_list and label_list
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size,
                                                    num_epochs=None,
                                                    shuffle=True,
                                                    seed=None,
                                                    capacity=32)

        index_dequeue_op = index_queue.dequeue_many(
            args.batch_size * args.epoch_size, 'index_dequeue')

        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        image_paths_placeholder = tf.placeholder(tf.string,
                                                 shape=(None, 1),
                                                 name='image_paths')
        labels_placeholder = tf.placeholder(tf.int32,
                                            shape=(None, 1),
                                            name='labels')
        control_placeholder = tf.placeholder(tf.int32,
                                             shape=(None, 1),
                                             name='control')

        nrof_preprocess_threads = 4
        input_queue = data_flow_ops.FIFOQueue(
            capacity=2000000,
            dtypes=[tf.string, tf.int32, tf.int32],
            shapes=[(1, ), (1, ), (1, )],
            shared_name=None,
            name=None)
        enqueue_op = input_queue.enqueue_many(
            [image_paths_placeholder, labels_placeholder, control_placeholder],
            name='enqueue_op')
        image_batch, label_batch = facenet.create_input_pipeline(
            input_queue, image_size, nrof_preprocess_threads,
            batch_size_placeholder)

        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')

        print('Number of classes in training set: %d' % nrof_classes)
        print('Number of examples in training set: %d' % len(image_list))

        print('Number of classes in validation set: %d' % len(val_set))
        print('Number of examples in validation set: %d' % len(val_image_list))

        print('Building training graph')

        # Build the inference graph
        prelogits, _ = network.inference(
            image_batch,
            args.keep_probability,
            phase_train=phase_train_placeholder,
            bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)
        logits = slim.fully_connected(
            prelogits,
            len(train_set),
            activation_fn=None,
            weights_initializer=slim.initializers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(args.weight_decay),
            scope='Logits',
            reuse=False)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Rewrite the graph with fake-quantization ops for quantization-aware training
        g = tf.get_default_graph()
        tf.contrib.quantize.create_training_graph(input_graph=g, quant_delay=0)

        # Norm for the prelogits
        eps = 1e-4
        prelogits_norm = tf.reduce_mean(
            tf.norm(tf.abs(prelogits) + eps, ord=args.prelogits_norm_p,
                    axis=1))
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                             prelogits_norm * args.prelogits_norm_loss_factor)

        # Add center loss
        prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch,
                                                       args.center_loss_alfa,
                                                       nrof_classes)
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                             prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch,
            logits=logits,
            name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        correct_prediction = tf.cast(
            tf.equal(tf.argmax(logits, 1), tf.cast(label_batch, tf.int64)),
            tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables(), args.log_histograms)

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
        saver_new = tf.train.Saver(max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            nrof_steps = args.max_nrof_epochs * args.epoch_size
            nrof_val_samples = int(
                math.ceil(args.max_nrof_epochs / args.validate_every_n_epochs)
            )  # Validate every validate_every_n_epochs as well as in the last epoch
            stat = {
                'loss': np.zeros((nrof_steps,), np.float32),
                'center_loss': np.zeros((nrof_steps,), np.float32),
                'reg_loss': np.zeros((nrof_steps,), np.float32),
                'xent_loss': np.zeros((nrof_steps,), np.float32),
                'prelogits_norm': np.zeros((nrof_steps,), np.float32),
                'accuracy': np.zeros((nrof_steps,), np.float32),
                'val_loss': np.zeros((nrof_val_samples,), np.float32),
                'val_xent_loss': np.zeros((nrof_val_samples,), np.float32),
                'val_accuracy': np.zeros((nrof_val_samples,), np.float32),
                'lfw_accuracy': np.zeros((args.max_nrof_epochs,), np.float32),
                'lfw_valrate': np.zeros((args.max_nrof_epochs,), np.float32),
                'learning_rate': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_train': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_validate': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_evaluate': np.zeros((args.max_nrof_epochs,), np.float32),
                'prelogits_hist': np.zeros((args.max_nrof_epochs, 1000), np.float32),
            }
            for epoch in range(1, args.max_nrof_epochs + 1):
                step = sess.run(global_step, feed_dict=None)
                # Train for one epoch
                t = time.time()
                cont = train(
                    args, sess, epoch, image_list, label_list,
                    index_dequeue_op, enqueue_op, image_paths_placeholder,
                    labels_placeholder, learning_rate_placeholder,
                    phase_train_placeholder, batch_size_placeholder,
                    control_placeholder, global_step, total_loss, train_op,
                    summary_op, summary_writer, regularization_losses,
                    args.learning_rate_schedule_file, stat, cross_entropy_mean,
                    accuracy, learning_rate, prelogits, prelogits_center_loss,
                    args.random_rotate, args.random_crop, args.random_flip,
                    prelogits_norm, args.prelogits_hist_max,
                    args.use_fixed_image_standardization)
                stat['time_train'][epoch - 1] = time.time() - t

                if not cont:
                    break

                t = time.time()
                if len(val_image_list) > 0 and (
                    (epoch - 1) % args.validate_every_n_epochs
                        == args.validate_every_n_epochs - 1
                        or epoch == args.max_nrof_epochs):
                    validate(args, sess, epoch, val_image_list, val_label_list,
                             enqueue_op, image_paths_placeholder,
                             labels_placeholder, control_placeholder,
                             phase_train_placeholder, batch_size_placeholder,
                             stat, total_loss, regularization_losses,
                             cross_entropy_mean, accuracy,
                             args.validate_every_n_epochs,
                             args.use_fixed_image_standardization)
                stat['time_validate'][epoch - 1] = time.time() - t

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver_new, summary_writer,
                                             model_dir, subdir, epoch)

                # Evaluate on LFW
                t = time.time()
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder,
                             labels_placeholder, phase_train_placeholder,
                             batch_size_placeholder, control_placeholder,
                             embeddings, label_batch, lfw_paths, actual_issame,
                             args.lfw_batch_size, args.lfw_nrof_folds, log_dir,
                             step, summary_writer, stat, epoch,
                             args.lfw_distance_metric, args.lfw_subtract_mean,
                             args.lfw_use_flipped_images,
                             args.use_fixed_image_standardization)
                stat['time_evaluate'][epoch - 1] = time.time() - t

                print('Saving statistics')
                with h5py.File(stat_file_name, 'w') as f:
                    for key, value in stat.items():
                        f.create_dataset(key, data=value)

    return model_dir
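
The graph built above names its key tensors ('input', 'phase_train', 'embeddings'), so a checkpoint saved by this script can later be used for inference roughly as sketched below, using the same tensorflow/numpy imports as the examples. The checkpoint file names and the 160x160 input size are placeholders; the images are assumed to be already aligned and standardized:

with tf.Graph().as_default(), tf.Session() as sess:
    # Load the graph definition and the trained weights (hypothetical file names)
    loader = tf.train.import_meta_graph('model.meta')
    loader.restore(sess, 'model.ckpt-123')
    images = np.zeros((1, 160, 160, 3), np.float32)  # batch of preprocessed face crops, sized to match args.image_size
    emb = sess.run('embeddings:0', feed_dict={'input:0': images, 'phase_train:0': False})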
Example 7
def main(args):
  
    img_mean = np.array([134.10714722, 102.52040863, 87.15436554])
    img_stddev = np.sqrt(np.array([3941.30175781, 2856.94287109, 2519.35791016]))
  
    vae_def = importlib.import_module(args.vae_def)
    vae = vae_def.Vae(args.latent_var_size)
    gen_image_size = vae.get_image_size()

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)
    log_file_name = os.path.join(model_dir, 'logs.h5')
    
    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(model_dir, 'arguments.txt'))
        
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, model_dir, ' '.join(sys.argv))
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        
        train_set = facenet.get_dataset(args.data_dir)
        image_list, _ = facenet.get_image_paths_and_labels(train_set)
        
        # Create the input queue
        input_queue = tf.train.string_input_producer(image_list, shuffle=True)
    
        nrof_preprocess_threads = 4
        image_per_thread = []
        for _ in range(nrof_preprocess_threads):
            file_contents = tf.read_file(input_queue.dequeue())
            image = tf.image.decode_image(file_contents, channels=3)
            image = tf.image.resize_image_with_crop_or_pad(image, args.input_image_size, args.input_image_size)
            image.set_shape((args.input_image_size, args.input_image_size, 3))
            image = tf.cast(image, tf.float32)
            #pylint: disable=no-member
            image_per_thread.append([image])
    
        images = tf.train.batch_join(
            image_per_thread, batch_size=args.batch_size,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=False)
        
        # Normalize
        images_norm = (images-img_mean) / img_stddev

        # Resize to appropriate size for the encoder 
        images_norm_resize = tf.image.resize_images(images_norm, (gen_image_size,gen_image_size))
        
        # Create encoder network
        mean, log_variance = vae.encoder(images_norm_resize, True)
        
        # Reparameterization trick: sample the latent variable as mean + std * epsilon
        epsilon = tf.random_normal((tf.shape(mean)[0], args.latent_var_size))
        std = tf.exp(log_variance/2)
        latent_var = mean + epsilon * std
        
        # Create decoder network
        reconstructed_norm = vae.decoder(latent_var, True)
        
        # Un-normalize
        reconstructed = (reconstructed_norm*img_stddev) + img_mean
        
        # Create reconstruction loss
        if args.reconstruction_loss_type=='PLAIN':
            images_resize = tf.image.resize_images(images, (gen_image_size,gen_image_size))
            reconstruction_loss = tf.reduce_mean(tf.reduce_sum(tf.pow(images_resize - reconstructed,2)))
        elif args.reconstruction_loss_type=='PERCEPTUAL':
            network = importlib.import_module(args.model_def)

            reconstructed_norm_resize = tf.image.resize_images(reconstructed_norm, (args.input_image_size,args.input_image_size))

            # Stack images from both the input batch and the reconstructed batch in a new tensor 
            shp = [-1] + images_norm.get_shape().as_list()[1:]
            input_images = tf.reshape(tf.stack([images_norm, reconstructed_norm_resize], axis=0), shp)
            _, end_points = network.inference(input_images, 1.0, 
                phase_train=False, bottleneck_layer_size=128, weight_decay=0.0)

            # Get a list of feature names to use for loss terms
            feature_names = args.loss_features.replace(' ', '').split(',')

            # Calculate L2 loss between original and reconstructed images in feature space
            reconstruction_loss_list = []
            for feature_name in feature_names:
                feature_flat = slim.flatten(end_points[feature_name])
                image_feature, reconstructed_feature = tf.unstack(tf.reshape(feature_flat, [2,args.batch_size,-1]), num=2, axis=0)
                reconstruction_loss = tf.reduce_mean(tf.reduce_sum(tf.pow(image_feature-reconstructed_feature, 2)), name=feature_name+'_loss')
                reconstruction_loss_list.append(reconstruction_loss)
            # Sum up the losses in for the different features
            reconstruction_loss = tf.add_n(reconstruction_loss_list, 'reconstruction_loss')
        else:
            raise ValueError('Invalid reconstruction loss type: %s' % args.reconstruction_loss_type)
        
        # Create KL divergence loss
        kl_loss = kl_divergence_loss(mean, log_variance)
        kl_loss_mean = tf.reduce_mean(kl_loss)
        
        total_loss = args.alfa*kl_loss_mean + args.beta*reconstruction_loss
        
        learning_rate = tf.train.exponential_decay(args.initial_learning_rate, global_step,
            args.learning_rate_decay_steps, args.learning_rate_decay_factor, staircase=True)
        
        # Calculate gradients and make sure not to include parameters for the perceptual loss model
        opt = tf.train.AdamOptimizer(learning_rate)
        grads = opt.compute_gradients(total_loss, var_list=get_variables_to_train())
        
        # Apply gradients
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        with tf.control_dependencies([apply_gradient_op]):
            train_op = tf.no_op(name='train')

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
        
        facenet_saver = tf.train.Saver(get_facenet_variables_to_restore())

        # Start running operations on the Graph
        gpu_memory_fraction = 1.0
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():
            
            if args.reconstruction_loss_type=='PERCEPTUAL':
                if not args.pretrained_model:
                    raise ValueError('A pretrained model must be specified when using perceptual loss')
                pretrained_model_exp = os.path.expanduser(args.pretrained_model)
                print('Restoring pretrained model: %s' % pretrained_model_exp)
                facenet_saver.restore(sess, pretrained_model_exp)
          
            log = {
                'total_loss': np.zeros((0,), float),
                'reconstruction_loss': np.zeros((0,), float),
                'kl_loss': np.zeros((0,), float),
                'learning_rate': np.zeros((0,), float),
                }
            
            step = 0
            print('Running training')
            while step < args.max_nrof_steps:
                start_time = time.time()
                step += 1
                save_state = step>0 and (step % args.save_every_n_steps==0 or step==args.max_nrof_steps)
                if save_state:
                    _, reconstruction_loss_, kl_loss_mean_, total_loss_, learning_rate_, rec_ = sess.run(
                          [train_op, reconstruction_loss, kl_loss_mean, total_loss, learning_rate, reconstructed])
                    img = facenet.put_images_on_grid(rec_, shape=(16,8))
                    misc.imsave(os.path.join(model_dir, 'reconstructed_%06d.png' % step), img)
                else:
                    _, reconstruction_loss_, kl_loss_mean_, total_loss_, learning_rate_ = sess.run(
                          [train_op, reconstruction_loss, kl_loss_mean, total_loss, learning_rate])
                log['total_loss'] = np.append(log['total_loss'], total_loss_)
                log['reconstruction_loss'] = np.append(log['reconstruction_loss'], reconstruction_loss_)
                log['kl_loss'] = np.append(log['kl_loss'], kl_loss_mean_)
                log['learning_rate'] = np.append(log['learning_rate'], learning_rate_)

                duration = time.time() - start_time
                print('Step: %d \tTime: %.3f \trec_loss: %.3f \tkl_loss: %.3f \ttotal_loss: %.3f' % (step, duration, reconstruction_loss_, kl_loss_mean_, total_loss_))

                if save_state:
                    print('Saving checkpoint file')
                    checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)
                    print('Saving log')
                    with h5py.File(log_file_name, 'w') as f:
                        for key, value in log.items():
                            f.create_dataset(key, data=value)
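
The HDF5 log written above can be read back later for plotting or analysis. A minimal sketch, assuming the same log_file_name and dataset keys used in the training loop above:

import h5py
import numpy as np

# Read the per-step training curves back from the log file
with h5py.File(log_file_name, 'r') as f:
    total_loss_history = np.array(f['total_loss'])   # one entry per training step
    kl_loss_history = np.array(f['kl_loss'])
print('Final total loss: %.3f' % total_loss_history[-1])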
Example n. 8
def main(args):
  
    network = importlib.import_module(args.model_def, 'inference')

    if args.model_name:
        subdir = args.model_name
        preload_model = True
    else:
        subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
        preload_model = False
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.mkdir(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.mkdir(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    
    print('Model directory: %s' % model_dir)
    
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for input images
        images_placeholder = tf.placeholder(tf.float32, shape=(None, args.image_size, args.image_size, 3), name='input')

        # Placeholder for phase_train
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        # Build the inference graph
        embeddings = network.inference(images_placeholder, args.pool_type, args.use_lrn, 
             args.keep_probability, phase_train=phase_train_placeholder, weight_decay=args.weight_decay)

        # Split example embeddings into anchor, positive and negative
        anchor, positive, negative = tf.split(embeddings, 3, axis=0)

        # Calculate triplet loss
        loss = facenet.triplet_loss(anchor, positive, negative, args.alpha)
        
        # Calculate the total loss
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([loss] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op, _ = facenet.train(total_loss, global_step, args.optimizer, args.learning_rate, 
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, args.moving_average_decay)

        # Create a saver
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=0)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()

        # Start running operations on the Graph.
        sess = tf.Session()
        sess.run(init)

        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)

        with sess.as_default():

            if preload_model:
                ckpt = tf.train.get_checkpoint_state(model_dir)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                else:
                    raise ValueError('Checkpoint not found')

            # Training and validation loop
            for epoch in range(args.max_nrof_epochs):
                # Train for one epoch
                step = train(args, sess, train_set, epoch, images_placeholder, phase_train_placeholder,
                             global_step, embeddings, loss, train_op, summary_op, summary_writer)
               
                _, _, accuracy, val, val_std, far = lfw.validate(sess, 
                    paths, actual_issame, args.seed, args.batch_size,
                    images_placeholder, phase_train_placeholder, embeddings, nrof_folds=args.lfw_nrof_folds)
                print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
                print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
                # Add validation loss and accuracy to summary
                summary = tf.Summary()
                summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
                summary.value.add(tag='lfw/val_rate', simple_value=val)
                summary_writer.add_summary(summary, step)

                if (epoch % args.checkpoint_period == 0) or (epoch==args.max_nrof_epochs-1):
                    # Save the model checkpoint
                    print('Saving checkpoint')
                    checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
    return model_dir
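
For reference, the margin-based triplet loss that facenet.triplet_loss computes on the anchor/positive/negative splits can be sketched in NumPy. This is an illustrative version, assuming squared Euclidean distances and margin alpha; the actual implementation lives in facenet.py:

import numpy as np

def triplet_loss_np(anchor, positive, negative, alpha):
    # Squared distances between anchor-positive and anchor-negative pairs
    pos_dist = np.sum(np.square(anchor - positive), axis=1)
    neg_dist = np.sum(np.square(anchor - negative), axis=1)
    # Hinge: only triplets that violate the margin contribute to the loss
    return np.mean(np.maximum(pos_dist - neg_dist + alpha, 0.0))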
Example n. 9
def main(args):
    sleep(random.random())
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir)

    print('Loading MTCNN model')

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    minsize = 20  # minimum face size in pixels
    threshold = [0.6, 0.7, 0.7]  # detection thresholds for the three MTCNN stages
    factor = 0.709  # scale factor for the image pyramid

    # Add a random key to the filename to allow alignment using multiple processes
    random_key = np.random.randint(0, high=99999)
    bounding_boxes_filename = os.path.join(
        output_dir, 'bounding_boxes_%05d.txt' % random_key)

    with open(bounding_boxes_filename, "w") as text_file:
        nrof_images_total = 0
        nrof_successfully_aligned = 0
        if args.random_order:
            random.shuffle(dataset)
        for cls in dataset:
            output_class_dir = os.path.join(output_dir, cls.name)
            if not os.path.exists(output_class_dir):
                os.makedirs(output_class_dir)
                if args.random_order:
                    random.shuffle(cls.image_paths)
            for image_path in cls.image_paths:
                nrof_images_total += 1
                filename = os.path.splitext(os.path.split(image_path)[1])[0]
                output_filename = os.path.join(output_class_dir,
                                               filename + '.png')
                print(image_path)
                if not os.path.exists(output_filename):
                    try:
                        img = misc.imread(image_path)
                    except (IOError, ValueError, IndexError) as e:
                        errorMessage = '{}: {}'.format(image_path, e)
                        print(errorMessage)
                    else:
                        if img.ndim < 2:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))
                            continue
                        if img.ndim == 2:
                            img = facenet.to_rgb(img)
                        img = img[:, :, 0:3]

                        #### Align the detected faces
                        # saver = tf.train.import_meta_graph('./mtcnn-3000000.meta')
                        # graph = tf.get_default_graph()
                        # sess.run(tf.global_variables_initializer())
                        # saver.restore(sess, './mtcnn-3000000')

                        bounding_boxes, points = align.detect_face.detect_face(
                            img, minsize, pnet, rnet, onet, threshold, factor)

                        if len(points) != 0:
                            nrof_faces = points.shape[1]

                            if nrof_faces > 0:
                                det = bounding_boxes[:, 0:4]

                                # ---------  show faces with box   -----
                                for i in range(nrof_faces):
                                    bb = det[i, :].astype(dtype=np.int32)
                                    img_and_crop = cv2.rectangle(
                                        img, (bb[0], bb[1]), (bb[2], bb[3]),
                                        (0, 255, 0))
                                    cv2.putText(img,
                                                '# {}'.format(i),
                                                (bb[0], bb[3] - bb[1]),
                                                cv2.FONT_HERSHEY_SIMPLEX,
                                                1.0, (255, 255, 255),
                                                lineType=cv2.LINE_AA)
                                image_tmp = cv2.cvtColor(
                                    img_and_crop, cv2.COLOR_BGR2RGB)
                                cv2.imshow('img_crp', image_tmp)
                                cv2.waitKey()
                                ##########################

                                det_arr = []
                                _landmark = None
                                img_size = np.asarray(img.shape)[0:2]

                                if nrof_faces > 1:

                                    if args.detect_multiple_faces:
                                        for i in range(nrof_faces):
                                            _landmark = points[:, i].reshape(
                                                (2, 5)).T
                                            _bb = np.squeeze(
                                                bounding_boxes[i, 0:4])

                                            warp0, M = alignment(
                                                img, _landmark, img_size)
                                            ### crop aligned images
                                            bounding_boxes_new, point = align.detect_face.detect_face(
                                                warp0, minsize, pnet, rnet,
                                                onet, threshold, factor)

                                            if np.shape(
                                                    bounding_boxes_new)[0] > 1:
                                                print(
                                                    '====== found more faces after warping; choosing the most likely one ======'
                                                )
                                                offsets = [
                                                    np.linalg.norm(
                                                        dist(point[:, i]))
                                                    for i in range(
                                                        np.shape(point)[1])
                                                ]
                                                index_min = np.argmin(offsets)
                                                det = bounding_boxes_new[
                                                    index_min, 0:4]
                                            elif np.shape(bounding_boxes_new
                                                          )[0] == 0:
                                                print('\n')
                                                print(
                                                    '====== #{} has faces, but the warped image has none ======'
                                                    .format(output_filename))
                                                print('\n')
                                                det = _bb
                                            else:
                                                det = bounding_boxes_new[0,
                                                                         0:4]

                                            # bb = det.astype(dtype=np.int32)
                                            # img_and_crop = cv2.rectangle(warp0, (bb[0], bb[1]), (bb[2], bb[3]),
                                            #                                  (0, 255, 0))
                                            # image_tmp = cv2.cvtColor(img_and_crop, cv2.COLOR_BGR2RGB)
                                            # cv2.imshow('img_crp', image_tmp)
                                            # cv2.waitKey()

                                            nrof_successfully_aligned = crop_face(
                                                i, det, args.margin, img_size,
                                                warp0, args.image_size,
                                                nrof_successfully_aligned,
                                                args.detect_multiple_faces,
                                                output_filename, text_file)

                                    else:
                                        bounding_box_size = (
                                            det[:, 2] - det[:, 0]) * (
                                                det[:, 3] - det[:, 1])
                                        img_center = img_size / 2
                                        offsets = np.vstack([
                                            (det[:, 0] + det[:, 2]) / 2 -
                                            img_center[1],
                                            (det[:, 1] + det[:, 3]) / 2 -
                                            img_center[0]
                                        ])
                                        offset_dist_squared = np.sum(
                                            np.power(offsets, 2.0), 0)
                                        index = np.argmax(
                                            2 * bounding_box_size -
                                            offset_dist_squared
                                        )  # some extra weight on the centering
                                        det_arr.append(det[index, :])
                                        _bbox = det[index, :]
                                        _landmark = points[:, index].reshape(
                                            (2, 5)).T
                                        warp0, _ = alignment(
                                            img, _landmark, img_size)
                                        #### crop aligned images
                                        bounding_boxes_new, point = align.detect_face.detect_face(
                                            warp0, minsize, pnet, rnet, onet,
                                            threshold, factor)

                                        if np.shape(bounding_boxes_new)[0] > 1:
                                            print(
                                                '====== found more faces after warping; choosing the most likely one ======'
                                            )
                                            offsets = [
                                                np.linalg.norm(
                                                    dist(point[:, i]))
                                                for i in range(
                                                    np.shape(point)[1])
                                            ]
                                            index_min = np.argmin(offsets)
                                            det = bounding_boxes_new[index_min,
                                                                     0:4]
                                        elif np.shape(
                                                bounding_boxes_new)[0] == 0:
                                            print('\n')
                                            print(
                                                '====== #{} has faces, but the warped image has none ======'
                                                .format(output_filename))
                                            print('\n')
                                            continue
                                        else:
                                            det = bounding_boxes_new[0, 0:4]

                                        nrof_successfully_aligned = crop_face(
                                            0, det, args.margin, img_size,
                                            warp0, args.image_size,
                                            nrof_successfully_aligned, False,
                                            output_filename, text_file)
                                else:
                                    _landmark = points.reshape((2, 5)).T
                                    warp0, M = alignment(
                                        img, _landmark, img_size)
                                    #### crop aligned images
                                    bounding_boxes_new, point = align.detect_face.detect_face(
                                        warp0, minsize, pnet, rnet, onet,
                                        threshold, factor)

                                    if np.shape(bounding_boxes_new)[0] > 1:
                                        print(
                                            '====== found more faces after warping; choosing the most likely one ======'
                                        )
                                        offsets = [
                                            np.linalg.norm(dist(point[:, i]))
                                            for i in range(np.shape(point)[1])
                                        ]
                                        index_min = np.argmin(offsets)
                                        det = bounding_boxes_new[index_min,
                                                                 0:4]
                                    elif np.shape(bounding_boxes_new)[0] == 0:
                                        print('\n')
                                        print(
                                            '====== #{} has faces, but the warped image has none ======'
                                            .format(output_filename))
                                        print('\n')
                                        continue
                                    else:
                                        det = bounding_boxes_new[0, 0:4]

                                    nrof_successfully_aligned = crop_face(
                                        0, det, args.margin, img_size, warp0,
                                        args.image_size,
                                        nrof_successfully_aligned, False,
                                        output_filename, text_file)

                            else:
                                print('Unable to align "%s"' % image_path)
                                text_file.write('%s\n' % (output_filename))
                                os.remove(image_path)
                        else:
                            print(
                                '==== No faces found in {}, removing it ===='
                                .format(output_filename))
                            os.remove(image_path)

    print('Total number of images: %d' % nrof_images_total)
    print('Number of successfully aligned images: %d' %
          nrof_successfully_aligned)
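
When detect_multiple_faces is disabled, the code above keeps the single detection that trades off bounding-box size against distance from the image center. A standalone NumPy sketch of that selection heuristic, with det assumed to be an (N, 4) array of [x1, y1, x2, y2] boxes and img_size a (height, width) pair as in the listing:

import numpy as np

def pick_main_face(det, img_size):
    bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
    img_center = np.asarray(img_size) / 2
    offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],
                         (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
    offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
    # Same weighting as above: favor large boxes, penalize off-center ones
    return np.argmax(2 * bounding_box_size - offset_dist_squared)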
Example n. 10
def main(args):

    network = importlib.import_module(args.model_def)
    image_size = (args.image_size, args.image_size)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir),
                           subdir)  # directory where logs are saved
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir,
                                                       'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    # os.path.realpath(__file__) returns the real path of the current module
    # os.path.split(os.path.realpath(__file__)) returns the directory and file name as a tuple
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    # dataset is a list whose elements are ImageClass objects, one per class; see facenet.py for the definition
    dataset = facenet.get_dataset(args.data_dir)
    if args.filter_filename:
        dataset = filter_dataset(dataset,
                                 os.path.expanduser(args.filter_filename),
                                 args.filter_percentile,
                                 args.filter_min_nrof_images_per_class)

    # Split into training and validation sets; train_set and val_set have the same structure as dataset
    if args.validation_set_split_ratio > 0.0:
        train_set, val_set = facenet.split_dataset(
            dataset, args.validation_set_split_ratio,
            args.min_nrof_val_images_per_class, 'SPLIT_IMAGES')
    else:
        train_set, val_set = dataset, []

    nrof_classes = len(train_set)  # number of classes (i.e. number of people)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        # pairs is a list of lists; each inner list holds the fields of one line of pairs.txt
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        # lfw_paths is a list of 2-element tuples of image paths to compare; actual_issame is a list of booleans, one per pair, indicating whether the two images show the same person
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        # global_step counts the total number of batches processed; the learning rate schedule is driven by it
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list) > 0, 'The training set should not be empty'

        val_image_list, val_label_list = facenet.get_image_paths_and_labels(
            val_set)

        # Create a queue that produces indices into the image_list and label_list
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        # range_size equals the number of samples
        range_size = array_ops.shape(labels)[0]
        # QueueRunner: stores the queue's enqueue ops in a list, each enqueue running in its own thread
        # range_input_producer: returns a queue holding shuffled integers in the range 0 to range_size
        # range_size equals the total number of samples
        # It also adds a QueueRunner to the current graph's QUEUE_RUNNER collection
        index_queue = tf.train.range_input_producer(
            range_size, num_epochs=None, shuffle=True, seed=None,
            capacity=32)  # capacity is the queue capacity

        # Returns a dequeue op that pops the number of samples needed for one epoch
        index_dequeue_op = index_queue.dequeue_many(
            args.batch_size * args.epoch_size, 'index_dequeue')

        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        image_paths_placeholder = tf.placeholder(tf.string,
                                                 shape=(None, 1),
                                                 name='image_paths')
        labels_placeholder = tf.placeholder(tf.int32,
                                            shape=(None, 1),
                                            name='labels')
        control_placeholder = tf.placeholder(tf.int32,
                                             shape=(None, 1),
                                             name='control')

        # The queue above holds shuffled sample indices and is only used for dequeuing,
        # popping the samples used in each epoch
        # This second queue is enqueued into; each element has shape [(1,), (1,), (1,)]
        nrof_preprocess_threads = 4
        # shapes gives the dimensions of each queue element
        input_queue = data_flow_ops.FIFOQueue(
            capacity=600000,
            dtypes=[tf.string, tf.int32, tf.int32],
            shapes=[(1, ), (1, ), (1, )],
            shared_name=None,
            name=None)

        # This is an enqueue op, run later inside session.run
        # Each call enqueues image_paths_placeholder / labels_placeholder pairs
        # Note there are only two queues: one dequeues shuffled sample indices,
        # the other reads the corresponding files on demand
        enqueue_op = input_queue.enqueue_many(
            [image_paths_placeholder, labels_placeholder, control_placeholder],
            name='enqueue_op')
        image_batch, label_batch = facenet.create_input_pipeline(
            input_queue, image_size, nrof_preprocess_threads,
            batch_size_placeholder)

        # image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')

        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))
        print('Building training graph')

        # Build the inference graph
        prelogits, _ = network.inference(
            image_batch,
            args.keep_probability,
            phase_train=phase_train_placeholder,
            bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
        # arcface_logits = arcface_logits_compute(embeddings, label_batch, args, nrof_classes)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)
        with tf.variable_scope('Logits'):
            arcface_logits = arcface_logits_compute(embeddings, label_batch,
                                                    args, nrof_classes)
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=label_batch,
                logits=arcface_logits,
                name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        correct_prediction = tf.cast(
            tf.equal(tf.argmax(arcface_logits, 1),
                     tf.cast(label_batch, tf.int64)), tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)

        # for weights in slim.get_variables_by_name('embedding_weights'):
        #    weight_regularization = tf.contrib.layers.l2_regularizer(args.weight_decay)(weights)
        #    tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, weight_regularization)

        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)

        if args.weight_decay == 0:
            total_loss = tf.add_n([cross_entropy_mean], name='total_loss')
        else:
            total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                                  name='total_loss')

        # Define the saver; keeping a separate one is useful when fine-tuning on a different dataset
        saver_save = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables(), args.log_histograms)
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():
            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                facenet.load_model(pretrained_model)

            print('Running arcface training')

            best_accuracy = 0.0
            for epoch in range(1, args.max_nrof_epochs + 1):
                step = sess.run(global_step, feed_dict=None)
                cont = train(
                    args, sess, epoch, image_list, label_list,
                    index_dequeue_op, enqueue_op, image_paths_placeholder,
                    labels_placeholder, learning_rate_placeholder,
                    phase_train_placeholder, batch_size_placeholder,
                    global_step, total_loss, accuracy, train_op, summary_op,
                    summary_writer, regularization_losses,
                    args.learning_rate_schedule_file, args.random_rotate,
                    args.random_crop, args.random_flip,
                    args.use_fixed_image_standardization, control_placeholder)

                if not cont:
                    break
                print('Running validation...')
                if args.lfw_dir:
                    best_accuracy = evaluate(
                        sess, enqueue_op, image_paths_placeholder,
                        labels_placeholder, phase_train_placeholder,
                        batch_size_placeholder, control_placeholder,
                        embeddings, label_batch, lfw_paths, actual_issame,
                        args.lfw_batch_size, args.lfw_nrof_folds, log_dir,
                        step, summary_writer, best_accuracy, saver_save,
                        model_dir, subdir, args.lfw_subtract_mean,
                        args.lfw_use_flipped_images,
                        args.use_fixed_image_standardization,
                        args.lfw_distance_metric)
    return model_dir
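
The helper arcface_logits_compute is not shown in this listing. A minimal NumPy sketch of the standard ArcFace formulation it presumably follows (an additive angular margin m on the target class, then scaling by s); the function name, shapes, and default values here are illustrative assumptions:

import numpy as np

def arcface_logits_np(embeddings, weights, labels, s=64.0, m=0.5):
    # Cosine similarity between L2-normalized embeddings (B, D) and class weights (D, C)
    emb = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
    w = weights / np.linalg.norm(weights, axis=0, keepdims=True)
    cos_theta = np.clip(emb @ w, -1.0, 1.0)
    theta = np.arccos(cos_theta)
    # Add the angular margin only at each sample's target class
    margin = np.zeros_like(cos_theta)
    margin[np.arange(len(labels)), labels] = m  # s and m are typical ArcFace defaults (assumed here)
    return s * np.cos(theta + margin)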
Example n. 11
def main(argv=None):  # pylint: disable=unused-argument
  
    if FLAGS.model_name:
        subdir = FLAGS.model_name
        preload_model = True
    else:
        subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
        preload_model = False
    log_dir = os.path.join(os.path.expanduser(FLAGS.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.mkdir(log_dir)
    model_dir = os.path.join(os.path.expanduser(FLAGS.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.mkdir(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(argv))

    np.random.seed(seed=FLAGS.seed)
    dataset = facenet.get_dataset(FLAGS.data_dir)
    train_set, validation_set = facenet.split_dataset(dataset, FLAGS.train_set_fraction, FLAGS.split_mode)
    
    print('Model directory: %s' % model_dir)

    with tf.Graph().as_default():
        tf.set_random_seed(FLAGS.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for input images
        images_placeholder = tf.placeholder(tf.float32, shape=(None, FLAGS.image_size, FLAGS.image_size, 3), name='input')

        # Placeholder for phase_train
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        # Build the inference graph
        embeddings = network.inference(images_placeholder, FLAGS.pool_type, FLAGS.use_lrn, 
                                       FLAGS.keep_probability, phase_train=phase_train_placeholder)

        # Split example embeddings into anchor, positive and negative
        anchor, positive, negative = tf.split(embeddings, 3, axis=0)

        # Calculate triplet loss
        loss = facenet.triplet_loss(anchor, positive, negative, FLAGS.alpha)

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op, _ = facenet.train(loss, global_step, FLAGS.optimizer, FLAGS.learning_rate, FLAGS.moving_average_decay)

        # Create a saver
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=0)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()

        # Start running operations on the Graph.
        sess = tf.Session(config=tf.ConfigProto(log_device_placement=FLAGS.log_device_placement))
        sess.run(init)

        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)

        with sess.as_default():

            if preload_model:
                ckpt = tf.train.get_checkpoint_state(model_dir)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                else:
                    raise ValueError('Checkpoint not found')

            # Training and validation loop
            for epoch in range(FLAGS.max_nrof_epochs):
                # Train for one epoch
                step = train(sess, train_set, epoch, images_placeholder, phase_train_placeholder,
                             global_step, embeddings, loss, train_op, summary_op, summary_writer)
                
                # Store the state of the random number generator
                rng_state = np.random.get_state()
                # Test on validation set
                np.random.seed(seed=FLAGS.seed)
                validate(sess, validation_set, epoch, images_placeholder, phase_train_placeholder,
                         global_step, embeddings, loss, 'validation', summary_writer)
                # Test on training set
                np.random.seed(seed=FLAGS.seed)
                validate(sess, train_set, epoch, images_placeholder, phase_train_placeholder,
                         global_step, embeddings, loss, 'training', summary_writer)
                # Restore state of the random number generator
                np.random.set_state(rng_state)
  
                if (epoch % FLAGS.checkpoint_period == 0) or (epoch==FLAGS.max_nrof_epochs-1):
                    # Save the model checkpoint
                    print('Saving checkpoint')
                    checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
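
Several of these examples drive the learning rate with tf.train.exponential_decay(..., staircase=True). With staircase enabled, the decay factor is applied in discrete steps; the schedule reduces to this small sketch:

def staircase_decay(base_lr, global_step, decay_steps, decay_factor):
    # lr = base_lr * decay_factor ** floor(global_step / decay_steps)
    return base_lr * decay_factor ** (global_step // decay_steps)

Example n. 12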
def main(args):
    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(),
                               '%Y%m%d-%H%M%S') + args.experiment_name
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir,
                                                       'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    class_name = [
        'smile', 'oval_face', '5_oclock_shadow', 'bald', 'arched_eyebrows',
        'big_lips', 'big_nose'
    ]
    class_num = len(class_name)
    class_index = [31, 25, 0, 4, 1, 6, 7]

    all_image_list = []
    all_label_list = []
    for i in range(class_num):

        image_list = []
        label_list = []

        train_set = facenet.get_sub_category_dataset(args.data_dir,
                                                     class_index[i])

        image_list_p, label_list_p = facenet.get_image_paths_and_labels_triplet(
            train_set[0], args)
        image_list_n, label_list_n = facenet.get_image_paths_and_labels_triplet(
            train_set[1], args)

        image_list.append(image_list_p)
        image_list.append(image_list_n)
        label_list.append(label_list_p)
        label_list.append(label_list_n)

        all_image_list.append(image_list)
        all_label_list.append(label_list)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    if args.pretrained_model:
        print('Pre-trained model: %s' %
              os.path.expanduser(args.pretrained_model))

    image_size = args.image_size
    batch_size = args.batch_size
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        # image_paths_placeholder = tf.placeholder(tf.string, shape=(None,3), name='image_paths')
        image_placeholder = tf.placeholder(tf.float32,
                                           shape=(batch_size, image_size,
                                                  image_size, 3),
                                           name='images')
        labels_placeholder = tf.placeholder(tf.int64,
                                            shape=(batch_size, 3),
                                            name='labels')

        code_placeholder = tf.placeholder(tf.float32,
                                          shape=(batch_size, class_num, 1, 1),
                                          name='code')

        image_batch = normalized_image(image_placeholder)
        code_batch = code_placeholder

        control_code = tf.tile(code_placeholder,
                               [1, 1, args.embedding_size, 1])
        mask_array = np.ones((1, class_num, args.embedding_size, 1),
                             np.float32)

        # for i in range(class_num):
        #     mask_array[:,i,(args.embedding_size/class_num)*i:(args.embedding_size/class_num)*(i+1)] = 1

        mask_tensor = tf.get_variable('mask',
                                      dtype=tf.float32,
                                      trainable=args.learned_mask,
                                      initializer=tf.constant(mask_array))
        mask_tensor = tf.tile(mask_tensor, [batch_size, 1, 1, 1])
        control_code = tf.tile(code_placeholder,
                               [1, 1, args.embedding_size, 1])

        mask_out = tf.multiply(mask_tensor, control_code)
        mask_out = tf.reduce_sum(mask_out, axis=1)
        mask_out = tf.squeeze(mask_out)
        mask_out = tf.nn.relu(mask_out)

        mask0_array = np.ones((1, class_num, 128, 1), np.float32)
        mask0_tensor = tf.get_variable('mask0',
                                       dtype=tf.float32,
                                       trainable=args.learned_mask,
                                       initializer=tf.constant(mask0_array))
        mask0_tensor = tf.tile(mask0_tensor, [batch_size, 1, 1, 1])
        control0_code = tf.tile(code_placeholder, [1, 1, 128, 1])

        mask0_out = tf.multiply(mask0_tensor, control0_code)
        mask0_out = tf.reduce_sum(mask0_out, axis=1)
        mask0_out = tf.squeeze(mask0_out)
        mask0_out = tf.nn.relu(mask0_out)
        mask0_out = tf.expand_dims(mask0_out, 1)
        mask0_out = tf.expand_dims(mask0_out, 1)

        mask1_array = np.ones((1, class_num, 128, 1), np.float32)
        mask1_tensor = tf.get_variable('mask1',
                                       dtype=tf.float32,
                                       trainable=args.learned_mask,
                                       initializer=tf.constant(mask1_array))
        mask1_tensor = tf.tile(mask1_tensor, [batch_size, 1, 1, 1])
        control1_code = tf.tile(code_placeholder, [1, 1, 128, 1])

        mask1_out = tf.multiply(mask1_tensor, control1_code)
        mask1_out = tf.reduce_sum(mask1_out, axis=1)
        mask1_out = tf.squeeze(mask1_out)
        mask1_out = tf.nn.relu(mask1_out)
        mask1_out = tf.expand_dims(mask1_out, 1)
        mask1_out = tf.expand_dims(mask1_out, 1)

        # Build the inference graph
        prelogits, features = network.inference(
            image_batch,
            mask0_out,
            mask1_out,
            args.keep_probability,
            phase_train=phase_train_placeholder,
            bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)

        embeddings_pre = tf.multiply(mask_out, prelogits)

        embeddings = tf.nn.l2_normalize(embeddings_pre,
                                        1,
                                        1e-10,
                                        name='embeddings')
        anchor_index = list(range(0, batch_size, 3))
        positive_index = list(range(1, batch_size, 3))
        negative_index = list(range(2, batch_size, 3))

        a_indice = tf.constant(np.array(anchor_index))
        p_indice = tf.constant(np.array(positive_index))

        n_indice = tf.constant(np.array(negative_index))

        anchor = tf.gather(embeddings, a_indice)
        positive = tf.gather(embeddings, p_indice)
        negative = tf.gather(embeddings, n_indice)

        triplet_loss = facenet.triplet_loss(anchor, positive, negative,
                                            args.alpha)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([triplet_loss] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables())

        # Create a saver
        trainable_variables = tf.trainable_variables()
        saver = tf.train.Saver(trainable_variables, max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

        # Initialize variables
        sess.run(tf.global_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})
        sess.run(tf.local_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})

        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if args.pretrained_model:
                print('Restoring pretrained model: %s' % args.pretrained_model)
                saver.restore(sess, os.path.expanduser(args.pretrained_model))

            # Training and validation loop
            epoch = 0
            Accuracy = [0]
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size  # derive the epoch from the global step so the loop can terminate
                # Train for one epoch
                code_list = []
                triplets_list = []
                if (epoch + 1) % args.lr_epoch == 0:
                    args.learning_rate = 0.1 * args.learning_rate
                if args.random_trip:

                    for i in range(class_num):

                        code = np.zeros((batch_size, class_num, 1, 1),
                                        np.float32)
                        _class = i
                        code[:, _class, :, :] = 1

                        Triplets = triplet_random(
                            args, sess, all_image_list[i], all_image_list,
                            epoch, image_placeholder, batch_size_placeholder,
                            learning_rate_placeholder, phase_train_placeholder,
                            global_step, embeddings, total_loss, train_op,
                            summary_op, summary_writer,
                            args.learning_rate_schedule_file,
                            args.embedding_size, anchor, positive, negative,
                            triplet_loss)
                        triplets_list.append(Triplets)
                        code_list.append(code)

                for i in range(class_num):
                    Accuracy = test(
                        args, sess, image_list, epoch, image_placeholder,
                        code_placeholder, batch_size_placeholder,
                        learning_rate_placeholder, phase_train_placeholder,
                        global_step, embeddings, total_loss, train_op,
                        summary_op, summary_writer,
                        args.learning_rate_schedule_file, args.embedding_size,
                        anchor, positive, negative, triplet_loss,
                        triplets_list, Accuracy, features, mask0_out,
                        mask1_out, mask_out, class_name, i)

                # Save variables and the metagraph if it doesn't exist already
    sess.close()
    return model_dir
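
Per sample, the mask/control-code machinery above amounts to selecting the learned mask row of the active attribute and ReLU-gating the embedding with it. An equivalent NumPy sketch for a single sample; the shapes are assumptions read off the placeholders above:

import numpy as np

def gate_embedding(prelogits, mask, code):
    # prelogits: (embedding_size,); mask: (class_num, embedding_size);
    # code: (class_num,) one-hot selector of the active attribute
    selected = np.sum(mask * code[:, None], axis=0)  # pick the active attribute's mask row
    return np.maximum(selected, 0.0) * prelogits     # ReLU-gate the embedding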
Example n. 13
def main(args):
  
    # The model architecture, defined in inception_resnet_v1/v2; selected with --model_def models.inception_resnet_v1
    network = importlib.import_module(args.model_def)
    image_size = (args.image_size, args.image_size)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    stat_file_name = os.path.join(log_dir, 'stat.h5')

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
        
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    dataset = facenet.get_dataset(args.data_dir)
    if args.filter_filename:
        dataset = filter_dataset(dataset, os.path.expanduser(args.filter_filename), 
            args.filter_percentile, args.filter_min_nrof_images_per_class)
        
    if args.validation_set_split_ratio>0.0:
        train_set, val_set = facenet.split_dataset(dataset, args.validation_set_split_ratio, args.min_nrof_val_images_per_class, 'SPLIT_IMAGES')
    else:
        train_set, val_set = dataset, []
        
    nrof_classes = len(train_set)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        
        # Get a list of image paths and their labels
        # Training data
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list)>0, 'The training set should not be empty'
        
        # Validation data
        val_image_list, val_label_list = facenet.get_image_paths_and_labels(val_set)

        # Create a queue that produces indices into the image_list and label_list 
        # tf.convert_to_tensor converts various data (e.g. arrays or lists) into tensors
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        # Read data with multiple threads; shuffle=True means indices are produced in random order, cycling indefinitely
        # https://blog.csdn.net/lyg5623/article/details/69387917
        index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
                             shuffle=True, seed=None, capacity=32)
        
        # An epoch dequeues batch_size*epoch_size indices; for a small dataset, epoch_size should be nrof_samples/batch_size
        index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
        
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
        labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
        control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
        
        nrof_preprocess_threads = 4
        input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                    dtypes=[tf.string, tf.int32, tf.int32],
                                    shapes=[(1,), (1,), (1,)],
                                    shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='enqueue_op')
        image_batch, label_batch = facenet.create_input_pipeline(input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)

        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')
        
        print('Number of classes in training set: %d' % nrof_classes)
        print('Number of examples in training set: %d' % len(image_list))

        print('Number of classes in validation set: %d' % len(val_set))
        print('Number of examples in validation set: %d' % len(val_image_list))
        
        print('Building training graph')
        
        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability, 
            phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size, 
            weight_decay=args.weight_decay)
        # The network output (bottleneck_layer_size) stops before the final classification layer, so add the layer that maps embeddings to classes here
        logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None, 
                weights_initializer=slim.initializers.xavier_initializer(), 
                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                scope='Logits', reuse=False)

        # Normalize each row: sum the squares across the row, take the square root, and
        # divide each element of the row by that norm (normalizing across the depth dimension).
        # The last layer of inference outputs embedding_size nodes: slim.fully_connected(net, bottleneck_layer_size, activation_fn=None,
        # https://blog.csdn.net/abiggg/article/details/79368982
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # The loss also collects other training terms here: during training a weight can be
        # added to the regularization collection via
        # tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, weight),
        # and the model adds that collection into the optimized loss below.
        # L = L_softmax + lambda * L_center
        #   = Softmax(W_i * x + b_yj) + (lambda / 2) * ||f(x_i) - c_yj||_2^2
        # Norm for the prelogits
        eps = 1e-4
        prelogits_norm = tf.reduce_mean(tf.norm(tf.abs(prelogits)+eps, ord=args.prelogits_norm_p, axis=1))
        # The mean norm of the prelogits (the bottleneck_layer_size outputs) is added to the regularization losses; prelogits_norm_loss_factor appears to default to 0
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_norm * args.prelogits_norm_loss_factor)

        # Compute the center loss and add it to the regularization losses
        # Add center loss
        prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)
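
        # A sketch of what facenet.center_loss computes, assuming the standard
        # center-loss formulation: one center c_y is kept per class, each feature
        # f(x_i) is pulled toward its class center, and the centers are updated
        # by a moving average controlled by center_loss_alfa:
        #     loss    = mean(||f(x_i) - c_{y_i}||^2)
        #     c_{y_i} <- c_{y_i} - (1 - alfa) * (c_{y_i} - f(x_i))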

        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        
        # Calculate the average cross entropy loss across the batch
        # The prediction loss: the Softmax(W_i * x + b_yj) term of the loss above
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        # Add the mean prediction loss to the 'losses' collection
        tf.add_to_collection('losses', cross_entropy_mean)
        
        correct_prediction = tf.cast(tf.equal(tf.argmax(logits, 1), tf.cast(label_batch, tf.int64)), tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)
        
        # Total loss: cross_entropy_mean plus all regularization losses added earlier (including the model's own), fetched via tf.GraphKeys.REGULARIZATION_LOSSES
        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
        
        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            nrof_steps = args.max_nrof_epochs*args.epoch_size
            nrof_val_samples = int(math.ceil(args.max_nrof_epochs / args.validate_every_n_epochs))   # Validate every validate_every_n_epochs as well as in the last epoch
            stat = {
                'loss': np.zeros((nrof_steps,), np.float32),
                'center_loss': np.zeros((nrof_steps,), np.float32),
                'reg_loss': np.zeros((nrof_steps,), np.float32),
                'xent_loss': np.zeros((nrof_steps,), np.float32),
                'prelogits_norm': np.zeros((nrof_steps,), np.float32),
                'accuracy': np.zeros((nrof_steps,), np.float32),
                'val_loss': np.zeros((nrof_val_samples,), np.float32),
                'val_xent_loss': np.zeros((nrof_val_samples,), np.float32),
                'val_accuracy': np.zeros((nrof_val_samples,), np.float32),
                'lfw_accuracy': np.zeros((args.max_nrof_epochs,), np.float32),
                'lfw_valrate': np.zeros((args.max_nrof_epochs,), np.float32),
                'learning_rate': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_train': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_validate': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_evaluate': np.zeros((args.max_nrof_epochs,), np.float32),
                'prelogits_hist': np.zeros((args.max_nrof_epochs, 1000), np.float32),
              }
            for epoch in range(1,args.max_nrof_epochs+1):
                step = sess.run(global_step, feed_dict=None)
                # Train for one epoch
                t = time.time()
                # Train the model
                cont = train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, global_step, 
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file,
                    stat, cross_entropy_mean, accuracy, learning_rate,
                    prelogits, prelogits_center_loss, args.random_rotate, args.random_crop, args.random_flip, prelogits_norm, args.prelogits_hist_max, args.use_fixed_image_standardization)
                stat['time_train'][epoch-1] = time.time() - t
                
                if not cont:
                    break
                # Compute accuracy on the validation data
                t = time.time()
                if len(val_image_list)>0 and ((epoch-1) % args.validate_every_n_epochs == args.validate_every_n_epochs-1 or epoch==args.max_nrof_epochs):
                    validate(args, sess, epoch, val_image_list, val_label_list, enqueue_op, image_paths_placeholder, labels_placeholder, control_placeholder,
                        phase_train_placeholder, batch_size_placeholder, 
                        stat, total_loss, regularization_losses, cross_entropy_mean, accuracy, args.validate_every_n_epochs, args.use_fixed_image_standardization)
                stat['time_validate'][epoch-1] = time.time() - t

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, epoch)

                # Evaluate on LFW
                t = time.time()
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, 
                        embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer, stat, epoch, 
                        args.lfw_distance_metric, args.lfw_subtract_mean, args.lfw_use_flipped_images, args.use_fixed_image_standardization)
                stat['time_evaluate'][epoch-1] = time.time() - t

                print('Saving statistics')
                with h5py.File(stat_file_name, 'w') as f:
                    for key, value in stat.items():
                        f.create_dataset(key, data=value)
    
    return model_dir
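
The center-loss term referenced in the comments above (L = L_softmax + lambda * L_center) is easiest to see in isolation. Below is a minimal numpy sketch of the idea, not the facenet.center_loss implementation itself (which keeps the centers in a tf.Variable and updates them inside the graph); the names here are illustrative.

import numpy as np

def center_loss_sketch(features, labels, centers, alfa):
    # Pull out the running center of each example's class
    centers_batch = centers[labels]                 # (batch, embedding_size)
    diff = features - centers_batch
    # The penalty: half the mean squared distance to the class centers
    loss = 0.5 * np.mean(np.sum(diff ** 2, axis=1))
    # Nudge every center that appeared in the batch towards its features,
    # by a fraction alfa (the role played by args.center_loss_alfa above)
    for cls in np.unique(labels):
        mask = labels == cls
        centers[cls] += alfa * diff[mask].mean(axis=0)
    return loss, centers
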
Example 14
def main(args):
    sleep(random.random())
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir, max_items_per_class=99999)
    print('Creating networks and loading parameters')
    total = 0
    for cls in dataset:
        total += len(cls.image_paths)
    print(total)

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    minsize = 50  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three cascade stages
    factor = 0.709  # scale factor

    # Add a random key to the filename to allow alignment using multiple processes
    random_key = np.random.randint(0, high=99999)
    bounding_boxes_filename = os.path.join(
        output_dir, 'bounding_boxes_%05d.txt' % random_key)

    with open(bounding_boxes_filename, "w") as text_file:
        nrof_images_total = 0
        nrof_successfully_aligned = 0
        if args.random_order:
            random.shuffle(dataset)
        for cls in dataset:
            output_class_dir = os.path.join(output_dir, cls.name)
            if not os.path.exists(output_class_dir):
                os.makedirs(output_class_dir)
                if args.random_order:
                    random.shuffle(cls.image_paths)
            for image_path in cls.image_paths:
                nrof_images_total += 1
                filename = os.path.splitext(os.path.split(image_path)[1])[0]
                output_filename = os.path.join(output_class_dir,
                                               filename + '.jpg')
                #print(image_path)
                if not os.path.exists(output_filename):
                    try:
                        #img = misc.imread(image_path)
                        img = Image.open(image_path)
                        img = np.asarray(img)

                    except (IOError, ValueError, IndexError) as e:
                        errorMessage = '{}: {}'.format(image_path, e)
                        print(errorMessage)
                    else:
                        if img.ndim < 2:
                            print('Not RGB; unable to align "%s"' % image_path)
                            #text_file.write('%s\n' % (output_filename))
                            continue
                        if img.ndim == 2:
                            img = facenet.to_rgb(img)
                        img = img[:, :, 0:3]
                        #minsize = 0.5*img.shape[0]
                        bounding_boxes, _ = align.detect_face.detect_face(
                            img, minsize, pnet, rnet, onet, threshold, factor)
                        nrof_faces = bounding_boxes.shape[0]
                        if nrof_faces > 0:
                            det = bounding_boxes[:, 0:4]
                            img_size = np.asarray(img.shape)[0:2]
                            if nrof_faces > 1:
                                bounding_box_size = (det[:, 2] - det[:, 0]) * (
                                    det[:, 3] - det[:, 1])
                                img_center = img_size / 2
                                offsets = np.vstack([
                                    (det[:, 0] + det[:, 2]) / 2 -
                                    img_center[1],
                                    (det[:, 1] + det[:, 3]) / 2 - img_center[0]
                                ])
                                offset_dist_squared = np.sum(
                                    np.power(offsets, 2.0), 0)
                                index = np.argmax(
                                    bounding_box_size - offset_dist_squared *
                                    2.0)  # some extra weight on the centering
                                det = det[index, :]
                            det = np.squeeze(det)
                            bb = np.zeros(4, dtype=np.int32)
                            # width
                            margin = np.minimum(det[2] - det[0],
                                                det[3] - det[1]) * args.margin

                            bb[0] = np.maximum(det[0] - margin, 0)
                            bb[1] = np.maximum(det[1] - margin, 0)
                            bb[2] = np.minimum(det[2] + margin, img_size[1])
                            bb[3] = np.minimum(det[3] + margin, img_size[0])
                            # make the bounding box square so the face keeps
                            # its aspect ratio in the crop
                            diff = (bb[3] - bb[1]) - (bb[2] - bb[0])
                            if diff > 0:
                                # taller than wide: widen symmetrically
                                side = bb[3] - bb[1]
                                bb[0] = np.maximum(bb[0] - diff / 2, 0)
                            else:
                                # wider than tall: heighten symmetrically
                                # (diff is negative here, so this moves the
                                # top edge up rather than down)
                                side = bb[2] - bb[0]
                                bb[1] = np.maximum(bb[1] + diff / 2, 0)
                            bb[3] = bb[1] + side
                            bb[2] = bb[0] + side

                            cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                            scaled = cropped
                            nrof_successfully_aligned += 1
                            misc.imsave(output_filename, scaled)
                        else:
                            print('Unable to align "%s"' % image_path)

    print('Total number of images: %d' % nrof_images_total)
    print('Number of successfully aligned images: %d' %
          nrof_successfully_aligned)
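
When MTCNN returns more than one box, the loop above keeps the face whose box area is largest after a penalty of twice the squared distance from the image centre ("some extra weight on the centering"). The same selection, pulled out into a small standalone numpy function for clarity:

import numpy as np

def pick_face_index(det, img_size):
    # det: (nrof_faces, 4) boxes as [x1, y1, x2, y2]; img_size: (height, width)
    sizes = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
    cx, cy = img_size[1] / 2, img_size[0] / 2
    offsets_sq = ((det[:, 0] + det[:, 2]) / 2 - cx) ** 2 + \
                 ((det[:, 1] + det[:, 3]) / 2 - cy) ** 2
    # Largest box wins, minus some extra weight on the centering
    return np.argmax(sizes - 2.0 * offsets_sq)
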
Example 15
def main(args):

    #network = importlib.import_module(args.model_def, 'inception_v3')
    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')

    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)

    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    with open(os.path.join(model_dir, 'args.txt'), 'w') as f:
        for arg in vars(args):
            f.write(arg + ' ' + str(getattr(args, arg)) + '\n')

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)

        # Read data and apply label preserving distortions
        image_batch, label_batch = facenet.read_and_augument_data(
            image_list, label_list, args.image_size, args.batch_size,
            args.max_nrof_epochs, args.random_rotate, args.random_crop,
            args.random_flip, args.nrof_preprocess_threads)
        print('Total number of classes: %d' % len(train_set))
        print('Total number of examples: %d' % len(image_list))

        # Node for input images
        image_batch = tf.identity(image_batch, name='input')

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        # Placeholder for phase_train
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        # Placeholder for keep probability
        keep_probability_placeholder = tf.placeholder(tf.float32,
                                                      name='keep_prob')

        # Build the inference graph
        # prelogits = network.inference(image_batch, keep_probability_placeholder,
        #    phase_train=phase_train_placeholder, weight_decay=args.weight_decay)
        batch_norm_params = {
            # Decay for the moving averages
            'decay': 0.995,
            # epsilon to prevent 0s in variance
            'epsilon': 0.001,
            # force in-place updates of mean and variance estimates
            'updates_collections': None,
            # Moving averages ends up in the trainable variables collection
            'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
            # Only update statistics during training mode
            'is_training': phase_train_placeholder
        }

        #prelogits, _ = network.inception_v3(image_batch, num_classes=len(train_set),is_training=True)
        prelogits, _ = network.inference(image_batch,
                                         args.keep_probability,
                                         phase_train=phase_train_placeholder,
                                         weight_decay=args.weight_decay)
        #prelogits = tf.identity(prelogits, name="prelogits")
        bottleneck = _fully_connected(prelogits,
                                      args.embedding_size,
                                      name='embedding')
        logits = _fully_connected(bottleneck, len(train_set), name='logits')
        """
        bottleneck = slim.fully_connected(prelogits, args.embedding_size, activation_fn=None, 
                weights_initializer=tf.truncated_normal_initializer(stddev=0.1), 
                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params,
                scope='Bottleneck', reuse=False)

        logits = slim.fully_connected(bottleneck, len(train_set), activation_fn=None,
                weights_initializer=tf.truncated_normal_initializer(stddev=0.1), 
                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                scope='Logits', reuse=False)

        logits = tf.identity(logits, name="logits")
        """
        # Add DeCov regularization loss
        if args.decov_loss_factor > 0.0:
            logits_decov_loss = facenet.decov_loss(
                logits) * args.decov_loss_factor
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                 logits_decov_loss)

        # Add center loss
        update_centers = tf.no_op('update_centers')
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, update_centers = facenet.center_loss(
                bottleneck, label_batch, args.center_loss_alfa, nrof_classes)
            tf.add_to_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES,
                prelogits_center_loss * args.center_loss_factor)

        # lfw.validate below needs the normalized embeddings
        embeddings = tf.nn.l2_normalize(bottleneck, 1, 1e-10, name='embeddings')

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch,
            logits=logits,
            name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        """
        # Multi-label loss: sigmoid loss
        sigmoid_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=label_batch, logits=logits, name='sigmoid_loss_per_example')
        sigmoid_loss_mean = tf.reduce_mean(sigmoid_loss, name='sigmoid_loss')
        """
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')
        #total_loss = tf.add_n([cross_entropy_mean], name='total_loss')

        # prediction
        prediction = tf.argmax(logits, axis=1, name='prediction')
        acc = slim.metrics.accuracy(predictions=tf.cast(prediction,
                                                        dtype=tf.int32),
                                    labels=tf.cast(label_batch,
                                                   dtype=tf.int32))

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables())

        # Create a saver
        # save_variables = list(set(tf.all_variables())-set([w])-set([b]))
        save_variables = tf.trainable_variables()
        saver = tf.train.Saver(save_variables, max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        # sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            if pretrained_model:
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, phase_train_placeholder,
                      learning_rate_placeholder, keep_probability_placeholder,
                      global_step, total_loss, acc, train_op, summary_op,
                      summary_writer, regularization_losses,
                      args.learning_rate_schedule_file, update_centers)

                # Evaluate on LFW
                if args.lfw_dir:
                    start_time = time.time()
                    _, _, accuracy, val, val_std, far = lfw.validate(
                        sess,
                        lfw_paths,
                        actual_issame,
                        args.seed,
                        args.batch_size,
                        image_batch,
                        phase_train_placeholder,
                        keep_probability_placeholder,
                        embeddings,
                        nrof_folds=args.lfw_nrof_folds)
                    print('Accuracy: %1.3f+-%1.3f' %
                          (np.mean(accuracy), np.std(accuracy)))
                    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' %
                          (val, val_std, far))
                    lfw_time = time.time() - start_time
                    # Add validation loss and accuracy to summary
                    summary = tf.Summary()
                    #pylint: disable=maybe-no-member
                    summary.value.add(tag='lfw/accuracy',
                                      simple_value=np.mean(accuracy))
                    summary.value.add(tag='lfw/val_rate', simple_value=val)
                    summary.value.add(tag='time/lfw', simple_value=lfw_time)
                    summary_writer.add_summary(summary, step)
                    with open(os.path.join(log_dir, 'lfw_result.txt'),
                              'at') as f:
                        f.write('%d\t%.5f\t%.5f\n' %
                                (step, np.mean(accuracy), val))

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

    return model_dir
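
All of these training loops schedule the learning rate with tf.train.exponential_decay(..., staircase=True). For reference, the staircase schedule reduces to a single line of arithmetic (a sketch with illustrative names):

def staircase_lr(base_lr, step, decay_epochs, epoch_size, decay_factor):
    # The rate is multiplied by decay_factor once per completed decay interval
    return base_lr * decay_factor ** (step // (decay_epochs * epoch_size))

# e.g. staircase_lr(0.1, 2500, 100, 10, 0.9) -> 0.1 * 0.9**2 = 0.081
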
Example 16
def main(args):
  
    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    if args.pretrained_model:
        print('Pre-trained model: %s' % os.path.expanduser(args.pretrained_model))
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
        
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
        
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,3), name='image_paths')
        labels_placeholder = tf.placeholder(tf.int64, shape=(None,3), name='labels')
        
        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                    dtypes=[tf.string, tf.int64],
                                    shapes=[(3,), (3,)],
                                    shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder])
        
        nrof_preprocess_threads = 4
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_png(file_contents)
                
                if args.random_crop:
                    image = tf.random_crop(image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)
    
                #pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])
    
        image_batch, labels_batch = tf.train.batch_join(
            images_and_labels, batch_size=batch_size_placeholder, 
            shapes=[(args.image_size, args.image_size, 3), ()], enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)

        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability, 
            phase_train=phase_train_placeholder, weight_decay=args.weight_decay)
        pre_embeddings = slim.fully_connected(prelogits, args.embedding_size, activation_fn=None, scope='Embeddings', reuse=False)
        #embedding_size = 1792

        embeddings = tf.nn.l2_normalize(pre_embeddings, 1, 1e-10, name='embeddings')
        # Split embeddings into anchor, positive and negative and calculate triplet loss
        anchor, positive, negative = tf.unstack(tf.reshape(embeddings, [-1,3,args.embedding_size]), 3, 1)
        triplet_loss = facenet.triplet_loss(anchor, positive, negative, args.alpha)
        
        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([triplet_loss] + regularization_losses, name='total_loss')

        # Create list with variables to restore
        restore_vars = []
        update_gradient_vars = []
        if args.pretrained_model:
            update_gradient_vars = tf.global_variables()
            for var in tf.global_variables():
                if not 'Embeddings/' in var.op.name:
                    restore_vars.append(var)
                #else:
                    #update_gradient_vars.append(var)
        else:
            restore_vars = tf.global_variables()
            update_gradient_vars = tf.global_variables()

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, update_gradient_vars)
        
        # Create a saver
        restore_saver = tf.train.Saver(restore_vars)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))        

        # Initialize variables
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            if args.pretrained_model:
                print('Restoring pretrained model: %s' % args.pretrained_model)
                restore_saver.restore(sess, os.path.expanduser(args.pretrained_model))

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, train_set, epoch, image_paths_placeholder, labels_placeholder, labels_batch,
                    batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder, enqueue_op, input_queue, global_step, 
                    embeddings, total_loss, train_op, summary_op, summary_writer, args.learning_rate_schedule_file,
                    args.embedding_size, anchor, positive, negative, triplet_loss)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, lfw_paths, embeddings, labels_batch, image_paths_placeholder, labels_placeholder, 
                            batch_size_placeholder, learning_rate_placeholder, phase_train_placeholder, enqueue_op, actual_issame, args.batch_size, 
                            args.seed, args.lfw_nrof_folds, log_dir, step, summary_writer, args.embedding_size)

    return model_dir
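
facenet.triplet_loss, applied above to the (anchor, positive, negative) split, implements the FaceNet objective: the anchor-positive distance should undercut the anchor-negative distance by at least the margin alpha. A minimal numpy sketch of that objective:

import numpy as np

def triplet_loss_sketch(anchor, positive, negative, alpha):
    # Squared L2 distances within each embedding triplet
    pos_dist = np.sum((anchor - positive) ** 2, axis=1)
    neg_dist = np.sum((anchor - negative) ** 2, axis=1)
    # Hinge: only triplets that violate the margin contribute
    return np.mean(np.maximum(pos_dist - neg_dist + alpha, 0.0))
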
Example 17
def main(args):
  
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir)
    
    print('Creating networks and loading parameters')
    
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, '../../data/')
    
    minsize = 20 # minimum size of face
    threshold = [ 0.6, 0.7, 0.7 ]  # thresholds for the three cascade stages
    factor = 0.709 # scale factor

    # Add a random key to the filename to allow alignment using multiple processes
    random_key = np.random.randint(0, high=99999)
    bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)
    
    with open(bounding_boxes_filename, "w") as text_file:
        nrof_images_total = 0
        nrof_successfully_aligned = 0
        if args.random_order:
            random.shuffle(dataset)
        for cls in dataset:
            output_class_dir = os.path.join(output_dir, cls.name)
            if not os.path.exists(output_class_dir):
                os.makedirs(output_class_dir)
                if args.random_order:
                    random.shuffle(cls.image_paths)
            for image_path in cls.image_paths:
                nrof_images_total += 1
                filename = os.path.splitext(os.path.split(image_path)[1])[0]
                output_filename = os.path.join(output_class_dir, filename+'.png')
                print(image_path)
                if not os.path.exists(output_filename):
                    try:
                        img = misc.imread(image_path)
                    except (IOError, ValueError, IndexError) as e:
                        errorMessage = '{}: {}'.format(image_path, e)
                        print(errorMessage)
                    else:
                        if img.ndim<2:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))
                            continue
                        if img.ndim == 2:
                            img = facenet.to_rgb(img)
                        img = img[:,:,0:3]
    
                        bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
                        nrof_faces = bounding_boxes.shape[0]
                        if nrof_faces>0:
                            det = bounding_boxes[:,0:4]
                            img_size = np.asarray(img.shape)[0:2]
                            if nrof_faces>1:
                                bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
                                img_center = img_size / 2
                                offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
                                offset_dist_squared = np.sum(np.power(offsets,2.0),0)
                                index = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
                                det = det[index,:]
                            det = np.squeeze(det)
                            bb = np.zeros(4, dtype=np.int32)
                            bb[0] = np.maximum(det[0]-args.margin/2, 0)
                            bb[1] = np.maximum(det[1]-args.margin/2, 0)
                            bb[2] = np.minimum(det[2]+args.margin/2, img_size[1])
                            bb[3] = np.minimum(det[3]+args.margin/2, img_size[0])
                            cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
                            scaled = misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear')
                            nrof_successfully_aligned += 1
                            misc.imsave(output_filename, scaled)
                            text_file.write('%s %d %d %d %d\n' % (output_filename, bb[0], bb[1], bb[2], bb[3]))
                        else:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))
                            
    print('Total number of images: %d' % nrof_images_total)
    print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
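
Both alignment examples promote grayscale inputs with facenet.to_rgb before cropping. Its effect is equivalent to stacking the single channel three times; a one-line numpy sketch:

import numpy as np

def to_rgb_sketch(img):
    # (h, w) grayscale -> (h, w, 3) by repeating the channel
    return np.stack([img, img, img], axis=-1)
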
Example 18
def generate_input_fn(set, mode=tf.estimator.ModeKeys.EVAL, batch_size=1):
    def parse_function(filename, label):
        image_string = tf.read_file(filename)
        # Don't use tf.image.decode_image, or the output shape will be undefined
        image = tf.image.decode_jpeg(image_string, channels=3)
        # This will convert to float values in [0, 1]
        image = tf.image.convert_image_dtype(image, tf.float32)
        image = tf.image.resize_images(image, [args.image_size, args.image_size])
        return image, label

    def _input_fn():
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        image_list, label_list = facenet.get_image_paths_and_labels(set)
        assert len(image_list) > 0, 'The training set should not be empty'
        dataset = tf.data.Dataset.from_tensor_slices((image_list, label_list))
        dataset = dataset.shuffle(len(image_list))
        dataset = dataset.map(parse_function, num_parallel_calls=args.nrof_preprocess_threads)

        # Apply the training-time augmentations only in TRAIN mode
        if is_training:
            dataset = dataset.map(train_preprocess, num_parallel_calls=args.nrof_preprocess_threads)
        dataset = dataset.batch(args.batch_size)
        dataset = dataset.prefetch(args.prefetch_value)

        # Pull one batch of (images, labels) from the dataset iterator
        images, labels = dataset.make_one_shot_iterator().get_next()
        features = {'images': images}
        return features, labels

    return _input_fn
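
The input_fn above maps a train_preprocess function that is not shown in this snippet. A plausible sketch, assuming the usual tf.data augmentation pattern (random flip plus colour jitter); the original's exact augmentations are unknown:

import tensorflow as tf

def train_preprocess(image, label):
    # Hypothetical augmentations; the original function is not shown
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_brightness(image, max_delta=32.0 / 255.0)
    image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
    # parse_function produced floats in [0, 1]; keep them there
    image = tf.clip_by_value(image, 0.0, 1.0)
    return image, label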

def get_feature_columns():
    feature_columns = {
        'images': tf.feature_column.numeric_column(
            'images', (IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH)),
    }
    return feature_columns

def model_fn(features, labels, mode, params):
    # Create the input layers from the features
    feature_columns = list(get_feature_columns().values())

    images = tf.feature_column.input_layer(
        features=features, feature_columns=feature_columns)

    images = tf.reshape(
        images, shape=(-1, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH))

    # Calculate logits through CNN  
    network = importlib.import_module(args.model_def)    

    learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
    batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
    phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
    image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
    labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
    control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')

    # Build the inference graph
    prelogits, _ = network.inference(images, args.keep_probability, 
        phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size, 
        weight_decay=args.weight_decay)
    logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None, 
            weights_initializer=slim.initializers.xavier_initializer(), 
            weights_regularizer=slim.l2_regularizer(args.weight_decay),
            scope='Logits', reuse=False)

    embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

    # Build a Graph that trains the model with one batch of examples and updates the model parameters
    train_op = facenet.train(total_loss, global_step, args.optimizer, 
        learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
    
    # Create a saver
    saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    if mode in (tf.estimator.ModeKeys.PREDICT, tf.estimator.ModeKeys.EVAL):
        predicted_indices = tf.argmax(input=logits, axis=1)
        probabilities = tf.nn.softmax(logits, name='softmax_tensor')

    if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
        global_step = tf.train.get_or_create_global_step()
        tf.logging.info("Global Step {}".format(global_step))
        # Norm for the prelogits
        eps = 1e-4
        prelogits_norm = tf.reduce_mean(tf.norm(tf.abs(prelogits)+eps, ord=args.prelogits_norm_p, axis=1))
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_norm * args.prelogits_norm_loss_factor)

        # Add center loss
        prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(params['learning_rate'], global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)
        
        correct_prediction = tf.cast(tf.equal(tf.argmax(logits, 1), tf.cast(label_batch, tf.int64)), tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)
        
        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')
        label_indices = tf.argmax(input=labels, axis=1)
        loss = tf.losses.softmax_cross_entropy(
            onehot_labels=labels, logits=logits)
        tf.summary.scalar('cross_entropy', loss)

    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'classes': predicted_indices,
            'probabilities': probabilities
        }
        export_outputs = {
            'predictions': tf.estimator.export.PredictOutput(predictions)
        }
        return tf.estimator.EstimatorSpec(
            mode, predictions=predictions, export_outputs=export_outputs)

    if mode == tf.estimator.ModeKeys.TRAIN:
         # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
        
        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
        
        log_dir = params['log_dir']
        stat_file_name = params['stat_file_name']

        summary_hook = tf.train.SummarySaverHook(
            SAVE_EVERY_N_STEPS,
            output_dir=log_dir,
            summary_op=tf.summary.merge_all())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op, training_hooks=[summary_hook])

    if mode == tf.estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            'accuracy': tf.metrics.accuracy(label_indices, predicted_indices)
        }
        return tf.estimator.EstimatorSpec(
            mode, loss=loss, eval_metric_ops=eval_metric_ops)

def main(args):  
    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')

    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)

    stat_file_name = os.path.join(log_dir, 'stat.h5')

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
        
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    dataset = facenet.get_dataset(args.data_dir)
    if args.filter_filename:
        dataset = filter_dataset(dataset, os.path.expanduser(args.filter_filename), 
            args.filter_percentile, args.filter_min_nrof_images_per_class)
        
    if args.validation_set_split_ratio>0.0:
        train_set, val_set = facenet.split_dataset(dataset, args.validation_set_split_ratio, args.min_nrof_val_images_per_class, 'SPLIT_IMAGES')
    else:
        train_set, val_set = dataset, []
        
    nrof_classes = len(train_set)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)

    image_list, label_list = facenet.get_image_paths_and_labels(train_set)
    val_image_list, val_label_list = facenet.get_image_paths_and_labels(val_set)

    print('Number of classes in training set: %d' % nrof_classes)
    print('Number of examples in training set: %d' % len(image_list))

    print('Number of classes in validation set: %d' % len(val_set))
    print('Number of examples in validation set: %d' % len(val_image_list))

    params = {'log_dir': log_dir, 'stat_file_name': stat_file_name}
        
    with tf.Graph().as_default():
        # trainset, valset

        # Create a queue that produces indices into the image_list and label_list 
       
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
        labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
        control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
 
        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction, allow_growth=True)
        session_config = tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False)
        run_config = tf.estimator.RunConfig(session_config=session_config,
                                            save_checkpoints_steps=args.save_checkpoints_steps,
                                            tf_random_seed=args.seed,
                                            model_dir=model_dir)

        estimator = tf.estimator.Estimator(model_fn=model_fn,
                                           model_dir=model_dir,
                                           config=run_config,
                                           params=params)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            nrof_steps = args.max_nrof_epochs*args.epoch_size
            nrof_val_samples = int(math.ceil(args.max_nrof_epochs / args.validate_every_n_epochs))   # Validate every validate_every_n_epochs as well as in the last epoch
            stat = {
                'loss': np.zeros((nrof_steps,), np.float32),
                'center_loss': np.zeros((nrof_steps,), np.float32),
                'reg_loss': np.zeros((nrof_steps,), np.float32),
                'xent_loss': np.zeros((nrof_steps,), np.float32),
                'prelogits_norm': np.zeros((nrof_steps,), np.float32),
                'accuracy': np.zeros((nrof_steps,), np.float32),
                'val_loss': np.zeros((nrof_val_samples,), np.float32),
                'val_xent_loss': np.zeros((nrof_val_samples,), np.float32),
                'val_accuracy': np.zeros((nrof_val_samples,), np.float32),
                'lfw_accuracy': np.zeros((args.max_nrof_epochs,), np.float32),
                'lfw_valrate': np.zeros((args.max_nrof_epochs,), np.float32),
                'learning_rate': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_train': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_validate': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_evaluate': np.zeros((args.max_nrof_epochs,), np.float32),
                'prelogits_hist': np.zeros((args.max_nrof_epochs, 1000), np.float32),
              }
            for epoch in range(1,args.max_nrof_epochs+1):
                step = sess.run(global_step, feed_dict=None)
                # Train for one epoch
                t = time.time()
                cont = train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, global_step, 
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file,
                    stat, cross_entropy_mean, accuracy, learning_rate,
                    prelogits, prelogits_center_loss, args.random_rotate, args.random_crop, args.random_flip, prelogits_norm, args.prelogits_hist_max, args.use_fixed_image_standardization)
                stat['time_train'][epoch-1] = time.time() - t
                
                if not cont:
                    break
                  
                t = time.time()
                if len(val_image_list)>0 and ((epoch-1) % args.validate_every_n_epochs == args.validate_every_n_epochs-1 or epoch==args.max_nrof_epochs):
                    validate(args, sess, epoch, val_image_list, val_label_list, enqueue_op, image_paths_placeholder, labels_placeholder, control_placeholder,
                        phase_train_placeholder, batch_size_placeholder, 
                        stat, total_loss, regularization_losses, cross_entropy_mean, accuracy, args.validate_every_n_epochs, args.use_fixed_image_standardization)
                stat['time_validate'][epoch-1] = time.time() - t

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, epoch)

                # Evaluate on LFW
                t = time.time()
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, 
                        embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer, stat, epoch, 
                        args.lfw_distance_metric, args.lfw_subtract_mean, args.lfw_use_flipped_images, args.use_fixed_image_standardization)
                stat['time_evaluate'][epoch-1] = time.time() - t

                print('Saving statistics')
                with h5py.File(stat_file_name, 'w') as f:
                    for key, value in stat.items():
                        f.create_dataset(key, data=value)
    
    return model_dir
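
The statistics written with h5py above can be read back the same way. A small companion sketch, assuming the stat.h5 file name used in the loop and that at least one training step ran:

import h5py
import numpy as np

with h5py.File('stat.h5', 'r') as f:          # stat_file_name from above
    loss = np.array(f['loss'])
# Steps that were never reached stay zero; drop them before inspecting
print('last recorded training loss: %.3f' % loss[loss > 0][-1])
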
Example 19
def main(args):
    funnel_cmd = 'funnelReal'
    funnel_model = 'people.train'

    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the output directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir)
    np.random.shuffle(dataset)
    # Scale the image such that the face fills the frame when cropped to crop_size
    #scale = float(args.face_size) / args.image_size
    with TemporaryDirectory() as tmp_dir:
        for cls in dataset:
            output_class_dir = os.path.join(output_dir, cls.name)
            tmp_output_class_dir = os.path.join(tmp_dir, cls.name)
            if not os.path.exists(output_class_dir) and not os.path.exists(
                    tmp_output_class_dir):
                print('Aligning class %s:' % cls.name)
                tmp_filenames = []
                if not os.path.exists(tmp_output_class_dir):
                    os.makedirs(tmp_output_class_dir)
                input_list_filename = os.path.join(tmp_dir, 'input_list.txt')
                output_list_filename = os.path.join(tmp_dir, 'output_list.txt')
                input_file = open(input_list_filename, 'w')
                output_file = open(output_list_filename, 'w')
                for image_path in cls.image_paths:
                    filename = os.path.split(image_path)[1]
                    input_file.write(image_path + '\n')
                    output_filename = os.path.join(tmp_output_class_dir,
                                                   filename)
                    output_file.write(output_filename + '\n')
                    tmp_filenames.append(output_filename)
                input_file.close()
                output_file.close()
                cmd = args.funnel_dir+funnel_cmd + ' ' + input_list_filename + \
                    ' ' + args.funnel_dir+funnel_model + ' ' + output_list_filename
                subprocess.call(cmd, shell=True)

                # Resize and crop images
                if not os.path.exists(output_class_dir):
                    os.makedirs(output_class_dir)
                scale = 1.0
                for tmp_filename in tmp_filenames:
                    img = misc.imread(tmp_filename)
                    img_scale = misc.imresize(img, scale)
                    sz1 = img.shape[1] / 2
                    sz2 = args.image_size / 2
                    img_crop = img_scale[int(sz1 - sz2):int(sz1 + sz2),
                                         int(sz1 - sz2):int(sz1 + sz2), :]
                    filename = os.path.splitext(
                        os.path.split(tmp_filename)[1])[0]
                    output_filename = os.path.join(output_class_dir,
                                                   filename + '.png')
                    print('Saving image %s' % output_filename)
                    misc.imsave(output_filename, img_crop)

                # Remove tmp directory with images
                shutil.rmtree(tmp_output_class_dir)
Example 20
def main(args):

    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    if not args.no_store_revision_info:
        src_path, _ = os.path.split(os.path.realpath(__file__))
        facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)

    logging.info('Model directory: %s' % model_dir)
    logging.info('Log directory: %s' % log_dir)
    logging.info('Train set size:%d' % len(train_set))
    if args.pretrained_model:
        logging.info('Pre-trained model: %s' %
                     os.path.expanduser(args.pretrained_model))

    if args.lfw_dir:
        logging.info('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        # pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        # lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
        lfw_paths, actual_issame, _ = lfw.get_paths(args.lfw_dir,
                                                    args.labels_file)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        image_paths_placeholder = tf.placeholder(tf.string,
                                                 shape=(None, 3),
                                                 name='image_paths')
        labels_placeholder = tf.placeholder(tf.int64,
                                            shape=(None, 3),
                                            name='labels')

        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                              dtypes=[tf.string, tf.int64],
                                              shapes=[(3, ), (3, )],
                                              shared_name=None,
                                              name=None)
        enqueue_op = input_queue.enqueue_many(
            [image_paths_placeholder, labels_placeholder])

        nrof_preprocess_threads = 4
        images_and_labels = []

        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            # _sess = tf.Session()
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_jpeg(file_contents)
                image = tf.image.resize_images(
                    image, [args.image_size, args.image_size])
                # Note: single-channel images would need
                # tf.image.grayscale_to_rgb(image) before set_shape below.
                try:
                    image = porcessor.preprocess_image(image,
                                                       args.image_size,
                                                       args.image_size,
                                                       is_training=True)
                except Exception as e:
                    logging.error(e)
                    continue
                #pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])

        image_batch, labels_batch = tf.train.batch_join(
            images_and_labels,
            batch_size=batch_size_placeholder,
            shapes=[(args.image_size, args.image_size, 3), ()],
            enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)

        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
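        # Registering both names lets this tensor be fetched later from a saved
        # graph as either "image_batch:0" or "input:0".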

        batch_norm_params = {
            # Decay for the moving averages
            'decay': 0.995,
            # epsilon to prevent 0s in variance
            'epsilon': 0.001,
            # force in-place updates of mean and variance estimates
            'updates_collections': None,
            # Moving averages end up in the trainable variables collection
            'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
            # Only update statistics during training mode
            'is_training': phase_train_placeholder
        }
        # Build the inference graph
        prelogits, _ = network.inference(image_batch,
                                         args.keep_probability,
                                         phase_train=phase_train_placeholder,
                                         weight_decay=args.weight_decay)
        pre_embeddings = slim.fully_connected(
            prelogits,
            args.embedding_size,
            activation_fn=None,
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(args.weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params,
            scope='Bottleneck',
            reuse=False)

        embeddings = tf.nn.l2_normalize(pre_embeddings,
                                        1,
                                        1e-10,
                                        name='embeddings')
        # Split embeddings into anchor, positive and negative and calculate triplet loss
        logging.info('embeddings shape: %s' % embeddings.shape)
        anchor, positive, negative = tf.unstack(
            tf.reshape(embeddings, [-1, 3, args.embedding_size]), 3, 1)
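        # Paths were enqueued in rows of three, so the batch is laid out as
        # consecutive (anchor, positive, negative) triplets; reshaping to
        # [-1, 3, embedding_size] and unstacking along axis 1 recovers the
        # three groups of embeddings.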
        triplet_loss = facenet.triplet_loss(anchor, positive, negative,
                                            args.alpha)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([triplet_loss] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables())

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=100)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

        # Initialize variables
        sess.run(tf.global_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})
        sess.run(tf.local_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})

        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if args.pretrained_model:
                logging.info('Restoring pretrained model: %s' %
                             args.pretrained_model)
                saver.restore(sess, os.path.expanduser(args.pretrained_model))

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, train_set, epoch, image_paths_placeholder,
                      labels_placeholder, labels_batch, batch_size_placeholder,
                      learning_rate_placeholder, phase_train_placeholder,
                      enqueue_op, input_queue, global_step, embeddings,
                      total_loss, train_op, summary_op, summary_writer,
                      args.learning_rate_schedule_file, args.embedding_size,
                      anchor, positive, negative, triplet_loss)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, lfw_paths, embeddings, labels_batch,
                             image_paths_placeholder, labels_placeholder,
                             batch_size_placeholder, learning_rate_placeholder,
                             phase_train_placeholder, enqueue_op,
                             actual_issame, args.batch_size,
                             args.lfw_nrof_folds, log_dir, step,
                             summary_writer, args.embedding_size)

    sess.close()
    return model_dir
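The facenet.triplet_loss implementation is not shown in this listing; the sketch
below is a minimal NumPy version of the standard FaceNet triplet loss it
presumably computes, L = mean(max(0, ||a - p||^2 - ||a - n||^2 + alpha)).

import numpy as np

def triplet_loss_np(anchor, positive, negative, alpha):
    # Squared Euclidean distances for anchor-positive and anchor-negative pairs
    pos_dist = np.sum(np.square(anchor - positive), axis=1)
    neg_dist = np.sum(np.square(anchor - negative), axis=1)
    # Hinge: only triplets that violate the margin contribute to the loss
    return np.mean(np.maximum(pos_dist - neg_dist + alpha, 0.0))

# Example: three random 128-d embeddings per role, margin alpha = 0.2
rng = np.random.RandomState(0)
a, p, n = (rng.randn(3, 128) for _ in range(3))
print(triplet_loss_np(a, p, n, alpha=0.2))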
Example 21
def main(args):
    #define three networks
    network_G = importlib.import_module(args.model_def)              #import G network
    network_D = importlib.import_module(args.discriminator_def)      #import D network (takes G's output)
    network_F = importlib.import_module(args.Net_def)                #F network (same architecture as G)

    #model dir
    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')      #model name (a timestamp)
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):                                   # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):                                 # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    train_set = facenet.get_dataset(args.data_dir)            #get train dataset
    if args.filter_filename:                           # optional dataset filtering (not used here)
        train_set = filter_dataset(train_set, os.path.expanduser(args.filter_filename), 
            args.filter_percentile, args.filter_min_nrof_images_per_class)
    nrof_classes = len(train_set)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)       #facenet model
        print('Pre-trained model: %s' % pretrained_model)
   
    # not used here
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)



    # copied from facenet
    def get_image_paths_and_labels(dataset):
        image_paths_flat = []
        labels_flat = []
        for i in range(len(dataset)):
            image_paths_flat += dataset[i].image_paths
            labels_flat += [i] * len(dataset[i].image_paths)  # one integer class label per image
        return image_paths_flat, labels_flat
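    # Hypothetical example: with dataset = [ImageClass('A', ['a1', 'a2']),
    # ImageClass('B', ['b1'])] this returns (['a1', 'a2', 'b1'], [0, 0, 1]),
    # i.e. one integer label per image, indexed by class position.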
    
    
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        # Get a list of image paths and their labels
        image_list, label_list = get_image_paths_and_labels(train_set)   #[path, path, ..., path], [1,1,2,2,2,3,3,4,...,9]
        assert len(image_list)>0, 'The dataset should not be empty'

        # Create a queue that produces indices into the image_list and label_list 
        #labels and images into queue
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        
        range_size = array_ops.shape(labels)[0]
        
        index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
                             shuffle=True, seed=None, capacity=32) 
        index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
        
        #placeholder
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')    #connected to validate
        
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')

        image_embeddings_placeholder = tf.placeholder(tf.float32, shape=(None,1,args.embedding_size), name='image_embs')

        labels_placeholder = tf.placeholder(tf.int64, shape=(None,1), name='labels')
        
        input_queue = data_flow_ops.FIFOQueue(capacity=100000, dtypes=[tf.string, tf.int64,tf.float32],
                                    shapes=[(1,), (1,),(1,args.embedding_size,)], shared_name=None, name=None)   #(image path, labels, embedding)
        
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder,image_embeddings_placeholder], name='enqueue_op')
        
        nrof_preprocess_threads = 4
        images_and_labels = []

        #preprocess some images to extend database

        for _ in range(nrof_preprocess_threads):
            filenames, label, image_embeddings = input_queue.dequeue()    # get (image path, label, embedding) from the queue
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_image(file_contents, channels=3)  # decode PNG/JPEG into a 3-channel uint8 tensor

                if args.random_rotate:
                    image = tf.py_func(facenet.random_rotate_image, [image], tf.uint8)
                if args.random_crop:
                    image = tf.random_crop(image, [args.image_size, args.image_size, 3])
                else:  # default path: deterministic center crop or pad
                    image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)

                #pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))  # standardize and collect
            images_and_labels.append([images, label, image_embeddings])

        #dequeue from the queue into batches
        image_batch, label_batch, embeddings_batch = tf.train.batch_join(
            images_and_labels, batch_size=batch_size_placeholder,
            shapes=[(args.image_size, args.image_size, 3), (), (args.embedding_size,)], enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)
        print('Shape of embeddings_batch:')
        print(embeddings_batch.shape)
        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')
        embeddings_batch = tf.identity(embeddings_batch, 'emb_batch')
        
        print('Total number of classes: %d' % nrof_classes)
        print('Length of image list: %d' % len(image_list))
        print('Building training graph ...')

        # Build the inference graphs (Inception-ResNet models)
        prelogits, ep = network_G.inference(image_batch, args.keep_probability,
            phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size, 
            weight_decay=args.weight_decay)
        prelogits2, _ = network_D.inference(prelogits,ep, args.keep_probability,
            phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size, 
            weight_decay=args.weight_decay)
        prelogits3, ep2 = network_F.inference(image_batch, args.keep_probability,
            phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size, 
            weight_decay=args.weight_decay)
        logits = slim.fully_connected(prelogits2, len(train_set), activation_fn=None, 
                weights_initializer=tf.truncated_normal_initializer(stddev=0.1), 
                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                scope='DiscriminatorLogits', reuse=False)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')     #G feature
        embeddings2 = tf.nn.l2_normalize(prelogits3, 1, 1e-10, name='embeddings2')    #F feature
        labels = tf.nn.l2_normalize(prelogits2, 1, 1e-10, name='labels')              #D decision
        print('embeddings shape:', embeddings.shape, embeddings_batch.shape)

        emb_loss = args.beta * tf.reduce_mean(tf.square(embeddings - embeddings_batch))  # MSE between current G embeddings and the cached pretrained embeddings

        # Add center loss for D
        center_loss_all=0.0
        if args.center_loss_factor>0.0:
            prelogits_center_loss, _ = facenet.center_loss(prelogits2, label_batch, args.center_loss_alfa, nrof_classes)
            center_loss_all = prelogits_center_loss * args.center_loss_factor
            # Do not add the center loss to regularization
            #regularization_losses = tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)
        
        # Calculate the reg losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

        #********************************************consistency_loss************************************************************************
        consistency_loss_G = args.betaG * tf.reduce_mean(tf.square(embeddings - embeddings2))
        consistency_loss_F = args.betaF * tf.reduce_mean(tf.square(embeddings - embeddings2))  #different coefficients

        #*******************************************unsupervised id loss*********************************************************************
        #anchor, positive, negative = tf.unstack(tf.reshape(embeddings, [-1,3,args.embedding_size]), 3, 1)
        anchor = embeddings      # G features
        positive = embeddings2   # F features
        negative = embeddings    # negatives reuse the G features themselves
        id_loss = args.betaI * identity_loss(anchor, positive, negative, args.alpha, args.batch_size)

        #************************************************************************************************************************************


        # losses of the three networks:
        # total_loss_G drives G to fool the age discriminator (negated CE and
        # center loss) while keeping its embeddings close to the pretrained ones
        total_loss_G = tf.add_n([-cross_entropy_mean] + [-center_loss_all] + [emb_loss] + [consistency_loss_G] + regularization_losses, name='total_loss_G')
        total_loss_D = tf.add_n([cross_entropy_mean] + [center_loss_all] + regularization_losses, name='total_loss_D')
        total_loss_F = tf.add_n([id_loss] + [consistency_loss_F] + regularization_losses, name='total_loss_F')


        #optimizers
        optimizer = args.optimizer
        loss_averages_op1 = facenet._add_loss_summaries(total_loss_D)
        loss_averages_op2 = facenet._add_loss_summaries(total_loss_G)
        loss_averages_op3 = facenet._add_loss_summaries(total_loss_F)  # adds moving-average loss summaries

        learning_rate2=learning_rate
        learning_rate3=learning_rate
        if optimizer=='ADAGRAD':
            opt1 = tf.train.AdagradOptimizer(learning_rate)
            opt2 = tf.train.AdagradOptimizer(learning_rate2)
            opt3 = tf.train.AdagradOptimizer(learning_rate3)
        elif optimizer=='ADADELTA':
            opt1 = tf.train.AdadeltaOptimizer(learning_rate, rho=0.9, epsilon=1e-6)
            opt2 = tf.train.AdadeltaOptimizer(learning_rate2, rho=0.9, epsilon=1e-6)
            opt3 = tf.train.AdadeltaOptimizer(learning_rate3, rho=0.9, epsilon=1e-6)
        elif optimizer=='ADAM':
            opt1 = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1)
            opt2 = tf.train.AdamOptimizer(learning_rate2, beta1=0.9, beta2=0.999, epsilon=0.1)
            opt3 = tf.train.AdamOptimizer(learning_rate3, beta1=0.9, beta2=0.999, epsilon=0.1)
        elif optimizer=='RMSPROP':
            opt1 = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)
            opt2 = tf.train.RMSPropOptimizer(learning_rate2, decay=0.9, momentum=0.9, epsilon=1.0)
            opt3 = tf.train.RMSPropOptimizer(learning_rate3, decay=0.9, momentum=0.9, epsilon=1.0)
        elif optimizer=='MOM':
            opt1 = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
            opt2 = tf.train.MomentumOptimizer(learning_rate2, 0.9, use_nesterov=True)
            opt3 = tf.train.MomentumOptimizer(learning_rate3, 0.9, use_nesterov=True)
        else:
            raise ValueError('Invalid optimization algorithm')
        t_vars=tf.trainable_variables()

        varlog=open('tvar.txt','w')
        for var in t_vars:
            varlog.write(var.name+'\n')
        varlog.close()

        #variables for each network
        d_vars=[var for var in t_vars if 'Discriminator' in var.name]
        g_vars=[var for var in t_vars if 'InceptionResnetV1' in var.name]    # G network variables
        f_vars=[var for var in t_vars if 'InceptionResnetV2' in var.name]    # F network variables
        # gradients: classification loss for D, adversarial loss for G, identity loss for F
        dt_grads = opt1.compute_gradients(total_loss_D, d_vars)
        df_grads = opt2.compute_gradients(total_loss_G, g_vars)
        di_grads = opt3.compute_gradients(total_loss_F, f_vars)
        dt_train_op = opt1.apply_gradients(dt_grads, global_step=global_step)
        df_train_op = opt2.apply_gradients(df_grads, global_step=global_step)
        di_train_op = opt3.apply_gradients(di_grads, global_step=global_step)
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)


        # Add histograms for gradients
        
        for grad, var in dt_grads:
            if grad is not None:
                tf.summary.histogram(var.op.name + '/gradients', grad)
        for grad, var in df_grads:
            if grad is not None:
                tf.summary.histogram(var.op.name + '/gradients', grad)
        for grad, var in di_grads:
            if grad is not None:
                tf.summary.histogram(var.op.name + '/gradients', grad)
  
        # Track the moving averages of all trainable variables.
        variable_averages = tf.train.ExponentialMovingAverage(
            args.moving_average_decay, global_step)
        variables_averages_op = variable_averages.apply(tf.trainable_variables())
  
        with tf.control_dependencies([dt_train_op,df_train_op, variables_averages_op]):
            train_op = tf.no_op(name='train')
        
        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)  # save all trainable variables
        saver_G = tf.train.Saver(g_vars, max_to_keep=3)   # only save G variables
        saver_F = tf.train.Saver(f_vars, max_to_keep=3)   # only save F variables

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)


        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver_G.restore(sess, pretrained_model)

            print('Running pretrain embeddings')
            len_all = len(label_list)
            emb_list = np.zeros((len_all, args.embedding_size))
            print('Number of pretrain passes: %d' %
                  (len_all // (args.batch_size * args.epoch_size) + 1))

            #obtain the pretrained features
            for ii in range(0,len_all//(args.batch_size*args.epoch_size)+1):
                emb_list=first_train(args, sess, 0, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    image_embeddings_placeholder,emb_list,embeddings,label_batch,
                    learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step, 
                    total_loss_D, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file)


            # Dump the pretrained embeddings to a log file for inspection
            with open(os.path.join(log_dir, 'pretrain_embeddings.txt'), 'w') as emb_file:
                for ii in range(0, len_all):
                    print(emb_list[ii, :], file=emb_file)

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size // 2  # global_step is incremented by both the D and G train ops
                # Train for one epoch
                train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    image_embeddings_placeholder,emb_list,
                    learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step, 
                    total_loss_D, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file, anchor, positive, negative, id_loss, consistency_loss_G, emb_loss)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, 
                        embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer)
    return model_dir
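The sign structure of the three objectives above is the adversarial trick: D
minimizes the age-classification losses while G minimizes their negation, so
each step pushes the two networks in opposite directions on the shared terms.
A tiny NumPy sketch of that bookkeeping, with purely illustrative loss values:

import numpy as np

cross_entropy_mean, center_loss_all = 1.25, 0.40       # hypothetical batch losses
emb_loss, consistency_loss_G = 0.10, 0.05
id_loss, consistency_loss_F, reg = 0.30, 0.08, 0.02

total_loss_D = cross_entropy_mean + center_loss_all + reg    # D: classify age well
total_loss_G = (-cross_entropy_mean - center_loss_all        # G: confuse D...
                + emb_loss + consistency_loss_G + reg)       # ...but stay near the pretrained embeddings
total_loss_F = id_loss + consistency_loss_F + reg            # F: preserve identity, agree with G
print(total_loss_D, total_loss_G, total_loss_F)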
Example 22
def main(args):
    project_dir = os.path.dirname(os.getcwd())
    network = importlib.import_module(args.model_def)

    with open(join(project_dir, 'config.yaml'), 'r') as f:
        cfg = yaml.safe_load(f)

    if cfg['specs']['set_gpu']:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg['base_conf']['gpu_num'])

    subdir = '%s_center_loss_factor_%1.2f' % (args.data_dir, args.center_loss_factor)

    log_dir = os.path.join(project_dir, 'fine_tuning_process', 'logs', subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(project_dir, 'fine_tuning_process', 'models', subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    data_dir = os.path.join(project_dir, 'fine_tuning_process', 'data', args.data_dir, 'train')

    train_set = facenet.get_dataset(data_dir)
    if args.filter_filename:
        train_set = filter_dataset(train_set, os.path.expanduser(args.filter_filename),
                                   args.filter_percentile, args.filter_min_nrof_images_per_class)
    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.join(project_dir, 'fine_tuning_process', 'models', args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    lfw_dir = os.path.join(project_dir, 'fine_tuning_process', 'data', args.data_dir, 'test')
    print('LFW directory: %s' % lfw_dir)
    # Read the file containing the pairs used for testing
    lfw_pairs = os.path.join(project_dir, 'fine_tuning_process', 'data', args.data_dir, 'pairs.txt')
    pairs = lfw.read_pairs(lfw_pairs)
    # Get the paths for the corresponding images
    lfw_paths, actual_issame = lfw.get_paths_personal(lfw_dir, pairs)

    data_paths = tools.get_format_file(data_dir, 2, r'.+\.jpeg$')

    meta_data_path = os.path.join(model_dir, 'metadata.tsv')
    with open(meta_data_path, 'w') as f:
        f.write("Index\tLabel\n")
        for d in data_paths:
            tmp = os.path.split(d)
            t = os.path.split(tmp[0])
            f.write("%s\t%s\n" % (tmp[1], t[1]))

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # get soft labels
        with open(join(data_dir, 'soft_label.pk'), 'rb') as f:
            confidence_score = pickle.load(f)
        image_list, soft_labels_list = facenet.get_image_paths_and_soft_labels(train_set, confidence_score)
        soft_labels_array = np.array(soft_labels_list)
        soft_labels = ops.convert_to_tensor(soft_labels_array, dtype=tf.float32)

        assert len(image_list) > 0, 'The dataset should not be empty'

        # Create a queue that produces indices into the image_list and label_list
        range_size = array_ops.shape(soft_labels)[0]
        index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
                                                    shuffle=True, seed=None, capacity=32)

        index_dequeue_op = index_queue.dequeue_many(args.batch_size * args.epoch_size, 'index_dequeue')

        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 1), name='image_paths')

        soft_labels_placeholder = tf.placeholder(tf.float32, shape=(None, nrof_classes), name='soft_labels')

        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                              dtypes=[tf.string, tf.float32],
                                              shapes=[(1,), (nrof_classes,)],
                                              shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, soft_labels_placeholder],
                                              name='enqueue_op')

        nrof_preprocess_threads = 4
        images_and_softlabels = []
        for _ in range(nrof_preprocess_threads):
            filenames, soft_labels = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_image(file_contents, channels=3)
                if args.random_rotate:
                    image = tf.py_func(facenet.random_rotate_image, [image], tf.uint8)
                if args.random_crop:
                    image = tf.random_crop(image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)

                # pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))

            images_and_softlabels.append([images, soft_labels])

        image_batch, soft_label_batch = tf.train.batch_join(
            images_and_softlabels, batch_size=batch_size_placeholder)
        image_batch = tf.squeeze(image_batch, 1)
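        # batch_join is not given enqueue_many=True here, so each dequeued
        # element keeps the length-1 `images` list as an extra leading
        # dimension; the squeeze removes it.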

        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        soft_label_batch = tf.identity(soft_label_batch, 'soft_label_batch')

        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))

        print('Building training graph')

        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability,
                                         phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size,
                                         weight_decay=args.weight_decay)
        # fine_tuning = slim.fully_connected(prelogits, args.embedding_size, activation_fn=None,
        #                            scope='FineTuning', reuse=False, trainable=True)

        logits = slim.fully_connected(prelogits, nrof_classes, activation_fn=None,
                                      weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                      weights_regularizer=slim.l2_regularizer(args.weight_decay),
                                      scope='Logits', reuse=False)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Add center loss
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, _ = facenet.fuzzy_center_loss(prelogits, soft_label_batch,
                                                                 args.center_loss_alfa, args.fuzzier, nrof_classes)
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)
            tf.summary.scalar('prelogits_center_loss', prelogits_center_loss)


        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
                                                   args.learning_rate_decay_epochs * args.epoch_size,
                                                   args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=soft_label_batch, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)

        # Create a saver
        all_vars = tf.trainable_variables()
        var_to_restore = [v for v in all_vars if not v.name.startswith('Logits')]
        saver = tf.train.Saver(var_to_restore, max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():
            if args.pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            epoch = 0
            pre_acc = -1
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, image_list, soft_labels_array, index_dequeue_op, enqueue_op,
                      image_paths_placeholder, soft_labels_placeholder,
                      learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step,
                      total_loss, train_op, summary_op, summary_writer, regularization_losses,
                      args.learning_rate_schedule_file, logits)

                # Save variables and the metagraph if it doesn't exist already
                # Evaluate on LFW
                if args.lfw_dir:
                    acc = evaluate(sess, enqueue_op, image_paths_placeholder, soft_labels_placeholder, phase_train_placeholder,
                             batch_size_placeholder,
                             embeddings, soft_label_batch, lfw_paths, actual_issame, args.lfw_batch_size,
                             args.lfw_nrof_folds, log_dir, step, summary_writer, nrof_classes, prelogits_center_loss)
                    if acc > pre_acc:
                        save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)
                        pre_acc = acc
    return model_dir
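The fuzzy/soft-label variant above feeds full probability vectors into
softmax_cross_entropy_with_logits_v2 instead of integer class ids. A minimal
NumPy sketch of that loss, with hypothetical values:

import numpy as np

def soft_cross_entropy(logits, soft_labels):
    # Numerically stable log-softmax, then the usual -sum(p * log q)
    z = logits - logits.max(axis=1, keepdims=True)
    log_softmax = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
    return -np.mean(np.sum(soft_labels * log_softmax, axis=1))

logits = np.array([[2.0, 0.5, -1.0]])
soft_labels = np.array([[0.7, 0.2, 0.1]])   # confidence scores, not one-hot
print(soft_cross_entropy(logits, soft_labels))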
Example 23
def main(args):
    sleep(random.random())
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir)

    print('Creating networks and loading parameters')

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # per-stage thresholds (P-Net, R-Net, O-Net)
    factor = 0.709  # scale factor

    # Add a random key to the filename to allow alignment using multiple processes
    random_key = np.random.randint(0, high=99999)
    bounding_boxes_filename = os.path.join(
        output_dir, 'bounding_boxes_%05d.txt' % random_key)

    with open(bounding_boxes_filename, "w") as text_file:
        nrof_images_total = 0
        nrof_successfully_aligned = 0
        if args.random_order:
            random.shuffle(dataset)
        for cls in dataset:
            output_class_dir = os.path.join(output_dir, cls.name)
            if not os.path.exists(output_class_dir):
                os.makedirs(output_class_dir)
                if args.random_order:
                    random.shuffle(cls.image_paths)
            for image_path in cls.image_paths:
                nrof_images_total += 1
                filename = os.path.splitext(os.path.split(image_path)[1])[0]
                output_filename = os.path.join(output_class_dir,
                                               filename + '.png')
                print(image_path)
                if not os.path.exists(output_filename):
                    try:
                        img = misc.imread(image_path)
                    except (IOError, ValueError, IndexError) as e:
                        errorMessage = '{}: {}'.format(image_path, e)
                        print(errorMessage)
                    else:
                        if img.ndim < 2:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))
                            continue
                        if img.ndim == 2 or img.shape[2] < 3:
                            img = facenet.to_rgb(img)
                        img = img[:, :, 0:3]
                        maxSize = 2000
                        if img.shape[0] > maxSize:
                            img = misc.imresize(
                                img,
                                (maxSize,
                                 int((maxSize * img.shape[1]) / img.shape[0])),
                                interp='bilinear')
                            print('Image has been resized to the new shape: ',
                                  img.shape)
                        if img.shape[1] > maxSize:
                            img = misc.imresize(img, (int(
                                (maxSize * img.shape[0]) / img.shape[1]),
                                                      maxSize),
                                                interp='bilinear')
                            print('Image has been resized to the new shape: ',
                                  img.shape)

                        bounding_boxes, points = align.detect_face.detect_face(
                            img, minsize, pnet, rnet, onet, threshold, factor)
                        nrof_faces = bounding_boxes.shape[0]
                        if nrof_faces > 0:
                            index = 0
                            filename_split = os.path.splitext(output_filename)
                            img_size = np.asarray(img.shape)[0:2]
                            for i in range(nrof_faces):
                                det = bounding_boxes[i, 0:4]
                                det = np.squeeze(det)
                                bb = np.zeros(4, dtype=np.int32)
                                bb[0] = np.maximum(det[0] - args.margin / 2, 0)
                                bb[1] = np.maximum(det[1] - args.margin / 2, 0)
                                bb[2] = np.minimum(det[2] + args.margin / 2,
                                                   img_size[1])
                                bb[3] = np.minimum(det[3] + args.margin / 2,
                                                   img_size[0])
                                cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                                scaled = misc.imresize(
                                    cropped,
                                    (args.image_size, args.image_size),
                                    interp='bilinear')
                                output_filename = filename_split[
                                    0] + '_' + str(i) + filename_split[1]
                                misc.imsave(output_filename, scaled)
                            nrof_successfully_aligned += 1
                            text_file.write(
                                '%s %d %d %d %d\n' %
                                (output_filename, bb[0], bb[1], bb[2], bb[3]))
                        else:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))

    print('Total number of images: %d' % nrof_images_total)
    print('Number of successfully aligned images: %d' %
          nrof_successfully_aligned)
    return nrof_successfully_aligned
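The crop logic above grows each detected box by margin/2 on every side and
clamps it to the image bounds before resizing. The same arithmetic in
isolation:

import numpy as np

def expand_and_clamp(det, margin, img_h, img_w):
    # det = [x1, y1, x2, y2] from the detector; returns integer crop bounds
    bb = np.zeros(4, dtype=np.int32)
    bb[0] = np.maximum(det[0] - margin / 2, 0)
    bb[1] = np.maximum(det[1] - margin / 2, 0)
    bb[2] = np.minimum(det[2] + margin / 2, img_w)
    bb[3] = np.minimum(det[3] + margin / 2, img_h)
    return bb

print(expand_and_clamp(np.array([10, 20, 150, 180]), margin=44, img_h=200, img_w=160))
# -> [  0   0 160 200]: the margin pushed the box against all four image edges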
Example 24
def main(args):

    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)

        # Read data and apply label preserving distortions
        image_batch, label_batch = facenet.read_and_augument_data(
            image_list, label_list, args.image_size, args.batch_size,
            args.max_nrof_epochs, args.random_crop, args.random_flip,
            args.nrof_preprocess_threads)
        print('Total number of classes: %d' % len(train_set))
        print('Total number of examples: %d' % len(image_list))

        # Node for input images
        image_batch = tf.identity(image_batch, name='input')

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        # Placeholder for phase_train
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        # Build the inference graph
        prelogits, _ = network.inference(image_batch,
                                         args.keep_probability,
                                         phase_train=phase_train_placeholder,
                                         weight_decay=args.weight_decay)
        with tf.variable_scope('Logits'):
            n = int(prelogits.get_shape()[1])
            m = len(train_set)
            w = tf.get_variable(
                'w',
                shape=[n, m],
                dtype=tf.float32,
                initializer=tf.truncated_normal_initializer(stddev=0.1),
                regularizer=slim.l2_regularizer(args.weight_decay),
                trainable=True)
            b = tf.get_variable('b', [m], initializer=None, trainable=True)
            logits = tf.matmul(prelogits, w) + b

        # Add DeCov regularization loss
        if args.decov_loss_factor > 0.0:
            logits_decov_loss = facenet.decov_loss(
                logits) * args.decov_loss_factor
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                 logits_decov_loss)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.scalar_summary('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits, label_batch, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.all_variables())

        # Create a saver
        save_variables = list(set(tf.all_variables()) - set([w]) - set([b]))
        saver = tf.train.Saver(save_variables, max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        sess.run(tf.initialize_all_variables())
        sess.run(tf.initialize_local_variables())
        summary_writer = tf.train.SummaryWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            if pretrained_model:
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, phase_train_placeholder,
                      learning_rate_placeholder, global_step, total_loss,
                      train_op, summary_op, summary_writer,
                      regularization_losses, args.learning_rate_schedule_file)

                # Evaluate on LFW
                if args.lfw_dir:
                    start_time = time.time()
                    _, _, accuracy, val, val_std, far = lfw.validate(
                        sess,
                        lfw_paths,
                        actual_issame,
                        args.seed,
                        args.batch_size,
                        image_batch,
                        phase_train_placeholder,
                        embeddings,
                        nrof_folds=args.lfw_nrof_folds)
                    print('Accuracy: %1.3f+-%1.3f' %
                          (np.mean(accuracy), np.std(accuracy)))
                    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' %
                          (val, val_std, far))
                    lfw_time = time.time() - start_time
                    # Add validation loss and accuracy to summary
                    summary = tf.Summary()
                    #pylint: disable=maybe-no-member
                    summary.value.add(tag='lfw/accuracy',
                                      simple_value=np.mean(accuracy))
                    summary.value.add(tag='lfw/val_rate', simple_value=val)
                    summary.value.add(tag='time/lfw', simple_value=lfw_time)
                    summary_writer.add_summary(summary, step)
                    with open(os.path.join(log_dir, 'lfw_result.txt'),
                              'at') as f:
                        f.write('%d\t%.5f\t%.5f\n' %
                                (step, np.mean(accuracy), val))

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

    return model_dir
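All of these training scripts schedule the learning rate with staircase
exponential decay: the base rate is multiplied by decay_factor once every
decay_epochs * epoch_size steps. The equivalent arithmetic in NumPy:

import numpy as np

def staircase_decay(base_lr, global_step, decay_steps, decay_factor):
    # floor() gives the "staircase": the rate is constant within each interval
    return base_lr * decay_factor ** np.floor(global_step / decay_steps)

for step in [0, 999, 1000, 2500]:
    print(step, staircase_decay(0.1, step, decay_steps=1000, decay_factor=0.9))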
Example 25
def main(args):

    # Imports the models.inception_resnet_v1 model; see args.model_def for how
    # to switch to a different model
    network = importlib.import_module(args.model_def)
    # Name the model after the current date and time
    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    # Logs are saved under e.g. c:\users\Administrator\logs\facenet\
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write the arguments to a text file in the log directory
    facenet.write_arguments_to_file(args, os.path.join(log_dir,
                                                       'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    #arg_string:'E:/facenet/train_tripletloss.py'   output_dir:'C:\\Users\\Administrator/logs/facenet\\20180314-181556'
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    # Get the dataset: originally train_set was a list of ImageClass objects,
    # each holding a list of image paths (image_paths) and the person's name
    # (name), read from a parent path such as 'E:/facenet/data/lfw_160';
    # here the data comes from an HDF5 file instead
    train_set, class_indices = radar_io.get_h5dataset(args.data_dir, 7)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    if args.pretrained_model:  # check whether a pretrained model was given; it is restored below if so
        print('Pre-trained model: %s' %
              os.path.expanduser(args.pretrained_model))

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    # Build the graph
    # (the with statement guarantees cleanup runs whether or not an exception occurs)
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        # switches between training and testing behaviour
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        # batch image input
        image_placeholder = tf.placeholder(tf.float32,
                                           shape=(None, train_set.shape[1],
                                                  train_set.shape[2],
                                                  train_set.shape[3]),
                                           name='image_paths')
        # image labels
        labels_placeholder = tf.placeholder(tf.int32,
                                            shape=(None),
                                            name='labels')

        # Build the inference graph
        # prelogits is the output of the last layer
        prelogits, end_points = network.inference(
            image_placeholder,
            args.keep_probability,
            phase_train=phase_train_placeholder,
            bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)

        # L2 normalization:
        # embeddings = tf.nn.l2_normalize(input, axis (0: per column, 1: per row), epsilon, name='embeddings')
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
        # Split embeddings into anchor, positive and negative and calculate triplet loss
        anchor, positive, negative = tf.unstack(
            tf.reshape(embeddings, [-1, 3, args.embedding_size]), 3, 1)
        triplet_loss = facenet.triplet_loss(anchor, positive, negative,
                                            args.alpha)
        # Apply exponential decay to the learning rate
        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Compute the losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        # Add the L2 regularization terms to the loss
        total_loss = tf.add_n([triplet_loss] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        # Choose the optimizer and compute gradients of the loss; global_step is incremented by 1 on every parameter update
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables())

        # Create a saver for saving and restoring model parameters
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph. gpu_memory_fraction caps the fraction of GPU memory that may be allocated
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

        # Initialize variables
        sess.run(tf.global_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})
        sess.run(tf.local_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})

        # Write the TensorBoard event log
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        # Create a coordinator for the input threads
        coord = tf.train.Coordinator()
        # Start the queue runners
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if args.pretrained_model:
                print('Restoring pretrained model: %s' % args.pretrained_model)
                facenet.load_model(args.pretrained_model)

            # Training and validation loop
            epoch = 0
            # Number of full passes over the data (default 500)
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                # epoch_size is the number of batches per epoch; the current epoch is derived from the global step and is used to look up the learning rate
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, class_indices, train_set, epoch,
                      image_placeholder, labels_placeholder,
                      learning_rate_placeholder, phase_train_placeholder,
                      global_step, embeddings, total_loss, train_op,
                      summary_op, summary_writer,
                      args.learning_rate_schedule_file, args.embedding_size,
                      anchor, positive, negative, triplet_loss)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, lfw_paths, embeddings, image_placeholder,
                             labels_placeholder, learning_rate_placeholder,
                             phase_train_placeholder, actual_issame,
                             args.batch_size, args.lfw_nrof_folds, log_dir,
                             step, summary_writer, args.embedding_size)

    return model_dir
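
# The reshape/unstack pattern above assumes the input batch is laid out as
# consecutive (anchor, positive, negative) rows. A minimal numpy sketch of the
# same split and of the triplet margin loss (illustrative names, not facenet's API):
def triplet_loss_np(embeddings, alpha=0.2, embedding_size=128):
    """embeddings: (3*N, embedding_size), rows ordered anchor, positive, negative."""
    import numpy as np
    triplets = embeddings.reshape(-1, 3, embedding_size)
    anchor, positive, negative = triplets[:, 0], triplets[:, 1], triplets[:, 2]
    pos_dist = np.sum(np.square(anchor - positive), axis=1)
    neg_dist = np.sum(np.square(anchor - negative), axis=1)
    # Hinge on the margin alpha, then average over the batch
    return np.mean(np.maximum(pos_dist - neg_dist + alpha, 0.0))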
def main(args):
    sleep(random.random())
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir)

    print('Loading predictor model')

    # Create a HOG face detector using the built-in dlib class
    face_detector = dlib.get_frontal_face_detector()
    face_pose_predictor = dlib.shape_predictor(args.model)
    face_aligner = openface.AlignDlib(args.model)

    # Add a random key to the filename to allow alignment using multiple processes
    random_key = np.random.randint(0, high=99999)

    nrof_images_total = 0
    nrof_successfully_aligned = 0
    nrof_unchanged_images = 0
    print('Total number of classes:', len(dataset))
    print('Number of classes to align:', args.num_classes)
    for cls in dataset[:args.num_classes]:
        print('Processing class:', cls.name)
        output_class_dir = os.path.join(output_dir, cls.name)
        if not os.path.exists(output_class_dir):
            os.makedirs(output_class_dir)
        for image_path in cls.image_paths:
            nrof_images_total += 1
            filename = os.path.splitext(os.path.split(image_path)[1])[0]
            output_filename = os.path.join(output_class_dir, filename + '.png')
            if not os.path.exists(output_filename):
                try:
                    img = misc.imread(image_path)
                except (IOError, ValueError, IndexError) as e:
                    errorMessage = '{}: {}'.format(image_path, e)
                    print(errorMessage)
                else:
                    if img.ndim < 2:
                        print('Unable to align "%s"' % image_path)
                        continue
                    if img.ndim == 2:
                        img = facenet.to_rgb(img)
                    img = img[:, :, 0:3]

                    # Run the HOG face detector on the image data
                    detected_face = face_detector(img, 1)

                    if len(detected_face) > 0:
                        # Detected faces are returned in a list, so pull out the first one.
                        face_rect = detected_face[0]
                        # Get the face's pose landmarks
                        pose_landmarks = face_pose_predictor(img, face_rect)
                        # Use openface to calculate and perform the face alignment
                        alignedFace = face_aligner.align(
                            160,
                            img,
                            face_rect,
                            landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
                        misc.imsave(output_filename, alignedFace)
                        nrof_successfully_aligned += 1

                    else:
                        filename_base, file_extension = os.path.splitext(
                            output_filename)
                        # Append '_failed' to indicate that dlib could not detect a face in this image.
                        output_filename = "{}{}".format(
                            filename_base + '_failed', file_extension)
                        misc.imsave(output_filename, img)
                        nrof_unchanged_images += 1

    print('Total number of images:', nrof_images_total)
    print('Number of successfully aligned images:', nrof_successfully_aligned)
    print(
        'Number of unchanged images (dlib could not detect a face in these images):',
        nrof_unchanged_images)
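
# For reference, the dlib HOG detector used above returns a list of
# dlib.rectangle objects. A minimal standalone usage sketch (the image path
# is supplied by the caller; scipy.misc is used for I/O as in the snippet):
def detect_faces_dlib(image_path):
    import dlib
    from scipy import misc
    detector = dlib.get_frontal_face_detector()
    img = misc.imread(image_path)
    rects = detector(img, 1)  # 1 = upsample the image once before detecting
    return [(r.left(), r.top(), r.right(), r.bottom()) for r in rects]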
def main(args):
    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir,
                                                       'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    if args.filter_filename:
        train_set = filter_dataset(train_set,
                                   os.path.expanduser(args.filter_filename),
                                   args.filter_percentile,
                                   args.filter_min_nrof_images_per_class)
    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list) > 0, 'The dataset should not be empty'

        # Create a queue that produces indices into the image_list and label_list
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size,
                                                    num_epochs=None,
                                                    shuffle=True,
                                                    seed=None,
                                                    capacity=32)

        index_dequeue_op = index_queue.dequeue_many(
            args.batch_size * args.epoch_size, 'index_dequeue')

        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        image_paths_placeholder = tf.placeholder(tf.string,
                                                 shape=(None, 1),
                                                 name='image_paths')

        labels_placeholder = tf.placeholder(tf.int64,
                                            shape=(None, 1),
                                            name='labels')

        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                              dtypes=[tf.string, tf.int64],
                                              shapes=[(1, ), (1, )],
                                              shared_name=None,
                                              name=None)
        enqueue_op = input_queue.enqueue_many(
            [image_paths_placeholder, labels_placeholder], name='enqueue_op')

        nrof_preprocess_threads = 4
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_image(file_contents, channels=3)
                if args.random_rotate:
                    image = tf.py_func(facenet.random_rotate_image, [image],
                                       tf.uint8)
                if args.random_crop:
                    image = tf.random_crop(
                        image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(
                        image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)

                # pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])

        image_batch, label_batch = tf.train.batch_join(
            images_and_labels,
            batch_size=batch_size_placeholder,
            shapes=[(args.image_size, args.image_size, 3), ()],
            enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)
        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')

        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))

        print('Building training graph')

        # Build the inference graph
        prelogits, _ = network.inference(
            image_batch,
            args.keep_probability,
            phase_train=phase_train_placeholder,
            bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)
        logits = slim.fully_connected(
            prelogits,
            len(train_set),
            activation_fn=None,
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(args.weight_decay),
            scope='Logits',
            reuse=False)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Add center loss
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, _ = facenet.center_loss(
                prelogits, label_batch, args.center_loss_alfa, nrof_classes)
            tf.add_to_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES,
                prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch,
            logits=logits,
            name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables(), args.log_histograms)

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, image_list, label_list,
                      index_dequeue_op, enqueue_op, image_paths_placeholder,
                      labels_placeholder, learning_rate_placeholder,
                      phase_train_placeholder, batch_size_placeholder,
                      global_step, total_loss, train_op, summary_op,
                      summary_writer, regularization_losses,
                      args.learning_rate_schedule_file)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder,
                             labels_placeholder, phase_train_placeholder,
                             batch_size_placeholder, embeddings, label_batch,
                             lfw_paths, actual_issame, args.lfw_batch_size,
                             args.lfw_nrof_folds, log_dir, step,
                             summary_writer)
    return model_dir
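
# facenet.center_loss above keeps one center per class and penalizes the
# distance of each prelogit vector to its class center while nudging the
# centers toward the batch. A hedged numpy sketch of that update
# (illustrative names; the centers array is updated in place):
def center_loss_np(features, labels, centers, alfa=0.95):
    """features: (N, D); labels: (N,) int; centers: (C, D), updated in place."""
    import numpy as np
    diff = centers[labels] - features
    # Move each class center a fraction (1 - alfa) toward its batch members
    np.subtract.at(centers, labels, (1 - alfa) * diff)
    # The loss is measured against the pre-update centers
    return np.mean(np.square(diff))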
def main(args):
  
    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    if args.pretrained_model:
        print('Pre-trained model: %s' % os.path.expanduser(args.pretrained_model))
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
        
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for input images
        images_placeholder = tf.placeholder(tf.float32, shape=(None, args.image_size, args.image_size, 3), name='input')

        # Placeholder for phase_train
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')

        # Build the inference graph
        prelogits, _ = network.inference(images_placeholder, args.keep_probability, 
            phase_train=True, weight_decay=args.weight_decay)
        pre_embeddings = slim.fully_connected(prelogits, 128, activation_fn=None, scope='Embeddings', reuse=False)

        # Split example embeddings into anchor, positive and negative and calculate triplet loss
        embeddings = tf.nn.l2_normalize(pre_embeddings, 1, 1e-10, name='embeddings')
        anchor, positive, negative = tf.split(0, 3, embeddings)
        triplet_loss = facenet.triplet_loss(anchor, positive, negative, args.alpha)
        
        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.scalar_summary('learning_rate', learning_rate)

        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([triplet_loss] + regularization_losses, name='total_loss')

        # Create list with variables to restore
        restore_vars = []
        update_gradient_vars = []
        if args.pretrained_model:
            for var in tf.all_variables():
                if 'Embeddings/' not in var.op.name:
                    restore_vars.append(var)
                else:
                    update_gradient_vars.append(var)
        else:
            restore_vars = tf.all_variables()
            update_gradient_vars = tf.all_variables()

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, update_gradient_vars)
        
        # Create a saver
        restore_saver = tf.train.Saver(restore_vars)
        saver = tf.train.Saver(tf.all_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))        

        # Initialize variables
        sess.run(tf.initialize_all_variables())
        sess.run(tf.initialize_local_variables())

        summary_writer = tf.train.SummaryWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            if args.pretrained_model:
                restore_saver.restore(sess, os.path.expanduser(args.pretrained_model))

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                step = train(args, sess, train_set, epoch, images_placeholder, 
                    learning_rate_placeholder, global_step, embeddings, total_loss, train_op, summary_op, summary_writer)
                if args.lfw_dir:
                    _, _, accuracy, val, val_std, far = lfw.validate(sess, lfw_paths,
                        actual_issame, args.seed, 60, images_placeholder, phase_train_placeholder, embeddings, nrof_folds=args.lfw_nrof_folds)
                    print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
                    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
                    # Add validation loss and accuracy to summary
                    summary = tf.Summary()
                    #pylint: disable=maybe-no-member
                    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
                    summary.value.add(tag='lfw/val_rate', simple_value=val)
                    summary_writer.add_summary(summary, step)

                # Save the model checkpoint
                print('Saving checkpoint')
                checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    return model_dir
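
# tf.train.exponential_decay with staircase=True, as used above, steps the
# learning rate down once per decay interval. The equivalent closed form,
# handy as a quick sanity check:
def staircase_lr(base_lr, global_step, decay_epochs, epoch_size, decay_factor):
    import numpy as np
    decay_steps = decay_epochs * epoch_size
    return base_lr * decay_factor ** np.floor(global_step / float(decay_steps))
# e.g. staircase_lr(0.1, 250000, 100, 1000, 0.9) == 0.1 * 0.9 ** 2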
def main(args):
    sleep(random.random())
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = get_dataset(args.input_dir)
    print('Creating networks and loading parameters')
    
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
    minsize = 20 # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
    factor = 0.709 # scale factor

    # Add a random key to the filename to allow alignment using multiple processes
    random_key = np.random.randint(0, high=99999)
    bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)

    start = time.time()

    with open(bounding_boxes_filename, "w") as text_file:
        nrof_images_total = 0
        nrof_successfully_aligned = 0
        if args.random_order:
            random.shuffle(dataset)
        for cls in dataset:
            output_class_dir = os.path.join(output_dir, cls.name)
            if not os.path.exists(output_class_dir):
                os.makedirs(output_class_dir)
                if args.random_order:
                    random.shuffle(cls.videoset)
            for video in cls.videoset:
                output_video_dir=os.path.join(output_class_dir, video.name)
                if not os.path.exists(output_video_dir):
                    os.makedirs(output_video_dir)

            for video in cls.videoset:
                output_video_dir = os.path.join(output_class_dir, video.name)
                for image_path in video.image_paths:
                    nrof_images_total += 1
                    filename = os.path.splitext(os.path.split(image_path)[1])[0]
                    output_filename = os.path.join(output_video_dir, filename+'.png')
                    if not os.path.exists(output_filename):
                        try:
                            img = misc.imread(image_path)
                        except (IOError, ValueError, IndexError) as e:
                            errorMessage = '{}: {}'.format(image_path, e)
                            print(errorMessage)
                        else:
                            if img.ndim<2:  # skip images with fewer than two dimensions
                                print('Unable to align "%s"' % image_path)
                                text_file.write('%s\n' % (output_filename))
                                continue
                            if img.ndim == 2:  # convert grayscale images to RGB
                                img = facenet.to_rgb(img)
                            img = img[:,:,0:3]  # keep the first three channels (height x width x RGB)

                            bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
                            nrof_faces = bounding_boxes.shape[0]
                            if nrof_faces>0:
                                det = bounding_boxes[:,0:4]
                                det_arr = []
                                img_size = np.asarray(img.shape)[0:2]
                                if nrof_faces>1:
                                    # print("There are %d faces" % nrof_faces)
                                    if args.detect_multiple_faces:
                                        temp_arr = []
                                        for i in range(nrof_faces):
                                            temp_arr.append(np.squeeze(det[i]))  # drop the singleton dimensions from the box array
                                        det_arr.append(temp_arr)
                                    else:
                                        bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
                                        img_center = img_size / 2
                                        offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
                                        offset_dist_squared = np.sum(np.power(offsets,2.0),0)
                                        index = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
                                        det_arr.append(det[index,:])
                                else:
                                    det_arr.append(np.squeeze(det))
                                for i, det in enumerate(det_arr):
                                    raw_pic = Image.open(image_path)
                                    draw_pic = ImageDraw.Draw(raw_pic)
                                    if isinstance(det,list):
                                        for eachfacedet in det:
                                            eachfacedet=np.squeeze(eachfacedet)
                                            bb = np.zeros(4, dtype=np.int32)
                                            bb[0] = np.maximum(eachfacedet[0] - args.margin / 2, 0)
                                            bb[1] = np.maximum(eachfacedet[1] - args.margin / 2, 0)
                                            bb[2] = np.minimum(eachfacedet[2] + args.margin / 2, img_size[1])
                                            bb[3] = np.minimum(eachfacedet[3] + args.margin / 2, img_size[0])
                                            draw_pic.line((bb[0], bb[1], bb[2], bb[1]), "red", width=2)
                                            draw_pic.line((bb[0], bb[1], bb[0], bb[3]), "red", width=2)
                                            draw_pic.line((bb[2], bb[3], bb[2], bb[1]), "red", width=2)
                                            draw_pic.line((bb[2], bb[3], bb[0], bb[3]), "red", width=2)
                                    else:
                                        det = np.squeeze(det)
                                        bb = np.zeros(4, dtype=np.int32)
                                        bb[0] = np.maximum(det[0]-args.margin/2, 0)
                                        bb[1] = np.maximum(det[1]-args.margin/2, 0)
                                        bb[2] = np.minimum(det[2]+args.margin/2, img_size[1])
                                        bb[3] = np.minimum(det[3]+args.margin/2, img_size[0])
                                        # draw the boxing of the face
                                        # draw four lines to make a rectangle
                                        draw_pic.line((bb[0], bb[1], bb[2], bb[1]), "red", width=2)
                                        draw_pic.line((bb[0], bb[1], bb[0], bb[3]), "red", width=2)
                                        draw_pic.line((bb[2], bb[3], bb[2], bb[1]), "red", width=2)
                                        draw_pic.line((bb[2], bb[3], bb[0], bb[3]), "red", width=2)
                                        # Crop the detected face region and scale it to the output size
                                        cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
                                        scaled = misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear')
                                    nrof_successfully_aligned += 1
                                    # Build the output filename
                                    filename_base, file_extension = os.path.splitext(output_filename)
                                    output_filename_n = "{}{}".format(filename_base, file_extension)
                                    if isinstance(det, list):
                                        # Multiple faces: save the full frame with the boxes drawn on it
                                        # ('scaled' is only defined in the single-face branch above)
                                        raw_pic.save(output_filename_n)
                                    else:
                                        misc.imsave(output_filename_n, scaled)
                                    text_file.write('%s %d %d %d %d\n' % (output_filename_n, bb[0], bb[1], bb[2], bb[3]))
                            else:
                                print('Unable to align "%s"' % image_path)
                                text_file.write('%s\n' % (output_filename))

    end = time.time()
    print('Total processing time: %d s (%g images per second)' % (end-start, nrof_images_total/(end-start)))
    print('Total number of images: %d' % nrof_images_total)
    print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
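
# When several faces are found, the loop above keeps the box that maximizes
# area minus a penalty on its distance from the image center. The same
# heuristic as a standalone helper (logic taken directly from the loop above):
def pick_main_face(det, img_size):
    """det: (N, 4) boxes as (x1, y1, x2, y2); img_size: (height, width)."""
    import numpy as np
    bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
    img_center = np.asarray(img_size) / 2
    offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],
                         (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
    offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
    # Some extra weight on the centering, as in the alignment code
    return det[np.argmax(bounding_box_size - offset_dist_squared * 2.0), :]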
def main(args):

    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)

        # Read data and apply label preserving distortions
        image_batch, label_batch = facenet.read_and_augument_data(
            image_list, label_list, args.image_size, args.batch_size,
            args.max_nrof_epochs, args.random_crop, args.random_flip,
            args.nrof_preprocess_threads)
        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))

        print('Building training graph')

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        # Build the inference graph
        prelogits, _ = network.inference(image_batch,
                                         args.keep_probability,
                                         phase_train=True,
                                         weight_decay=args.weight_decay)
        logits = slim.fully_connected(
            prelogits,
            len(train_set),
            activation_fn=None,
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(args.weight_decay),
            scope='Logits',
            reuse=False)

        # Add DeCov regularization loss
        if args.decov_loss_factor > 0.0:
            logits_decov_loss = facenet.decov_loss(
                logits) * args.decov_loss_factor
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                 logits_decov_loss)

        # Add center loss
        update_centers = tf.no_op('update_centers')
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, update_centers = facenet.center_loss(
                prelogits, label_batch, args.center_loss_alfa)
            tf.add_to_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES,
                prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.scalar_summary('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits, label_batch, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.all_variables(), args.log_histograms)

        # Evaluation
        print('Building evaluation graph')
        lfw_label_list = range(0, len(lfw_paths))
        assert (
            len(lfw_paths) % args.lfw_batch_size == 0
        ), "The number of images in the LFW test set need to be divisible by the lfw_batch_size"
        eval_image_batch, eval_label_batch = facenet.read_and_augument_data(
            lfw_paths,
            lfw_label_list,
            args.image_size,
            args.lfw_batch_size,
            None,
            False,
            False,
            args.nrof_preprocess_threads,
            shuffle=False)
        # Node for input images
        eval_image_batch.set_shape((None, args.image_size, args.image_size, 3))
        eval_image_batch = tf.identity(eval_image_batch, name='input')
        eval_prelogits, _ = network.inference(eval_image_batch,
                                              1.0,
                                              phase_train=False,
                                              weight_decay=0.0,
                                              reuse=True)
        eval_embeddings = tf.nn.l2_normalize(eval_prelogits,
                                             1,
                                             1e-10,
                                             name='embeddings')

        # Create a saver
        saver = tf.train.Saver(tf.all_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        sess.run(tf.initialize_all_variables())
        sess.run(tf.initialize_local_variables())
        summary_writer = tf.train.SummaryWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, learning_rate_placeholder,
                      global_step, total_loss, train_op, summary_op,
                      summary_writer, regularization_losses,
                      args.learning_rate_schedule_file, update_centers)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, eval_embeddings, eval_label_batch,
                             actual_issame, args.lfw_batch_size, args.seed,
                             args.lfw_nrof_folds, log_dir, step,
                             summary_writer)

    return model_dir
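
# facenet.decov_loss above implements the DeCov regularizer (Cogswell et al.):
# half the squared Frobenius norm of the batch covariance of the activations,
# minus its diagonal, so only cross-dimension correlations are penalized.
# A hedged numpy sketch of that quantity:
def decov_loss_np(xs):
    """xs: (N, D) activations; returns the DeCov penalty."""
    import numpy as np
    z = xs - xs.mean(axis=0)
    cov = z.T.dot(z) / xs.shape[0]              # (D, D) batch covariance
    frob_sqr = np.sum(np.square(cov))           # squared Frobenius norm
    diag_sqr = np.sum(np.square(np.diag(cov)))  # variances stay unpenalized
    return 0.5 * (frob_sqr - diag_sqr)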
def main(args):
    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir,
                                                       'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    if args.pretrained_model:
        print('Pre-trained model: %s' %
              os.path.expanduser(args.pretrained_model))

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        image_paths_placeholder = tf.placeholder(tf.string,
                                                 shape=(None, 3),
                                                 name='image_paths')
        labels_placeholder = tf.placeholder(tf.int64,
                                            shape=(None, 3),
                                            name='labels')

        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                              dtypes=[tf.string, tf.int64],
                                              shapes=[(3, ), (3, )],
                                              shared_name=None,
                                              name=None)
        enqueue_op = input_queue.enqueue_many(
            [image_paths_placeholder, labels_placeholder])

        nrof_preprocess_threads = 4
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_image(file_contents, channels=3)

                if args.random_crop:
                    image = tf.random_crop(
                        image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(
                        image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)

                #pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])

        image_batch, labels_batch = tf.train.batch_join(
            images_and_labels,
            batch_size=batch_size_placeholder,
            shapes=[(args.image_size, args.image_size, 3), ()],
            enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)
        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        labels_batch = tf.identity(labels_batch, 'label_batch')

        # Build the inference graph
        prelogits, _ = network.inference(
            image_batch,
            args.keep_probability,
            phase_train=phase_train_placeholder,
            bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
        # Split embeddings into anchor, positive and negative and calculate triplet loss
        anchor, positive, negative = tf.unstack(
            tf.reshape(embeddings, [-1, 3, args.embedding_size]), 3, 1)
        triplet_loss = facenet.triplet_loss(anchor, positive, negative,
                                            args.alpha)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([triplet_loss] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables())

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

        # Initialize variables
        sess.run(tf.global_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})
        sess.run(tf.local_variables_initializer(),
                 feed_dict={phase_train_placeholder: True})

        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if args.pretrained_model:
                print('Restoring pretrained model: %s' % args.pretrained_model)
                saver.restore(sess, os.path.expanduser(args.pretrained_model))

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, train_set, epoch, image_paths_placeholder,
                      labels_placeholder, labels_batch, batch_size_placeholder,
                      learning_rate_placeholder, phase_train_placeholder,
                      enqueue_op, input_queue, global_step, embeddings,
                      total_loss, train_op, summary_op, summary_writer,
                      args.learning_rate_schedule_file, args.embedding_size,
                      anchor, positive, negative, triplet_loss)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, lfw_paths, embeddings, labels_batch,
                             image_paths_placeholder, labels_placeholder,
                             batch_size_placeholder, learning_rate_placeholder,
                             phase_train_placeholder, enqueue_op,
                             actual_issame, args.batch_size,
                             args.lfw_nrof_folds, log_dir, step,
                             summary_writer, args.embedding_size)

    return model_dir
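
# tf.image.per_image_standardization, applied in the preprocessing threads
# above, scales each image to zero mean and unit variance with a floor on the
# stddev. The equivalent numpy computation, for reference:
def per_image_standardization_np(image):
    import numpy as np
    image = image.astype(np.float32)
    # TF floors the stddev at 1/sqrt(num_elements) to avoid dividing by ~0
    adjusted_stddev = max(image.std(), 1.0 / np.sqrt(image.size))
    return (image - image.mean()) / adjusted_stddev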
def main(args):
    sleep(random.random())
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir)

    print('Creating networks and loading parameters')

    thresh = 0.8

    gpuid = 0
    detector = RetinaFace('/content/insightface/models/R50', 0, gpuid, 'net3')

    # Add a random key to the filename to allow alignment using multiple processes
    random_key = np.random.randint(0, high=99999)
    bounding_boxes_filename = os.path.join(
        output_dir, 'bounding_boxes_%05d.txt' % random_key)

    with open(bounding_boxes_filename, "w") as text_file:
        nrof_images_total = 0
        nrof_successfully_aligned = 0
        if args.random_order:
            random.shuffle(dataset)
        for cls in dataset:
            output_class_dir = os.path.join(output_dir, cls.name)
            if not os.path.exists(output_class_dir):
                os.makedirs(output_class_dir)
                if args.random_order:
                    random.shuffle(cls.image_paths)
            for image_path in cls.image_paths:
                nrof_images_total += 1
                filename = os.path.splitext(os.path.split(image_path)[1])[0]
                output_filename = os.path.join(output_class_dir,
                                               filename + '.png')
                print(image_path)
                if not os.path.exists(output_filename):
                    try:
                        img = cv2.imread(image_path)
                    except (IOError, ValueError, IndexError) as e:
                        errorMessage = '{}: {}'.format(image_path, e)
                        print(errorMessage)
                    else:
                        # cv2.imread returns None on failure instead of raising
                        if img is None:
                            print('Unable to read "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))
                            continue
                        if img.ndim < 2:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))
                            continue
                        if img.ndim == 2:
                            img = facenet.to_rgb(img)
                        img = img[:, :, 0:3]
                        im_shape = img.shape
                        scales = [1024, 1980]
                        target_size = scales[0]
                        max_size = scales[1]
                        im_size_min = np.min(im_shape[0:2])
                        im_size_max = np.max(im_shape[0:2])
                        im_scale = float(target_size) / float(im_size_min)
                        if np.round(im_scale * im_size_max) > max_size:
                            im_scale = float(max_size) / float(im_size_max)
                        scales = [im_scale]
                        flip = False
                        bounding_boxes, _ = detector.detect(img,
                                                            thresh,
                                                            scales=scales,
                                                            do_flip=flip)
                        nrof_faces = bounding_boxes.shape[0]
                        if nrof_faces > 0:
                            det = bounding_boxes[:, 0:4]
                            det_arr = []
                            img_size = np.asarray(img.shape)[0:2]
                            if nrof_faces > 1:
                                if args.detect_multiple_faces:
                                    for i in range(nrof_faces):
                                        det_arr.append(np.squeeze(det[i]))
                                else:
                                    bounding_box_size = (
                                        det[:, 2] - det[:, 0]) * (det[:, 3] -
                                                                  det[:, 1])
                                    img_center = img_size / 2
                                    offsets = np.vstack([
                                        (det[:, 0] + det[:, 2]) / 2 -
                                        img_center[1],
                                        (det[:, 1] + det[:, 3]) / 2 -
                                        img_center[0]
                                    ])
                                    offset_dist_squared = np.sum(
                                        np.power(offsets, 2.0), 0)
                                    index = np.argmax(
                                        bounding_box_size -
                                        offset_dist_squared * 2.0
                                    )  # some extra weight on the centering
                                    det_arr.append(det[index, :])
                            else:
                                det_arr.append(np.squeeze(det))

                            for i, det in enumerate(det_arr):
                                det = np.squeeze(det)
                                bb = np.zeros(4, dtype=np.int32)
                                bb[0] = np.maximum(det[0] - args.margin / 2, 0)
                                bb[1] = np.maximum(det[1] - args.margin / 2, 0)
                                bb[2] = np.minimum(det[2] + args.margin / 2,
                                                   img_size[1])
                                bb[3] = np.minimum(det[3] + args.margin / 2,
                                                   img_size[0])
                                cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                                scaled = cv2.resize(
                                    cropped,
                                    (args.image_size, args.image_size),
                                    interpolation=cv2.INTER_LINEAR)
                                nrof_successfully_aligned += 1
                                filename_base, file_extension = os.path.splitext(
                                    output_filename)
                                if args.detect_multiple_faces:
                                    output_filename_n = "{}_{}{}".format(
                                        filename_base, i, file_extension)
                                else:
                                    output_filename_n = "{}{}".format(
                                        filename_base, file_extension)
                                cv2.imwrite(output_filename_n, scaled)
                                text_file.write('%s %d %d %d %d\n' %
                                                (output_filename_n, bb[0],
                                                 bb[1], bb[2], bb[3]))
                        else:
                            print('Unable to align "%s"' % image_path)
                            text_file.write('%s\n' % (output_filename))

    print('Total number of images: %d' % nrof_images_total)
    print('Number of successfully aligned images: %d' %
          nrof_successfully_aligned)
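# Each successfully aligned face above is logged to text_file as "<output_path> x1 y1 x2 y2", while images
# that fail detection get a line with the path only. A minimal sketch of reading that log back (the log's
# file name is whatever was passed to open() for text_file, which is not shown in this excerpt):
def read_bounding_boxes(bbox_file):
    # Returns a dict mapping each aligned output path to its [x1, y1, x2, y2] box
    boxes = {}
    with open(bbox_file, 'r') as f:
        for line in f:
            parts = line.strip().split()
            if len(parts) == 5:  # skip the path-only lines written for failed images
                boxes[parts[0]] = [int(v) for v in parts[1:]]
    return boxes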
def main(args):
    align = align_dlib.AlignDlib(os.path.expanduser(args.dlib_face_predictor))
    landmarkIndices = align_dlib.AlignDlib.OUTER_EYES_AND_NOSE
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir)
    random.shuffle(dataset)
    # Scale the image such that the face fills the frame when cropped to crop_size
    scale = float(args.face_size) / args.image_size
    nrof_images_total = 0
    nrof_prealigned_images = 0
    nrof_successfully_aligned = 0
    for cls in dataset:
        output_class_dir = os.path.join(output_dir, cls.name)
        if not os.path.exists(output_class_dir):
            os.makedirs(output_class_dir)
        random.shuffle(cls.image_paths)
        for image_path in cls.image_paths:
            nrof_images_total += 1
            filename = os.path.splitext(os.path.split(image_path)[1])[0]
            output_filename = os.path.join(output_class_dir, filename+'.png')
            if not os.path.exists(output_filename):
                try:
                    img = misc.imread(image_path)
                except (IOError, ValueError, IndexError) as e:
                    errorMessage = '{}: {}'.format(image_path, e)
                    print(errorMessage)
                else:
                    if img.ndim == 2:
                        img = facenet.to_rgb(img)
                    if args.use_center_crop:
                        scaled = misc.imresize(img, args.prealigned_scale, interp='bilinear')
                        sz1 = scaled.shape[1]//2
                        sz2 = args.image_size//2
                        aligned = scaled[(sz1-sz2):(sz1+sz2),(sz1-sz2):(sz1+sz2),:]
                    else:
                        aligned = align.align(args.image_size, img, landmarkIndices=landmarkIndices, 
                                              skipMulti=False, scale=scale)
                    if aligned is not None:
                        print(image_path)
                        nrof_successfully_aligned += 1
                        misc.imsave(output_filename, aligned)
                    elif args.prealigned_dir:
                        # Face detection failed. Use center crop from pre-aligned dataset
                        class_name = os.path.split(output_class_dir)[1]
                        image_path_without_ext = os.path.join(os.path.expanduser(args.prealigned_dir), 
                                                              class_name, filename)
                        # Find the extension of the image
                        exts = ('jpg', 'png')
                        image_path = ''
                        for ext in exts:
                            temp_path = image_path_without_ext + '.' + ext
                            if os.path.exists(temp_path):
                                image_path = temp_path
                                break
                        try:
                            img = misc.imread(image_path)
                        except (IOError, ValueError, IndexError) as e:
                            errorMessage = '{}: {}'.format(image_path, e)
                            print(errorMessage)
                        else:
                            scaled = misc.imresize(img, args.prealigned_scale, interp='bilinear')
                            sz1 = scaled.shape[1]//2
                            sz2 = args.image_size//2
                            cropped = scaled[(sz1-sz2):(sz1+sz2),(sz1-sz2):(sz1+sz2),:]
                            print(image_path)
                            nrof_prealigned_images += 1
                            misc.imsave(output_filename, cropped)
                    else:
                        print('Unable to align "%s"' % image_path)
                            
    print('Total number of images: %d' % nrof_images_total)
    print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
    print('Number of pre-aligned images: %d' % nrof_prealigned_images)
def main(args):
    #sleep(random.random())
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))

    #dataset = facenet.get_dataset(args.input_dir)
    align_dlib = AlignDlib(
        '/home/arthur/facenet.git/trunk/src/align/shape_predictor_68_face_landmarks.dat'
    )
    DATA_BASE = "/home/arthur/caffe-master/data/lfw/"
    POSITIVE_TEST_FILE = "/home/arthur/facenet.git/trunk/src/align/positive_pairs_path.txt"
    NEGATIVE_TEST_FILE = "/home/arthur/facenet.git/trunk/src/align/negative_pairs_path.txt"

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

            print('Creating networks and loading parameters')
            ###################tensorflow predictor init#############################
            FLAGS = utils.load_tf_flags()
            facenet.load_model(FLAGS.model_dir)
            facenet.store_revision_info(src_path, output_dir,
                                        ' '.join(sys.argv))
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            ###########################################################

            minsize = 10  # minimum size of face
            threshold = [0.5, 0.5, 0.7]  # thresholds for the three MTCNN stages
            factor = 0.709  # scale factor

            # Add a random key to the filename to allow alignment using multiple processes
            random_key = np.random.randint(0, high=99999)
            # Read video file frame
            #cap = cv2.VideoCapture('/home/wuqianliang/test/VID_20171013_121412.mp4')
            # start capture video
            print('start test on lfw dataset')

            def get_feature(image_x):
                img = cv2.imread(image_x)
                img = img[:, :, 0:3]
                bounding_boxes, _ = align.detect_face.detect_face(
                    img, minsize, pnet, rnet, onet, threshold, factor)
                #print(bounding_boxes)
                nrof_faces = bounding_boxes.shape[0]
                if nrof_faces > 0 and len(bounding_boxes) == 1:
                    bbox = bounding_boxes[0]
                    det = bbox[0:5]
                    detect_confidence = det[4]
                    #print(detect_confidence)
                    if detect_confidence > 0.8:  #0.8:
                        left = int(bbox[0])
                        top = int(bbox[1])
                        right = int(bbox[2])
                        bottom = int(bbox[3])
                        alignedFace = align_dlib.align(
                            160,  # aligned output size: 160x160x3
                            img,
                            dlib.rectangle(left, top, right, bottom),
                            landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
                        #print(alignedFace.shape)
                        cropped = alignedFace.reshape((1, 160, 160, 3))
                        emb_dict = list(
                            sess.run(
                                [embeddings],
                                feed_dict={
                                    images_placeholder: np.array(cropped),
                                    phase_train_placeholder: False
                                })[0])
                        return emb_dict
                return None

            for verif_threshold in np.arange(0.25, 0.35, 0.01):
                True_Positive = 0
                True_Negative = 0
                False_Positive = 0
                False_Negative = 0

                # Positive Test
                f_positive = open(POSITIVE_TEST_FILE, "r")
                PositiveDataList = f_positive.readlines()
                f_positive.close()
                f_negative = open(NEGATIVE_TEST_FILE, "r")
                NegativeDataList = f_negative.readlines()
                f_negative.close()
                for index in range(len(PositiveDataList)):
                    filepath_1 = PositiveDataList[index].split(' ')[0]
                    filepath_2 = PositiveDataList[index].split(' ')[1][:-1]
                    feature_1 = get_feature(DATA_BASE + filepath_1)
                    feature_2 = get_feature(DATA_BASE + filepath_2)
                    if feature_1 is None or feature_2 is None:
                        print(filepath_1, filepath_2, 'not detected')
                        continue
                    result = compare_pic(feature_1, feature_2)
                    if result >= verif_threshold:
                        print('Same Guy\n')
                        True_Positive += 1
                    else:
                        print('Wrong\n\n')
                        False_Negative += 1

                for index in range(len(NegativeDataList)):
                    filepath_1 = NegativeDataList[index].split(' ')[0]
                    filepath_2 = NegativeDataList[index].split(' ')[1][:-1]
                    feature_1 = get_feature(DATA_BASE + filepath_1)
                    feature_2 = get_feature(DATA_BASE + filepath_2)

                    if feature_1 is None or feature_2 is None:
                        print(filepath_1, filepath_2, 'not detected')
                        continue
                    result = compare_pic(feature_1, feature_2)
                    if result >= verif_threshold:
                        print('Wrong Guy\n')
                        False_Positive += 1
                    else:
                        print('Correct\n')
                        True_Negative += 1

                print("thershold: " + str(thershold))
                print("Accuracy: " +
                      str(float(True_Positive + True_Negative) / TEST_SUM) +
                      " %")
                print("True_Positive: " +
                      str(float(True_Positive) / TEST_SUM) + " %")
                print("True_Negative: " +
                      str(float(True_Negative) / TEST_SUM) + " %")
                print("False_Positive: " +
                      str(float(False_Positive) / TEST_SUM) + " %")
                print("False_Negative: " +
                      str(float(False_Negative) / TEST_SUM) + " %")
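# compare_pic and TEST_SUM are referenced above but not defined in this excerpt. A minimal sketch, assuming
# compare_pic returns the cosine similarity of the two embeddings produced by get_feature and that TEST_SUM
# is the total number of pairs evaluated (both the implementation and the definition are assumptions):
import numpy as np

def compare_pic(feature_1, feature_2):
    # get_feature returns a list containing a single embedding vector per image
    a = np.asarray(feature_1[0]).ravel()
    b = np.asarray(feature_2[0]).ravel()
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-10))

# TEST_SUM would then typically be:
# TEST_SUM = len(PositiveDataList) + len(NegativeDataList)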
def main(args):

    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    # Create the log and model directories
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
    if args.baihe_pack_file:
        print('load baihe dataset')
        lfw_paths, actual_issame = msgpack_numpy.load(open(args.baihe_pack_file, 'rb'))

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        # Training step counter; different stages of training can use different learning rates
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)

        # Read data and apply label preserving distortions
        image_batch, label_batch = facenet.read_and_augment_data(image_list, label_list, args.image_size,
            args.batch_size, args.max_nrof_epochs, args.random_crop, args.random_flip, args.random_rotate,
            args.nrof_preprocess_threads)
        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))

        print('Building training graph')

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')

        # Build the inference graph (returns the network structure)
        prelogits, _ = network.inference(image_batch, args.keep_probability, phase_train=True,
                                         weight_decay=args.weight_decay)
        # Weights are initialized from a truncated normal distribution with stddev 0.1
        # tf.truncated_normal_initializer(stddev=0.1)
        logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None,
                                      weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                      weights_regularizer=slim.l2_regularizer(args.weight_decay),
                                      scope='Logits', reuse=False)

        # Add DeCov regularization loss
        if args.decov_loss_factor > 0.0:
            logits_decov_loss = facenet.decov_loss(logits) * args.decov_loss_factor
            # Add the DeCov loss to the tf.GraphKeys.REGULARIZATION_LOSSES collection
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, logits_decov_loss)

        # Add center loss (the center loss is added to the collection as a regularization term)
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
            # Add the center loss to the tf.GraphKeys.REGULARIZATION_LOSSES collection
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)

        # Apply exponential decay to the learning rate
        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.scalar_summary('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        # Compute softmax and cross entropy together to get the final loss (more efficient)
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits, label_batch, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate the total losses
        # Collect the regularization losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay, tf.all_variables(), args.log_histograms)

        # Evaluation
        print('Building evaluation graph')
        lfw_label_list = range(0, len(lfw_paths))
        assert (len(lfw_paths) % args.lfw_batch_size == 0), \
            "The number of images in the LFW test set need to be divisible by the lfw_batch_size"
        eval_image_batch, eval_label_batch = facenet.read_and_augment_data(lfw_paths, lfw_label_list, args.image_size,
                                                                            args.lfw_batch_size, None, False, False,
                                                                            False, args.nrof_preprocess_threads,
                                                                            shuffle=False)
        # Node for input images
        eval_image_batch.set_shape((None, args.image_size, args.image_size, 3))
        eval_image_batch = tf.identity(eval_image_batch, name='input')
        eval_prelogits, _ = network.inference(eval_image_batch, 1.0,
                                              phase_train=False, weight_decay=0.0, reuse=True)
        eval_embeddings = tf.nn.l2_normalize(eval_prelogits, 1, 1e-10, name='embeddings')

        # Create a saver
        saver = tf.train.Saver(tf.all_variables(), max_to_keep=10)
        # saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        # sess.run(tf.global_variables_initializer())
        # sess.run(tf.local_variables_initializer())
        sess.run(tf.initialize_all_variables())
        sess.run(tf.initialize_local_variables())
        summary_writer = tf.train.SummaryWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        # Starting the queue runners launches the input-queue threads.
        # The input queue enqueues before it dequeues; since the enqueue op is fed from placeholders, the
        # dequeue threads block until sess.run(enqueue_op) in train() supplies values, after which each queue proceeds.

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                try:
                    step = sess.run(global_step, feed_dict=None)
                    epoch = step // args.epoch_size
                    # Train for one epoch
                    train(args, sess, epoch, learning_rate_placeholder, global_step, total_loss, train_op, summary_op,
                          summary_writer, regularization_losses, args.learning_rate_schedule_file)

                    # Save variables and the metagraph if it doesn't exist already
                    save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)

                    # Evaluate on LFW
                    if args.lfw_dir:
                        evaluate(sess, eval_embeddings, eval_label_batch, actual_issame, args.lfw_batch_size, args.seed,
                                 args.lfw_nrof_folds, log_dir, step, summary_writer)
                    # Evaluate on baihe_data
                    if args.baihe_pack_file:
                        evaluate(sess, eval_embeddings, eval_label_batch, actual_issame, args.lfw_batch_size, args.seed,
                                 args.lfw_nrof_folds, log_dir, step, summary_writer)
                except Exception:
                    # Log the exception and continue with the next epoch
                    traceback.print_exc()
                    continue
    return model_dir
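# A hypothetical invocation of the training routine above; the script name, flag spellings and paths are
# assumptions inferred from the attributes read off args (they are not confirmed by this excerpt):
#
#   python train_softmax.py \
#       --logs_base_dir ~/logs/facenet --models_base_dir ~/models/facenet \
#       --data_dir ~/datasets/training_images --model_def models.inception_resnet_v1 \
#       --lfw_dir ~/datasets/lfw_aligned --lfw_pairs data/pairs.txt \
#       --max_nrof_epochs 80 --batch_size 90 --image_size 160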
def main(args):
    align = align_dlib.AlignDlib(os.path.expanduser(args.dlib_face_predictor))
    landmarkIndices = align_dlib.AlignDlib.OUTER_EYES_AND_NOSE
    output_dir = os.path.expanduser(args.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
    dataset = facenet.get_dataset(args.input_dir)
    random.shuffle(dataset)
    # Scale the image such that the face fills the frame when cropped to crop_size
    scale = float(args.face_size) / args.image_size
    nrof_images_total = 0
    nrof_prealigned_images = 0
    nrof_successfully_aligned = 0
    nrof_not_successfully_aligned = 0
    lfw_align_fail_list = open("lfw_align_fail_list.txt", "w")
    for cls in dataset:
        output_class_dir = os.path.join(output_dir, cls.name)
        if not os.path.exists(output_class_dir):
            os.makedirs(output_class_dir)
        random.shuffle(cls.image_paths)
        for image_path in cls.image_paths:
            nrof_images_total += 1
            filename = os.path.splitext(os.path.split(image_path)[1])[0]
            output_filename = os.path.join(output_class_dir, filename + '.png')
            if not os.path.exists(output_filename):
                try:
                    img = misc.imread(image_path)
                except (IOError, ValueError, IndexError) as e:
                    errorMessage = '{}: {}'.format(image_path, e)
                    print(errorMessage)
                else:
                    if img.ndim == 2:
                        img = facenet.to_rgb(img)
                    if args.use_center_crop:
                        scaled = misc.imresize(img,
                                               args.prealigned_scale,
                                               interp='bilinear')
                        sz1 = scaled.shape[1] // 2
                        sz2 = args.image_size // 2
                        aligned = scaled[(sz1 - sz2):(sz1 + sz2),
                                         (sz1 - sz2):(sz1 + sz2), :]
                    else:
                        aligned = align.align(args.image_size,
                                              img,
                                              landmarkIndices=landmarkIndices,
                                              skipMulti=False,
                                              scale=scale)
                    if aligned is not None:
                        print(image_path)
                        nrof_successfully_aligned += 1
                        misc.imsave(output_filename, aligned)
                    elif args.prealigned_dir:
                        # Face detection failed. Use center crop from pre-aligned dataset
                        class_name = os.path.split(output_class_dir)[1]
                        image_path_without_ext = os.path.join(
                            os.path.expanduser(args.prealigned_dir),
                            class_name, filename)
                        # Find the extension of the image
                        exts = ('jpg', 'png')
                        image_path = ''
                        for ext in exts:
                            temp_path = image_path_without_ext + '.' + ext
                            if os.path.exists(temp_path):
                                image_path = temp_path
                                break
                        try:
                            img = misc.imread(image_path)
                        except (IOError, ValueError, IndexError) as e:
                            errorMessage = '{}: {}'.format(image_path, e)
                            print(errorMessage)
                        else:
                            scaled = misc.imresize(img,
                                                   args.prealigned_scale,
                                                   interp='bilinear')
                            sz1 = scaled.shape[1] // 2
                            sz2 = args.image_size // 2
                            cropped = scaled[(sz1 - sz2):(sz1 + sz2),
                                             (sz1 - sz2):(sz1 + sz2), :]
                            print(image_path)
                            nrof_prealigned_images += 1
                            misc.imsave(output_filename, cropped)
                    else:
                        nrof_not_successfully_aligned += 1
                        print('Unable to align "%s"' % image_path)
                        lfw_align_fail_list.write(
                            "Case " + str(nrof_not_successfully_aligned) +
                            ": unable to align " + image_path + "\n")

    print('Total number of images: %d' % nrof_images_total)
    print('Number of successfully aligned images: %d' %
          nrof_successfully_aligned)
    print('Number of pre-aligned images: %d' % nrof_prealigned_images)
    lfw_align_fail_list.close()
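# The args object consumed above comes from an argument parser that is not part of this excerpt. A minimal
# argparse sketch, inferred from the attributes the function reads; flag names, defaults and help texts are
# assumptions, not the script's actual parser:
import argparse

def parse_arguments(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('input_dir', type=str, help='Directory with unaligned images.')
    parser.add_argument('output_dir', type=str, help='Directory with aligned face thumbnails.')
    parser.add_argument('--dlib_face_predictor', type=str, default='shape_predictor_68_face_landmarks.dat',
                        help='Path to the dlib landmark predictor file.')
    parser.add_argument('--image_size', type=int, default=160, help='Size (pixels) of the output images.')
    parser.add_argument('--face_size', type=int, default=96, help='Size of the face area within the output image.')
    parser.add_argument('--use_center_crop', action='store_true', help='Use a center crop instead of dlib alignment.')
    parser.add_argument('--prealigned_dir', type=str, default='', help='Fallback directory with pre-aligned images.')
    parser.add_argument('--prealigned_scale', type=float, default=1.0, help='Scale applied to pre-aligned images.')
    return parser.parse_args(argv)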
def main(args):
  
    network = importlib.import_module(args.model_def)
    image_size = (args.image_size, args.image_size)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    stat_file_name = os.path.join(log_dir, 'stat.h5')

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
        
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    dataset = facenet.get_dataset(args.data_dir)
    if args.filter_filename:
        dataset = filter_dataset(dataset, os.path.expanduser(args.filter_filename), 
            args.filter_percentile, args.filter_min_nrof_images_per_class)
        
    if args.validation_set_split_ratio>0.0:
        train_set, val_set = facenet.split_dataset(dataset, args.validation_set_split_ratio, args.min_nrof_val_images_per_class, 'SPLIT_IMAGES')
    else:
        train_set, val_set = dataset, []
        
    nrof_classes = len(train_set)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        
        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list)>0, 'The training set should not be empty'
        
        val_image_list, val_label_list = facenet.get_image_paths_and_labels(val_set)

        # Create a queue that produces indices into the image_list and label_list 
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
                             shuffle=True, seed=None, capacity=32)
        
        index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
        
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
        labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
        control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
        
        nrof_preprocess_threads = 4
        input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                    dtypes=[tf.string, tf.int32, tf.int32],
                                    shapes=[(1,), (1,), (1,)],
                                    shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='enqueue_op')
        image_batch, label_batch = facenet.create_input_pipeline(input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)

        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')
        
        print('Number of classes in training set: %d' % nrof_classes)
        print('Number of examples in training set: %d' % len(image_list))

        print('Number of classes in validation set: %d' % len(val_set))
        print('Number of examples in validation set: %d' % len(val_image_list))
        
        print('Building training graph')
        
        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability, 
            phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size, 
            weight_decay=args.weight_decay)
        logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None, 
                weights_initializer=slim.initializers.xavier_initializer(), 
                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                scope='Logits', reuse=False)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Norm for the prelogits
        eps = 1e-4
        prelogits_norm = tf.reduce_mean(tf.norm(tf.abs(prelogits)+eps, ord=args.prelogits_norm_p, axis=1))
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_norm * args.prelogits_norm_loss_factor)

        # Add center loss
        prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)
        
        correct_prediction = tf.cast(tf.equal(tf.argmax(logits, 1), tf.cast(label_batch, tf.int64)), tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)
        
        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
        
        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            nrof_steps = args.max_nrof_epochs*args.epoch_size
            nrof_val_samples = int(math.ceil(args.max_nrof_epochs / args.validate_every_n_epochs))   # Validate every validate_every_n_epochs as well as in the last epoch
            stat = {
                'loss': np.zeros((nrof_steps,), np.float32),
                'center_loss': np.zeros((nrof_steps,), np.float32),
                'reg_loss': np.zeros((nrof_steps,), np.float32),
                'xent_loss': np.zeros((nrof_steps,), np.float32),
                'prelogits_norm': np.zeros((nrof_steps,), np.float32),
                'accuracy': np.zeros((nrof_steps,), np.float32),
                'val_loss': np.zeros((nrof_val_samples,), np.float32),
                'val_xent_loss': np.zeros((nrof_val_samples,), np.float32),
                'val_accuracy': np.zeros((nrof_val_samples,), np.float32),
                'lfw_accuracy': np.zeros((args.max_nrof_epochs,), np.float32),
                'lfw_valrate': np.zeros((args.max_nrof_epochs,), np.float32),
                'learning_rate': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_train': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_validate': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_evaluate': np.zeros((args.max_nrof_epochs,), np.float32),
                'prelogits_hist': np.zeros((args.max_nrof_epochs, 1000), np.float32),
              }
            for epoch in range(1,args.max_nrof_epochs+1):
                step = sess.run(global_step, feed_dict=None)
                # Train for one epoch
                t = time.time()
                cont = train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, global_step, 
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file,
                    stat, cross_entropy_mean, accuracy, learning_rate,
                    prelogits, prelogits_center_loss, args.random_rotate, args.random_crop, args.random_flip, prelogits_norm, args.prelogits_hist_max, args.use_fixed_image_standardization)
                stat['time_train'][epoch-1] = time.time() - t
                
                if not cont:
                    break
                  
                t = time.time()
                if len(val_image_list)>0 and ((epoch-1) % args.validate_every_n_epochs == args.validate_every_n_epochs-1 or epoch==args.max_nrof_epochs):
                    validate(args, sess, epoch, val_image_list, val_label_list, enqueue_op, image_paths_placeholder, labels_placeholder, control_placeholder,
                        phase_train_placeholder, batch_size_placeholder, 
                        stat, total_loss, regularization_losses, cross_entropy_mean, accuracy, args.validate_every_n_epochs, args.use_fixed_image_standardization)
                stat['time_validate'][epoch-1] = time.time() - t

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, epoch)

                # Evaluate on LFW
                t = time.time()
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, 
                        embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer, stat, epoch, 
                        args.lfw_distance_metric, args.lfw_subtract_mean, args.lfw_use_flipped_images, args.use_fixed_image_standardization)
                stat['time_evaluate'][epoch-1] = time.time() - t

                print('Saving statistics')
                with h5py.File(stat_file_name, 'w') as f:
                    for key, value in stat.items():
                        f.create_dataset(key, data=value)
    
    return model_dir
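# The per-step and per-epoch statistics written to stat.h5 above can be read back for offline analysis
# (e.g. plotting the training loss or LFW accuracy per epoch). A minimal sketch using only the h5py API:
import h5py

def load_training_stats(stat_file_name):
    # Read every dataset in the file back into a dict of numpy arrays
    stats = {}
    with h5py.File(stat_file_name, 'r') as f:
        for key in f.keys():
            stats[key] = f[key][...]
    return stats

# Example: stats = load_training_stats('stat.h5'); print(stats['lfw_accuracy'])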
def main(args):

    network = importlib.import_module(args.model_def, 'inference')

    if args.model_name:
        subdir = args.model_name
        preload_model = True
    else:
        subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
        preload_model = False
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.mkdir(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.mkdir(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)

    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))

    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir),
                                         pairs, args.lfw_file_ext)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for input images
        images_placeholder = tf.placeholder(tf.float32,
                                            shape=(None, args.image_size,
                                                   args.image_size, 3),
                                            name='input')

        # Placeholder for phase_train
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        # Build the inference graph
        embeddings = network.inference(images_placeholder,
                                       args.pool_type,
                                       args.use_lrn,
                                       args.keep_probability,
                                       phase_train=phase_train_placeholder,
                                       weight_decay=args.weight_decay)

        # Split example embeddings into anchor, positive and negative
        anchor, positive, negative = tf.split(0, 3, embeddings)

        # Calculate triplet loss
        loss = facenet.triplet_loss(anchor, positive, negative, args.alpha)

        # Calculate the total loss
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([loss] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op, _ = facenet.train(
            total_loss, global_step, args.optimizer, args.learning_rate,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor, args.moving_average_decay)

        # Create a saver
        saver = tf.train.Saver(tf.all_variables(), max_to_keep=0)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Build an initialization operation to run below.
        init = tf.initialize_all_variables()

        # Start running operations on the Graph.
        sess = tf.Session()
        sess.run(init)

        summary_writer = tf.train.SummaryWriter(log_dir, sess.graph)

        with sess.as_default():

            if preload_model:
                ckpt = tf.train.get_checkpoint_state(model_dir)
                #pylint: disable=maybe-no-member
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                else:
                    raise ValueError('Checkpoint not found')

            # Training and validation loop
            for epoch in range(args.max_nrof_epochs):
                # Train for one epoch
                step = train(args, sess, train_set, epoch, images_placeholder,
                             phase_train_placeholder, global_step, embeddings,
                             loss, train_op, summary_op, summary_writer)

                _, _, accuracy, val, val_std, far = lfw.validate(
                    sess,
                    paths,
                    actual_issame,
                    args.seed,
                    args.batch_size,
                    images_placeholder,
                    phase_train_placeholder,
                    embeddings,
                    nrof_folds=args.lfw_nrof_folds)
                print('Accuracy: %1.3f+-%1.3f' %
                      (np.mean(accuracy), np.std(accuracy)))
                print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' %
                      (val, val_std, far))
                # Add validation loss and accuracy to summary
                summary = tf.Summary()
                #pylint: disable=maybe-no-member
                summary.value.add(tag='lfw/accuracy',
                                  simple_value=np.mean(accuracy))
                summary.value.add(tag='lfw/val_rate', simple_value=val)
                summary_writer.add_summary(summary, step)

                if (epoch % args.checkpoint_period
                        == 0) or (epoch == args.max_nrof_epochs - 1):
                    # Save the model checkpoint
                    print('Saving checkpoint')
                    checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
    return model_dir
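# tf.split(0, 3, embeddings) above slices the embedding batch along dimension 0 into three equal blocks,
# so the images fed to the network must be ordered as [all anchors, all positives, all negatives]. A minimal
# numpy sketch of assembling a batch in that layout (illustrative; the train() routine that actually selects
# and loads the triplets is not shown in this excerpt):
import numpy as np

def stack_triplet_batch(anchor_imgs, positive_imgs, negative_imgs):
    # Each argument has shape (nrof_triplets, image_size, image_size, 3)
    assert anchor_imgs.shape == positive_imgs.shape == negative_imgs.shape
    return np.concatenate([anchor_imgs, positive_imgs, negative_imgs], axis=0)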
def main(args):
  
    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    if args.filter_filename:
        train_set = filter_dataset(train_set, os.path.expanduser(args.filter_filename), 
            args.filter_percentile, args.filter_min_nrof_images_per_class)
    nrof_classes = len(train_set)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        
        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list)>0, 'The dataset should not be empty'
        
        # Create a queue that produces indices into the image_list and label_list 
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
                             shuffle=True, seed=None, capacity=32)
        
        index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
        
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')

        labels_placeholder = tf.placeholder(tf.int64, shape=(None,1), name='labels')
        
        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                    dtypes=[tf.string, tf.int64],
                                    shapes=[(1,), (1,)],
                                    shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder], name='enqueue_op')
        
        nrof_preprocess_threads = 4
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_png(file_contents)
                if args.random_rotate:
                    image = tf.py_func(facenet.random_rotate_image, [image], tf.uint8)
                if args.random_crop:
                    image = tf.random_crop(image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)
    
                #pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])
    
        image_batch, label_batch = tf.train.batch_join(
            images_and_labels, batch_size=batch_size_placeholder, 
            shapes=[(args.image_size, args.image_size, 3), ()], enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)
        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')
        
        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))
        
        print('Building training graph')
        
        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability, 
            phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size, 
            weight_decay=args.weight_decay)
        logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None, 
                weights_initializer=tf.truncated_normal_initializer(stddev=0.1), 
                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                scope='Logits', reuse=False)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Add center loss
        if args.center_loss_factor>0.0:
            prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)
        
        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
        
        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step, 
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, 
                        embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer)
    sess.close()
    return model_dir
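# The input pipeline above uses tf.py_func to wrap a plain Python augmentation (facenet.random_rotate_image)
# into the graph. That helper is not included in this excerpt; a rough sketch of a py_func-compatible rotation,
# assuming a small uniform random angle (the angle range and interpolation order are assumptions):
import numpy as np
from scipy import ndimage

def random_rotate_image_sketch(image):
    # Rotate in the image plane and keep the original shape and uint8 dtype expected by tf.py_func above
    angle = np.random.uniform(low=-10.0, high=10.0)
    return ndimage.rotate(image, angle, reshape=False, order=1).astype(np.uint8)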
def main(args):
  
    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    if args.pretrained_model:
        print('Pre-trained model: %s' % os.path.expanduser(args.pretrained_model))
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
        
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Placeholder for input images
        images_placeholder = tf.placeholder(tf.float32, shape=(None, args.image_size, args.image_size, 3), name='input')

        # Placeholder for phase_train
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')

        # Build the inference graph
        prelogits, _ = network.inference(images_placeholder, args.keep_probability, 
            phase_train=True, weight_decay=args.weight_decay)
        pre_embeddings = slim.fully_connected(prelogits, 128, activation_fn=None, scope='Embeddings', reuse=False)

        # Split example embeddings into anchor, positive and negative and calculate triplet loss
        embeddings = tf.nn.l2_normalize(pre_embeddings, 1, 1e-10, name='embeddings')
        anchor, positive, negative = tf.split(0, 3, embeddings)
        triplet_loss = facenet.triplet_loss(anchor, positive, negative, args.alpha)
        
        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.scalar_summary('learning_rate', learning_rate)

        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([triplet_loss] + regularization_losses, name='total_loss')

        # Create list with variables to restore
        restore_vars = []
        update_gradient_vars = []
        if args.pretrained_model:
            for var in tf.all_variables():
                if 'Embeddings/' not in var.op.name:
                    restore_vars.append(var)
                else:
                    update_gradient_vars.append(var)
        else:
            restore_vars = tf.all_variables()
            update_gradient_vars = tf.all_variables()

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, update_gradient_vars)
        
        # Create a saver
        restore_saver = tf.train.Saver(restore_vars)
        saver = tf.train.Saver(tf.all_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))        

        # Initialize variables
        sess.run(tf.initialize_all_variables())
        sess.run(tf.initialize_local_variables())

        summary_writer = tf.train.SummaryWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            if args.pretrained_model:
                restore_saver.restore(sess, os.path.expanduser(args.pretrained_model))

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                step = train(args, sess, train_set, epoch, images_placeholder, 
                    learning_rate_placeholder, global_step, embeddings, total_loss, train_op, summary_op, summary_writer)
                if args.lfw_dir:
                    _, _, accuracy, val, val_std, far = lfw.validate(sess, lfw_paths,
                        actual_issame, args.seed, 60, images_placeholder, phase_train_placeholder, embeddings, nrof_folds=args.lfw_nrof_folds)
                    print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
                    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
                    # Add validation loss and accuracy to summary
                    summary = tf.Summary()
                    #pylint: disable=maybe-no-member
                    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
                    summary.value.add(tag='lfw/val_rate', simple_value=val)
                    summary_writer.add_summary(summary, step)

                # Save the model checkpoint
                print('Saving checkpoint')
                checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    return model_dir
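# After any of the training loops above has saved a checkpoint, embeddings can be computed for new, already
# aligned images by restoring the graph and feeding the named tensors. A minimal sketch, assuming a metagraph
# and checkpoint exported with the tensor names used above ('input:0', 'phase_train:0', 'embeddings:0');
# the file paths are placeholders:
import tensorflow as tf

def compute_embeddings(meta_file, ckpt_file, images):
    # images: numpy array of shape (n, image_size, image_size, 3), already aligned and standardized
    with tf.Graph().as_default():
        with tf.Session() as sess:
            saver = tf.train.import_meta_graph(meta_file)
            saver.restore(sess, ckpt_file)
            graph = tf.get_default_graph()
            images_placeholder = graph.get_tensor_by_name('input:0')
            phase_train_placeholder = graph.get_tensor_by_name('phase_train:0')
            embeddings = graph.get_tensor_by_name('embeddings:0')
            return sess.run(embeddings, feed_dict={images_placeholder: images,
                                                   phase_train_placeholder: False})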