Code Example #1
File: export_embeddings.py  Project: citysir/facenet
def main(args):
    train_set = facenet.get_dataset(args.data_dir)
    image_list, label_list = facenet.get_image_paths_and_labels(train_set)
    label_strings = [name for name in os.listdir(os.path.expanduser(args.data_dir)) if os.path.isdir(os.path.join(os.path.expanduser(args.data_dir), name))]

    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Load the model
            facenet.load_model(args.model_dir)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            nrof_images = len(image_list)
            print('Number of images: ', nrof_images)
            batch_size = args.image_batch
            if nrof_images % batch_size == 0:
                nrof_batches = nrof_images // batch_size
            else:
                nrof_batches = (nrof_images // batch_size) + 1
            print('Number of batches: ', nrof_batches)
            embedding_size = embeddings.get_shape()[1]
            emb_array = np.zeros((nrof_images, embedding_size))
            start_time = time.time()

            for i in range(nrof_batches):
                if i == nrof_batches - 1:
                    n = nrof_images
                else:
                    n = i*batch_size + batch_size
                # Get images for the batch
                if args.is_aligned is True:
                    images = facenet.load_data(image_list[i*batch_size:n], False, False, args.image_size)
                else:
                    images = load_and_align_data(image_list[i*batch_size:n], args.image_size, args.margin, args.gpu_memory_fraction)
                feed_dict = { images_placeholder: images, phase_train_placeholder: False }
                # Use the facenet model to calculate embeddings
                embed = sess.run(embeddings, feed_dict=feed_dict)
                emb_array[i*batch_size:n, :] = embed
                print('Completed batch', i+1, 'of', nrof_batches)

            run_time = time.time() - start_time
            print('Run time: ', run_time)

            # Export embeddings and labels
            label_list = np.array(label_list)

            np.save(args.embeddings_name, emb_array)
            np.save(args.labels_name, label_list)
            np.save(args.labels_strings_name, label_strings)
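
A minimal sketch (file names follow the parser defaults shown in example #14; it assumes label_strings is ordered by class index) of how the three exported arrays fit together:

import numpy as np

emb = np.load('embeddings.npy')       # shape (nrof_images, embedding_size)
labels = np.load('labels.npy')        # one integer class index per image
names = np.load('label_strings.npy')  # class-name strings
print(names[labels[0]], emb[0, :5])   # class of image 0 and its first dimensions
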
Code Example #2
def main(args):

    dataset = facenet.get_dataset(args.dir)
    paths, _ = facenet.get_image_paths_and_labels(dataset)
    t = np.zeros((len(paths)))
    x = time.time()
    for i, path in enumerate(paths):
        start_time = time.time()
        with open(path, mode='rb') as f:
            _ = f.read()
        duration = time.time() - start_time
        t[i] = duration
        if i % 1000 == 0 or i == len(paths) - 1:
            print('File %d/%d  Total time: %.2f  Avg: %.3f  Std: %.3f' % (i, len(paths), time.time()-x, np.mean(t[0:i+1])*1000, np.std(t[0:i+1])*1000))
Code Example #3
def save_position_into_file(src, dest):
    # storing bounding boxes' centers with their respective images name
    d = {}
    with open(src) as f:
        for line in f:
            line = line.split()
            line[0] = line[0].split('/')[-1]
            d[line[0]] = ((((int(line[1]) + int(line[3])) // 2) /
                           (1280 // 2)) - 1,
                          (((int(line[2]) + int(line[4])) // 2) /
                           (720 // 2)) - 1)
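            # the box center (cx, cy) is mapped into [-1, 1], assuming a 1280x720 frame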

    # getting name of images in order
    # files = [os.path.join(dest, f) for f in os.listdir(dest)]
    # files.sort()
    # names = []
    # for p in [f for f in files if os.path.isdir(os.path.join(dest, f))]:
    #     img_names = os.listdir(p)
    #     img_names.sort()
    #     names.extend(img_names)

    datadir = '/Users/zarnihpyoe/wpi/mqp/data5/classified'
    dataset = facenet.get_dataset(datadir)
    paths, _ = facenet.get_image_paths_and_labels(dataset)
    names = [path.split('/')[-1] for path in paths]
    # making positions matrix (m, 2)
    embeddings, labels = np.load('{}/embeddings.npy'.format(dest))
    print(embeddings.shape)
    positions = np.zeros((embeddings.shape[1], 2))

    for i, n in enumerate(names):
        c = d[n]
        positions[i] = [c[0], c[1]]

    # appending positions matrix to the end of the embeddings (512+2, m)
    # ext_embeddings = np.vstack((positions.T, embeddings))
    # print(ext_embeddings.shape, len(labels))
    # np.save(os.path.join(dest, 'ext_embeddings.npy'), ext_embeddings)
    # np.save(os.path.join(dest, 'labels.npy'), labels)

    # just saving the positions of the detected faces
    print(positions.shape)
    np.save(os.path.join(dest, 'face_positions.npy'), positions.T)
Code Example #4
    def main_train(self):
        with tf.Graph().as_default():
            with tf.Session() as sess:
                img_data = facenet.get_dataset(self.datadir)
                path, label = facenet.get_image_paths_and_labels(img_data)
                print('Classes: %d' % len(img_data))
                print('Images: %d' % len(path))

                facenet.load_model(self.modeldir)
                images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
                embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
                phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
                embedding_size = embeddings.get_shape()[1]

                print('Extracting features of images for model')
                batch_size = 1000
                image_size = 160
                nrof_images = len(path)
                nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images / batch_size))
                emb_array = np.zeros((nrof_images, embedding_size))
                for i in range(nrof_batches_per_epoch):
                    start_index = i * batch_size
                    end_index = min((i + 1) * batch_size, nrof_images)
                    paths_batch = path[start_index:end_index]
                    images = facenet.load_data(paths_batch, False, False, image_size)
                    feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                    emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

                classifier_file_name = os.path.expanduser(self.classifier_filename)

                # Training Started
                print('Training Started')
                model = SVC(kernel='linear', probability=True)
                model.fit(emb_array, label)

                class_names = [cls.name.replace('_', ' ') for cls in img_data]

                # Saving model
                with open(classifier_file_name, 'wb') as outfile:
                    pickle.dump((model, class_names), outfile)
                return classifier_file_name
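
A minimal readback sketch (the pickle path and emb_array are placeholders; the (model, class_names) tuple layout matches the pickle.dump above):

import pickle

with open('classifier.pkl', 'rb') as infile:
    model, class_names = pickle.load(infile)
probabilities = model.predict_proba(emb_array)  # emb_array: (n, embedding_size)
best = probabilities.argmax(axis=1)
print([class_names[k] for k in best])
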
Code Example #5
    def makeClassifier(self):
        """Read database, calculate embeddings and create a linearSVM to classify"""
        dataset = facenet.get_dataset(self.database)
        for cls in dataset:
            assert len(cls.image_paths) > 0, \
                'There must be at least one image for each class in the dataset'
        paths, labels = facenet.get_image_paths_and_labels(dataset)

        print('Number of classes: %d' % len(dataset))
        print('Number of images: %d' % len(paths))

        if len(paths) > 1:
            with self.graph.as_default():
                with self.sess.as_default():
                    embedding_size = self.embeddings.get_shape()[1]
                    print('Calculating features for images')
                    nrof_images = len(paths)
                    nrof_batches_per_epoch = int(
                        np.ceil(1.0 * nrof_images / self.batch_size))
                    emb_array = np.zeros((nrof_images, embedding_size))
                    for i in range(nrof_batches_per_epoch):
                        start_index = i * self.batch_size
                        end_index = min((i + 1) * self.batch_size, nrof_images)
                        paths_batch = paths[start_index:end_index]
                        images = facenet.load_data(paths_batch, False, False,
                                                   self.image_size)
                        feed_dict = {
                            self.images_placeholder: images,
                            self.phase_train_placeholder: False
                        }
                        emb_array[start_index:end_index, :] = self.sess.run(
                            self.embeddings, feed_dict=feed_dict)
                    self.svm = LinearSVC()
                    self.svm.fit(emb_array, labels)
                    class_names = [
                        cls.name.replace('_', ' ') for cls in dataset
                    ]
                    self.labels = class_names
                print(self.labels)
Code Example #6
    def main_train(self):
        with tf.Graph().as_default():
            with tf.Session() as sess:
                img_data = facenet.get_dataset(self.datadir)
                path, label = facenet.get_image_paths_and_labels(img_data)
                print('Classes: %d' % len(img_data))
                print('Images: %d' % len(path))

                facenet.load_model(self.modeldir)
                images_placeholder = tf.get_default_graph().get_tensor_by_name(
                    "input:0")
                embeddings = tf.get_default_graph().get_tensor_by_name(
                    "embeddings:0")
                phase_train_placeholder = tf.get_default_graph(
                ).get_tensor_by_name("phase_train:0")
                embedding_size = embeddings.get_shape()[1]

                print('Extracting features of images for model')
                batch_size = 10
                image_size = 160
                nrof_images = len(path)
                nrof_batches_per_epoch = int(
                    math.ceil(1.0 * nrof_images / batch_size))
                emb_array = np.zeros((nrof_images, embedding_size))
                for i in range(nrof_batches_per_epoch):
                    start_index = i * batch_size
                    end_index = min((i + 1) * batch_size, nrof_images)
                    paths_batch = path[start_index:end_index]
                    images = facenet.load_data(paths_batch, False, False,
                                               image_size)
                    feed_dict = {
                        images_placeholder: images,
                        phase_train_placeholder: False
                    }
                    emb_array[start_index:end_index, :] = sess.run(
                        embeddings, feed_dict=feed_dict)
                label = np.array(label).reshape(-1, 1)  # one label per row, without hard-coding the image count
                print(label.shape)
                emb_array_with_label = np.append(emb_array, label, axis=1)
                np.save(self.classifier_filename, emb_array_with_label)
                return 'emb_array'
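
A minimal sketch (the .npy name stands in for whatever self.classifier_filename was) of splitting the saved matrix back apart, mirroring the np.append above:

import numpy as np

data = np.load('classifier.npy')
emb_array = data[:, :-1]          # (n, embedding_size)
label = data[:, -1].astype(int)   # the appended label column
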
Code Example #7
File: reg.py  Project: nijoj/FaceRecognition_project
def register(answer):
    dirName = 'dataset\\' + answer
    if not os.path.exists(dirName):
        os.mkdir(dirName)
        print(dirName, " Created ")
        embd, image = create_embd()
        #load previously stored encodings
        with open('embd.pickle', 'rb') as f:
            feature_array = pickle.load(f)

        dataset = facenet.get_dataset('dataset')  #for getting names of people
        paths, labels = facenet.get_image_paths_and_labels(dataset)
        class_names = [cls.name.replace('_', ' ') for cls in dataset]
        for i, name in enumerate(class_names):
            if name == answer:
                break
        feature_array.append(list((i, embd)))
        with open('embd.pickle', 'wb') as f:
            pickle.dump(feature_array, f)
        cv2.imwrite(os.path.join(dirName, 'reg.png'), image)
    else:
        print(dirName, " already exists")
Code Example #8
def main(args):
    del_count = 0
    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.expanduser(args.logs_base_dir)
    print('log dir: %s' % log_dir)
    log_file = os.path.join(log_dir, subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)

    dataset = facenet.get_dataset(args.dir)
    paths, _ = facenet.get_image_paths_and_labels(dataset)
    t = np.zeros((len(paths)))
    x = time.time()
    for i, path in enumerate(paths):
        start_time = time.time()
        # with open(path, mode='rb') as f:
        #     _ = f.read()
        try:
            misc.imread(path)
        except Exception:  # unreadable or corrupt image
            del_count += 1
            print(path)
            os.remove(path)
            with open(log_file, 'a') as fw:
                fw.write('%s\n' % path)

        duration = time.time() - start_time
        t[i] = duration
        if i % 1000 == 0 or i == len(paths) - 1:
            print('File %d/%d  Total time: %.2f  Avg: %.3f  Std: %.3f' %
                  (i, len(paths), time.time() - x, np.mean(t[0:i+1]) * 1000,
                   np.std(t[0:i+1]) * 1000))

    print('%d unreadable images in total' % del_count)
    with open(log_file, 'a') as fw:
        fw.write('%d unreadable images in total' % del_count)
Code Example #9
def main():

    train_filename = './train.tfrecords'
    writer = tf.python_io.TFRecordWriter(train_filename)

    dataset = facenet.get_dataset("~/")
    image_list, label_list = facenet.get_image_paths_and_labels(dataset)
    assert (len(image_list) == len(label_list))
    for i in range(0, len(image_list)):
        image = image_list[i]
        label = label_list[i]
        img = Image.open(image)
        img = img.resize((112, 112))
        img_raw = img.tobytes()
        example = tf.train.Example(features=tf.train.Features(
            feature={
                "label":
                tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
                'img_raw':
                tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[img_raw]))
            }))
        writer.write(example.SerializeToString())
    writer.close()
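
A minimal readback sketch, assuming the TF 1.x API used above and the 'label'/'img_raw' feature keys written by main() (the 112x112x3 shape assumes three-channel RGB input):

import tensorflow as tf

def parse_example(serialized):
    features = tf.parse_single_example(
        serialized,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string),
        })
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [112, 112, 3])  # matches the resize above
    return img, features['label']

dataset = tf.data.TFRecordDataset('./train.tfrecords').map(parse_example)
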
Code Example #10
def main():
    #load processed image, its original and bounding boxes from camera image
    images, orginal, bb = load_camera_image()

    dataset = facenet.get_dataset('dataset')  #for getting names of people
    paths, labels = facenet.get_image_paths_and_labels(dataset)
    class_names = [cls.name.replace('_', ' ') for cls in dataset]
    #load previously stored encodings
    with open('embd.pickle', 'rb') as f:
        feature_array = pickle.load(f)

## Step 1: Compute the target "encoding/embedding" for the image
    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Load the model
            facenet.load_model(model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            feed_dict = {
                images_placeholder: images,
                phase_train_placeholder: False
            }
            emb = sess.run(embeddings, feed_dict=feed_dict)

        nrof_images = len(feature_array)

        ## Step 2: Find the closest encoding ##

        # Initialize "min_dist" to a large value, say 100
        min_dist = 100
        # Loop over the database names and encodings.
        for i in range(nrof_images):
            # Compute the L2 distance between the stored database embedding and the target "emb".
            dist = np.linalg.norm(feature_array[i][1] - emb[0, :])
            # If this distance is less than the min_dist, then set min_dist to dist, and identity to name.
            if dist < min_dist:
                min_dist = dist
                identity = class_names[feature_array[i][0]]
        if min_dist > 0.95:  #check for threshold
            print("Not in the database.")
            print('Unauthorized Person')
            print("it's " + str(identity) + ", the distance is " +
                  str(min_dist))  #dont mind this code
            #alert.sendmail() #send mail if unauthorised
            exit(0)
        else:
            print("it's " + str(identity) + ", the distance is " +
                  str(min_dist))
            print('Authorized Person')

            cv2.rectangle(orginal, (bb[0], bb[1]), (bb[2], bb[3]), (0, 255, 0),
                          2)  #boxing face in the image

            #plot result id under box
            text_x = bb[0]
            text_y = bb[3] + 20
            cv2.putText(orginal,
                        identity, (text_x, text_y),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL,
                        1, (0, 0, 255),
                        thickness=1,
                        lineType=2)
            cv2.imshow("Hi", orginal)

            #ser=serial.Serial('/dev/ttyACM0',9600) #open the door
            #ser.write(b'1')

            if cv2.waitKey(100000) == ord('q'):
                #ser=serial.Serial('/dev/ttyACM0',9600) #close the doors
                #ser.write(b'2')
                exit(0)
            cv2.destroyAllWindows()
Code Example #11
def main(args):
    dataset = facenet.get_dataset(args.data_dir)
    # Check that there is at least one training image per class
    for cls in dataset:
        assert len(cls.image_paths) > 0, 'There must be at least one image for each class in the dataset'

    paths, labels = facenet.get_image_paths_and_labels(dataset)

    print('Number of classes: %d' % len(dataset))
    print('Number of images: %d' % len(paths))

    # Load the model
    print('Loading feature extraction model')

    # Load driver
    drv = driver.load_driver(args.driver)
    # Instantiate the driver
    serving = drv(
        preprocess=serving_hook.preprocess,
        postprocess=serving_hook.postprocess,
        init_hook=serving_hook.init_hook,
        classifier=args.classifier,
        use_tf='False',
        use_face_detection='True',
        face_detection_path=args.face_detection_path
    )
    serving.load_model(
        args.model,
        inputs='input:0,phase_train:0',
        outputs='embeddings:0',
        device=args.device,
        flexible_batch_size=True,
    )

    # Run forward pass to calculate embeddings
    print('Calculating features for images')
    time_requests = 0.0
    epochs = 2
    start_time = time.time()
    for j in range(epochs):
        for path in paths:
            print('Processing %s...' % path)
            with open(path, 'rb') as f:
                data = f.read()

            t = time.time()

            feed_dict = {'input': np.array(data)}
            outputs = serving.predict_hooks(feed_dict, context=Context())

            delta = (time.time() - t) * 1000
            time_requests += delta

    duration = float(time.time() - start_time)
    print()
    print('Total time: %.3fs' % duration)
    per_request_ms = float(time_requests) / epochs / len(paths)
    print('Time per request: %.3fms' % per_request_ms)

    speed = 1 / (per_request_ms / 1000)
    print('Speed: {} sample/sec'.format(speed))
Code Example #12
def classify(src, dir):
    with tf.Graph().as_default():

        with tf.Session() as sess:

            datadir = src
            dataset = facenet.get_dataset(datadir)
            paths, labels = facenet.get_image_paths_and_labels(dataset)
            print('Number of classes: %d' % len(dataset))
            print('Number of images: %d' % len(paths))

            print('Loading feature extraction model')
            modeldir = '20170511-185253/20170511-185253.pb'
            facenet.load_model(modeldir)

            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Calculating features for images')
            batch_size = 1000
            image_size = 160
            nrof_images = len(paths)
            nrof_batches_per_epoch = int(
                math.ceil(1.0 * nrof_images / batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_epoch):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False,
                                           image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            classifier_filename = dir
            classifier_filename_exp = os.path.expanduser(classifier_filename)

            # Train classifier
            print('Training classifier')
            model = SVC(kernel='linear', probability=True)
            model.fit(emb_array, labels)

            # Create a list of class names
            class_names = [cls.name.replace('_', ' ') for cls in dataset]
            print(class_names)

            # Saving classifier model
            with open(classifier_filename_exp, 'wb') as outfile:
                pickle.dump((model, class_names), outfile)
            print('Saved classifier model to file "%s"' %
                  classifier_filename_exp)
            print('Goodluck')
Code Example #13
def main(args):

    network = importlib.import_module(args.model_def)
    image_size = (args.image_size, args.image_size)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        try:
            os.makedirs(log_dir)
        except OSError as exc:
            print(exc)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        try:
            os.makedirs(model_dir)
        except OSError as exc:
            print(exc)

    stat_file_name = os.path.join(log_dir, 'stat.h5')

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir,
                                                       'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    dataset = facenet.get_dataset(args.data_dir)
    if args.filter_filename:
        dataset = filter_dataset(dataset,
                                 os.path.expanduser(args.filter_filename),
                                 args.filter_percentile,
                                 args.filter_min_nrof_images_per_class)

    if args.validation_set_split_ratio > 0.0:
        train_set, val_set = facenet.split_dataset(
            dataset, args.validation_set_split_ratio,
            args.min_nrof_val_images_per_class, 'SPLIT_IMAGES')
    else:
        train_set, val_set = dataset, []

    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list) > 0, 'The training set should not be empty'

        val_image_list, val_label_list = facenet.get_image_paths_and_labels(
            val_set)

        # Create a queue that produces indices into the image_list and label_list
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size,
                                                    num_epochs=None,
                                                    shuffle=True,
                                                    seed=None,
                                                    capacity=32)

        index_dequeue_op = index_queue.dequeue_many(
            args.batch_size * args.epoch_size, 'index_dequeue')

        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        image_paths_placeholder = tf.placeholder(tf.string,
                                                 shape=(None, 1),
                                                 name='image_paths')
        labels_placeholder = tf.placeholder(tf.int32,
                                            shape=(None, 1),
                                            name='labels')
        control_placeholder = tf.placeholder(tf.int32,
                                             shape=(None, 1),
                                             name='control')

        nrof_preprocess_threads = 4
        input_queue = data_flow_ops.FIFOQueue(
            capacity=2000000,
            dtypes=[tf.string, tf.int32, tf.int32],
            shapes=[(1, ), (1, ), (1, )],
            shared_name=None,
            name=None)
        enqueue_op = input_queue.enqueue_many(
            [image_paths_placeholder, labels_placeholder, control_placeholder],
            name='enqueue_op')
        image_batch, label_batch = facenet.create_input_pipeline(
            input_queue, image_size, nrof_preprocess_threads,
            batch_size_placeholder)

        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')

        print('Number of classes in training set: %d' % nrof_classes)
        print('Number of examples in training set: %d' % len(image_list))

        print('Number of classes in validation set: %d' % len(val_set))
        print('Number of examples in validation set: %d' % len(val_image_list))

        print('Building training graph')

        # Build the inference graph
        prelogits, _ = network.inference(
            image_batch,
            args.keep_probability,
            phase_train=phase_train_placeholder,
            bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)
        logits = slim.fully_connected(
            prelogits,
            len(train_set),
            activation_fn=None,
            weights_initializer=slim.initializers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(args.weight_decay),
            scope='Logits',
            reuse=False)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Norm for the prelogits
        eps = 1e-4
        prelogits_norm = tf.reduce_mean(
            tf.norm(tf.abs(prelogits) + eps, ord=args.prelogits_norm_p,
                    axis=1))
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                             prelogits_norm * args.prelogits_norm_loss_factor)

        # Add center loss
        prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch,
                                                       args.center_loss_alfa,
                                                       nrof_classes)
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                             prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch,
            logits=logits,
            name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        correct_prediction = tf.cast(
            tf.equal(tf.argmax(logits, 1), tf.cast(label_batch, tf.int64)),
            tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables(), args.log_histograms)

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        config = tf.ConfigProto(gpu_options=gpu_options,
                                log_device_placement=False)
        config.gpu_options.visible_device_list = str(hvd.local_rank())
        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        # Horovod: broadcast initial variable states from rank 0 to all other processes.
        # This is necessary to ensure consistent initialization of all workers when
        # training is started with random weights or restored from a checkpoint.
        bcast = hvd.broadcast_global_variables(0)
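        sess.run(bcast)  # the broadcast op takes effect only when run; typical Horovod TF1 examples run it right after initialization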

        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            nrof_steps = args.max_nrof_epochs * args.epoch_size
            nrof_val_samples = int(
                math.ceil(args.max_nrof_epochs / args.validate_every_n_epochs)
            )  # Validate every validate_every_n_epochs as well as in the last epoch
            stat = {
                'loss':
                np.zeros((nrof_steps, ), np.float32),
                'center_loss':
                np.zeros((nrof_steps, ), np.float32),
                'reg_loss':
                np.zeros((nrof_steps, ), np.float32),
                'xent_loss':
                np.zeros((nrof_steps, ), np.float32),
                'prelogits_norm':
                np.zeros((nrof_steps, ), np.float32),
                'accuracy':
                np.zeros((nrof_steps, ), np.float32),
                'val_loss':
                np.zeros((nrof_val_samples, ), np.float32),
                'val_xent_loss':
                np.zeros((nrof_val_samples, ), np.float32),
                'val_accuracy':
                np.zeros((nrof_val_samples, ), np.float32),
                'lfw_accuracy':
                np.zeros((args.max_nrof_epochs, ), np.float32),
                'lfw_valrate':
                np.zeros((args.max_nrof_epochs, ), np.float32),
                'learning_rate':
                np.zeros((args.max_nrof_epochs, ), np.float32),
                'time_train':
                np.zeros((args.max_nrof_epochs, ), np.float32),
                'time_validate':
                np.zeros((args.max_nrof_epochs, ), np.float32),
                'time_evaluate':
                np.zeros((args.max_nrof_epochs, ), np.float32),
                'prelogits_hist':
                np.zeros((args.max_nrof_epochs, 1000), np.float32),
            }
            for epoch in range(1, args.max_nrof_epochs + 1):
                step = sess.run(global_step, feed_dict=None)
                # Train for one epoch
                t = time.time()
                cont = train(
                    args, sess, epoch, image_list, label_list,
                    index_dequeue_op, enqueue_op, image_paths_placeholder,
                    labels_placeholder, learning_rate_placeholder,
                    phase_train_placeholder, batch_size_placeholder,
                    control_placeholder, global_step, total_loss, train_op,
                    summary_op, summary_writer, regularization_losses,
                    args.learning_rate_schedule_file, stat, cross_entropy_mean,
                    accuracy, learning_rate, prelogits, prelogits_center_loss,
                    args.random_rotate, args.random_crop, args.random_flip,
                    prelogits_norm, args.prelogits_hist_max,
                    args.use_fixed_image_standardization)
                stat['time_train'][epoch - 1] = time.time() - t

                if not cont:
                    break

                t = time.time()
                if len(val_image_list) > 0 and (
                    (epoch - 1) % args.validate_every_n_epochs
                        == args.validate_every_n_epochs - 1
                        or epoch == args.max_nrof_epochs):
                    validate(args, sess, epoch, val_image_list, val_label_list,
                             enqueue_op, image_paths_placeholder,
                             labels_placeholder, control_placeholder,
                             phase_train_placeholder, batch_size_placeholder,
                             stat, total_loss, regularization_losses,
                             cross_entropy_mean, accuracy,
                             args.validate_every_n_epochs,
                             args.use_fixed_image_standardization)
                stat['time_validate'][epoch - 1] = time.time() - t

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, epoch)

                # Evaluate on LFW
                t = time.time()
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder,
                             labels_placeholder, phase_train_placeholder,
                             batch_size_placeholder, control_placeholder,
                             embeddings, label_batch, lfw_paths, actual_issame,
                             args.lfw_batch_size, args.lfw_nrof_folds, log_dir,
                             step, summary_writer, stat, epoch,
                             args.lfw_distance_metric, args.lfw_subtract_mean,
                             args.lfw_use_flipped_images,
                             args.use_fixed_image_standardization)
                stat['time_evaluate'][epoch - 1] = time.time() - t

                print('Saving statistics')
                with h5py.File(stat_file_name, 'w') as f:
                    for key, value in stat.items():
                        f.create_dataset(key, data=value)

    return model_dir
Code Example #14
def main(args):
        train_set = facenet.get_dataset(args.data_dir)
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        # fetch the classes (labels as strings) exactly as it's done in get_dataset
        path_exp = os.path.expanduser(args.data_dir)
        classes = [path for path in os.listdir(path_exp) \
                if os.path.isdir(os.path.join(path_exp, path))]
        classes.sort()
        # get the label strings
        label_strings = [name for name in classes if \
        os.path.isdir(os.path.join(path_exp, name))]

        with tf.Graph().as_default():

            with tf.Session() as sess:

                # Load the model
                facenet.load_model(args.model_dir)

                # Get input and output tensors
                images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
                embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
                phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

                # Run forward pass to calculate embeddings
                nrof_images = len(image_list)
                print('Number of images: ', nrof_images)
                batch_size = args.image_batch
                if nrof_images % batch_size == 0:
                    nrof_batches = nrof_images // batch_size
                else:
                    nrof_batches = (nrof_images // batch_size) + 1
                print('Number of batches: ', nrof_batches)
                embedding_size = embeddings.get_shape()[1]
                emb_array = np.zeros((nrof_images, embedding_size))
                start_time = time.time()

                for i in range(nrof_batches):
                    if i == nrof_batches - 1:
                        n = nrof_images
                    else:
                        n = i*batch_size + batch_size
                    # Get images for the batch
                    if args.is_aligned is True:
                        images = facenet.load_data(image_list[i*batch_size:n], False, False, args.image_size)
                    else:
                        images = load_and_align_data(image_list[i*batch_size:n], args.image_size, args.margin, args.gpu_memory_fraction)
                    feed_dict = { images_placeholder: images, phase_train_placeholder: False }
                    # Use the facenet model to calculate embeddings
                    embed = sess.run(embeddings, feed_dict=feed_dict)
                    emb_array[i*batch_size:n, :] = embed
                    print('Completed batch', i+1, 'of', nrof_batches)

                run_time = time.time() - start_time
                print('Run time: ', run_time)

                # Export embeddings and labels
                label_list = np.array(label_list)

                np.save(args.embeddings_name, emb_array)
                np.save(args.labels_name, label_list)
                label_strings = np.array(label_strings)
                np.save(args.labels_strings_name, label_strings[label_list])


def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):

    minsize = 20 # minimum size of face
    threshold = [ 0.6, 0.7, 0.7 ]  # threshold for each of the three MTCNN stages
    factor = 0.709 # scale factor

    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    nrof_samples = len(image_paths)
    img_list = [None] * nrof_samples
    for i in xrange(nrof_samples):
        print(image_paths[i])
        img = misc.imread(os.path.expanduser(image_paths[i]))
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
        det = np.squeeze(bounding_boxes[0,0:4])
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0]-margin/2, 0)
        bb[1] = np.maximum(det[1]-margin/2, 0)
        bb[2] = np.minimum(det[2]+margin/2, img_size[1])
        bb[3] = np.minimum(det[3]+margin/2, img_size[0])
        cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
        aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
        prewhitened = facenet.prewhiten(aligned)
        img_list[i] = prewhitened
    images = np.stack(img_list)
    return images

def parse_arguments(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('model_dir', type=str,
        help='Directory containing the meta_file and ckpt_file')
    parser.add_argument('data_dir', type=str,
        help='Directory containing images. If images are not already aligned and cropped include --is_aligned False.')
    parser.add_argument('--is_aligned', type=str,
        help='Is the data directory already aligned and cropped?', default=True)
    parser.add_argument('--image_size', type=int,
        help='Image size (height, width) in pixels.', default=160)
    parser.add_argument('--margin', type=int,
        help='Margin for the crop around the bounding box (height, width) in pixels.',
        default=44)
    parser.add_argument('--gpu_memory_fraction', type=float,
        help='Upper bound on the amount of GPU memory that will be used by the process.',
        default=1.0)
    parser.add_argument('--image_batch', type=int,
        help='Number of images stored in memory at a time. Default 500.',
        default=500)

    #   numpy file Names
    parser.add_argument('--embeddings_name', type=str,
        help='Enter string of which the embeddings numpy array is saved as.',
        default='embeddings.npy')
    parser.add_argument('--labels_name', type=str,
        help='Enter string of which the labels numpy array is saved as.',
        default='labels.npy')
    parser.add_argument('--labels_strings_name', type=str,
        help='Enter string of which the labels as strings numpy array is saved as.',
        default='label_strings.npy')
    return parser.parse_args(argv)

if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
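
A hypothetical invocation of the script above (both paths are placeholders for a real model directory and an aligned image directory):

python export_embeddings.py ~/models/20170511-185253 ~/datasets/faces_aligned --image_batch 500
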
Code Example #15
File: fine_tune.py  Project: Wayfear/AutoTune_v3
def main(args):
    project_dir = os.path.dirname(os.getcwd())
    network = importlib.import_module(args.model_def)

    with open(join(project_dir, 'config.yaml'), 'r') as f:
        cfg = yaml.safe_load(f)

    if cfg['specs']['set_gpu']:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg['base_conf']['gpu_num'])

    subdir = '%s_center_loss_factor_%1.2f' % (args.data_dir,
                                              args.center_loss_factor)

    # test = os.path.expanduser(args.logs_base_dir)
    log_dir = os.path.join(project_dir, 'fine_tuning_process', 'logs', subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(project_dir, 'fine_tuning_process', 'models',
                             subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir,
                                                       'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    data_dir = os.path.join(project_dir, 'fine_tuning_process', 'data',
                            args.data_dir, 'train')
    train_set = facenet.get_dataset(data_dir)
    if args.filter_filename:
        train_set = filter_dataset(train_set,
                                   os.path.expanduser(args.filter_filename),
                                   args.filter_percentile,
                                   args.filter_min_nrof_images_per_class)
    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = os.path.join(project_dir, 'fine_tuning_process',
                                    'models',
                                    cfg['model_map'][args.embedding_size])
    print('Pre-trained model: %s' % pretrained_model)

    # if args.lfw_dir:
    lfw_dir = os.path.join(project_dir, 'fine_tuning_process', 'data',
                           args.data_dir, 'test')
    print('LFW directory: %s' % lfw_dir)
    # Read the file containing the pairs used for testing
    lfw_pairs = os.path.join(project_dir, 'fine_tuning_process', 'data',
                             args.data_dir, 'pairs.txt')
    pairs = lfw.read_pairs(lfw_pairs)
    # Get the paths for the corresponding images
    lfw_paths, actual_issame = lfw.get_paths_personal(lfw_dir, pairs)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # if args.soft_label:
        #     # TODO: read conf
        #     with open(args.soft_label, 'rb') as f:
        #         confidence_sorce = pickle.load(f)
        #     image_list, label_list = facenet.get_image_paths_and_soft_labels(train_set, confidence_sorce)
        # else:
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)

        # Get a list of image paths and their labels
        # image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        # image_list, label_list, nrof_classes = get_image_paths_and_labels('data')
        assert len(image_list) > 0, 'The dataset should not be empty'

        # Create a queue that produces indices into the image_list and label_list
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size,
                                                    num_epochs=None,
                                                    shuffle=True,
                                                    seed=None,
                                                    capacity=32)

        index_dequeue_op = index_queue.dequeue_many(
            args.batch_size * args.epoch_size, 'index_dequeue')

        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        image_paths_placeholder = tf.placeholder(tf.string,
                                                 shape=(None, 1),
                                                 name='image_paths')

        labels_placeholder = tf.placeholder(tf.int64,
                                            shape=(None, 1),
                                            name='labels')

        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                              dtypes=[tf.string, tf.int64],
                                              shapes=[(1, ), (1, )],
                                              shared_name=None,
                                              name=None)
        enqueue_op = input_queue.enqueue_many(
            [image_paths_placeholder, labels_placeholder], name='enqueue_op')

        nrof_preprocess_threads = 4
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_image(file_contents, channels=3)
                if args.random_rotate:
                    image = tf.py_func(facenet.random_rotate_image, [image],
                                       tf.uint8)
                if args.random_crop:
                    image = tf.random_crop(
                        image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(
                        image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)

                # pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])

        image_batch, label_batch = tf.train.batch_join(
            images_and_labels,
            batch_size=batch_size_placeholder,
            shapes=[(args.image_size, args.image_size, 3), ()],
            enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)
        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')

        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))

        print('Building training graph')

        # Build the inference graph
        prelogits, _ = network.inference(
            image_batch,
            args.keep_probability,
            phase_train=phase_train_placeholder,
            bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)
        # fine_tuning = slim.fully_connected(prelogits, args.embedding_size, activation_fn=None,
        #                            scope='FineTuning', reuse=False, trainable=True)

        logits = slim.fully_connected(
            prelogits,
            nrof_classes,
            activation_fn=None,
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(args.weight_decay),
            scope='Logits',
            reuse=False)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Add center loss
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, _ = facenet.center_loss(
                prelogits, label_batch, args.center_loss_alfa, nrof_classes)
            tf.add_to_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES,
                prelogits_center_loss * args.center_loss_factor)
            tf.summary.scalar('prelogits_center_loss', prelogits_center_loss)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch,
            logits=logits,
            name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables(), args.log_histograms)

        # Create a saver
        all_vars = tf.trainable_variables()
        var_to_restore = [
            v for v in all_vars if not v.name.startswith('Logits')
        ]
        saver = tf.train.Saver(var_to_restore, max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():
            if args.pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)
                result = sess.graph.get_tensor_by_name(
                    "InceptionResnetV1/Bottleneck/weights:0")
                pre = sess.graph.get_tensor_by_name(
                    "InceptionResnetV1/Block8/Branch_1/Conv2d_0c_3x1/weights:0"
                )
                # tf.stop_gradient(persisted_result)
                # print(result.eval())
                # print("======")
                # print(pre.eval())

            # Training and validation loop
            print('Running training')
            epoch = 0
            pre_acc = -1
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, image_list, label_list,
                      index_dequeue_op, enqueue_op, image_paths_placeholder,
                      labels_placeholder, learning_rate_placeholder,
                      phase_train_placeholder, batch_size_placeholder,
                      global_step, total_loss, train_op, summary_op,
                      summary_writer, regularization_losses,
                      args.learning_rate_schedule_file, logits)
                # print(result.eval())
                # print("======")
                # print(pre.eval())

                # Save variables and the metagraph if it doesn't exist already
                # Evaluate on LFW
                if lfw_dir:
                    acc = evaluate(sess, enqueue_op, image_paths_placeholder,
                                   labels_placeholder, phase_train_placeholder,
                                   batch_size_placeholder, embeddings,
                                   label_batch, lfw_paths, actual_issame,
                                   args.lfw_batch_size, args.lfw_nrof_folds,
                                   log_dir, step, summary_writer, total_loss,
                                   prelogits_center_loss)
                if acc > pre_acc:
                    save_variables_and_metagraph(sess, saver, summary_writer,
                                                 model_dir, subdir, step)
                    pre_acc = acc
    return model_dir
Code Example #16
def main(args):

    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)

        # Read data and apply label preserving distortions
        image_batch, label_batch = facenet.read_and_augument_data(
            image_list, label_list, args.image_size, args.batch_size,
            args.max_nrof_epochs, args.random_crop, args.random_flip,
            args.nrof_preprocess_threads)
        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))

        print('Building training graph')

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        # Build the inference graph
        prelogits, _ = network.inference(image_batch,
                                         args.keep_probability,
                                         phase_train=True,
                                         weight_decay=args.weight_decay)
        logits = slim.fully_connected(
            prelogits,
            len(train_set),
            activation_fn=None,
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(args.weight_decay),
            scope='Logits',
            reuse=False)

        # Add DeCov regularization loss
        if args.decov_loss_factor > 0.0:
            logits_decov_loss = facenet.decov_loss(
                logits) * args.decov_loss_factor
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                 logits_decov_loss)

        # Add center loss
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, _ = facenet.center_loss(
                prelogits, label_batch, args.center_loss_alfa, nrof_classes)
            tf.add_to_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES,
                prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.scalar_summary('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits, label_batch, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.all_variables(), args.log_histograms)

        # Evaluation
        print('Building evaluation graph')
        lfw_label_list = range(0, len(lfw_paths))
        assert (
            len(lfw_paths) % args.lfw_batch_size == 0
        ), "The number of images in the LFW test set needs to be divisible by the lfw_batch_size"
        eval_image_batch, eval_label_batch = facenet.read_and_augument_data(
            lfw_paths,
            lfw_label_list,
            args.image_size,
            args.lfw_batch_size,
            None,
            False,
            False,
            args.nrof_preprocess_threads,
            shuffle=False)
        # Node for input images
        eval_image_batch.set_shape((None, args.image_size, args.image_size, 3))
        eval_image_batch = tf.identity(eval_image_batch, name='input')
        eval_prelogits, _ = network.inference(eval_image_batch,
                                              1.0,
                                              phase_train=False,
                                              weight_decay=0.0,
                                              reuse=True)
        eval_embeddings = tf.nn.l2_normalize(eval_prelogits,
                                             1,
                                             1e-10,
                                             name='embeddings')

        # Create a saver
        saver = tf.train.Saver(tf.all_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        sess.run(tf.initialize_all_variables())
        sess.run(tf.initialize_local_variables())
        summary_writer = tf.train.SummaryWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, learning_rate_placeholder,
                      global_step, total_loss, train_op, summary_op,
                      summary_writer, regularization_losses,
                      args.learning_rate_schedule_file)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, eval_embeddings, eval_label_batch,
                             actual_issame, args.lfw_batch_size, args.seed,
                             args.lfw_nrof_folds, log_dir, step,
                             summary_writer)

    return model_dir
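For reference, with staircase=True the learning-rate schedule built above is a step function of the global step, with decay_steps = args.learning_rate_decay_epochs * args.epoch_size. A plain-Python sketch of what tf.train.exponential_decay computes (the numeric values are illustrative):

# Plain-Python sketch of staircased exponential decay.
def decayed_learning_rate(base_lr, global_step, decay_steps, decay_factor):
    # Integer division keeps the exponent constant within each decay interval
    return base_lr * decay_factor ** (global_step // decay_steps)

assert decayed_learning_rate(0.1, 999, 1000, 0.9) == 0.1
assert abs(decayed_learning_rate(0.1, 1000, 1000, 0.9) - 0.09) < 1e-12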
Code example #17
def main(args):

    img_mean = np.array([134.10714722, 102.52040863, 87.15436554])
    img_stddev = np.sqrt(
        np.array([3941.30175781, 2856.94287109, 2519.35791016]))

    vae_def = importlib.import_module(args.vae_def)
    vae = vae_def.Vae(args.latent_var_size)
    gen_image_size = vae.get_image_size()

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)
    log_file_name = os.path.join(model_dir, 'logs.h5')

    # Write arguments to a text file
    facenet.write_arguments_to_file(args,
                                    os.path.join(model_dir, 'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, model_dir, ' '.join(sys.argv))

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        train_set = facenet.get_dataset(args.data_dir)
        image_list, _ = facenet.get_image_paths_and_labels(train_set)

        # Create the input queue
        input_queue = tf.train.string_input_producer(image_list, shuffle=True)

        nrof_preprocess_threads = 4
        image_per_thread = []
        for _ in range(nrof_preprocess_threads):
            file_contents = tf.read_file(input_queue.dequeue())
            image = tf.image.decode_image(file_contents, channels=3)
            image = tf.image.resize_image_with_crop_or_pad(
                image, args.input_image_size, args.input_image_size)
            image.set_shape((args.input_image_size, args.input_image_size, 3))
            image = tf.cast(image, tf.float32)
            #pylint: disable=no-member
            image_per_thread.append([image])

        images = tf.train.batch_join(image_per_thread,
                                     batch_size=args.batch_size,
                                     capacity=4 * nrof_preprocess_threads *
                                     args.batch_size,
                                     allow_smaller_final_batch=False)

        # Normalize
        images_norm = (images - img_mean) / img_stddev

        # Resize to appropriate size for the encoder
        images_norm_resize = tf.image.resize_images(
            images_norm, (gen_image_size, gen_image_size))

        # Create encoder network
        mean, log_variance = vae.encoder(images_norm_resize, True)

        epsilon = tf.random_normal((tf.shape(mean)[0], args.latent_var_size))
        std = tf.exp(log_variance / 2)
        latent_var = mean + epsilon * std

        # Create decoder network
        reconstructed_norm = vae.decoder(latent_var, True)

        # Un-normalize
        reconstructed = (reconstructed_norm * img_stddev) + img_mean

        # Create reconstruction loss
        if args.reconstruction_loss_type == 'PLAIN':
            images_resize = tf.image.resize_images(
                images, (gen_image_size, gen_image_size))
            reconstruction_loss = tf.reduce_mean(
                tf.reduce_sum(tf.pow(images_resize - reconstructed, 2)))
        elif args.reconstruction_loss_type == 'PERCEPTUAL':
            network = importlib.import_module(args.model_def)

            reconstructed_norm_resize = tf.image.resize_images(
                reconstructed_norm,
                (args.input_image_size, args.input_image_size))

            # Stack images from both the input batch and the reconstructed batch in a new tensor
            shp = [-1] + images_norm.get_shape().as_list()[1:]
            input_images = tf.reshape(
                tf.stack([images_norm, reconstructed_norm_resize], axis=0),
                shp)
            _, end_points = network.inference(input_images,
                                              1.0,
                                              phase_train=False,
                                              bottleneck_layer_size=128,
                                              weight_decay=0.0)

            # Get a list of feature names to use for loss terms
            feature_names = args.loss_features.replace(' ', '').split(',')

            # Calculate L2 loss between original and reconstructed images in feature space
            reconstruction_loss_list = []
            for feature_name in feature_names:
                feature_flat = slim.flatten(end_points[feature_name])
                image_feature, reconstructed_feature = tf.unstack(tf.reshape(
                    feature_flat, [2, args.batch_size, -1]),
                                                                  num=2,
                                                                  axis=0)
                reconstruction_loss = tf.reduce_mean(tf.reduce_sum(
                    tf.pow(image_feature - reconstructed_feature, 2)),
                                                     name=feature_name +
                                                     '_loss')
                reconstruction_loss_list.append(reconstruction_loss)
            # Sum up the losses for the different features
            reconstruction_loss = tf.add_n(reconstruction_loss_list,
                                           'reconstruction_loss')
        else:
            # Fail fast instead of leaving reconstruction_loss undefined
            raise ValueError('Invalid reconstruction loss type: %s' %
                             args.reconstruction_loss_type)

        # Create KL divergence loss
        kl_loss = kl_divergence_loss(mean, log_variance)
        kl_loss_mean = tf.reduce_mean(kl_loss)

        total_loss = args.alfa * kl_loss_mean + args.beta * reconstruction_loss

        learning_rate = tf.train.exponential_decay(
            args.initial_learning_rate,
            global_step,
            args.learning_rate_decay_steps,
            args.learning_rate_decay_factor,
            staircase=True)

        # Calculate gradients and make sure not to include parameters for the perceptual loss model
        opt = tf.train.AdamOptimizer(learning_rate)
        grads = opt.compute_gradients(total_loss,
                                      var_list=get_variables_to_train())

        # Apply gradients
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        with tf.control_dependencies([apply_gradient_op]):
            train_op = tf.no_op(name='train')

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        facenet_saver = tf.train.Saver(get_facenet_variables_to_restore())

        # Start running operations on the Graph
        gpu_memory_fraction = 1.0
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if args.reconstruction_loss_type == 'PERCEPTUAL':
                if not args.pretrained_model:
                    raise ValueError(
                        'A pretrained model must be specified when using perceptual loss'
                    )
                pretrained_model_exp = os.path.expanduser(
                    args.pretrained_model)
                print('Restoring pretrained model: %s' % pretrained_model_exp)
                facenet_saver.restore(sess, pretrained_model_exp)

            log = {
                'total_loss': np.zeros((0, ), np.float64),
                'reconstruction_loss': np.zeros((0, ), np.float64),
                'kl_loss': np.zeros((0, ), np.float64),
                'learning_rate': np.zeros((0, ), np.float64),
            }

            step = 0
            print('Running training')
            while step < args.max_nrof_steps:
                start_time = time.time()
                step += 1
                save_state = step > 0 and (step % args.save_every_n_steps == 0
                                           or step == args.max_nrof_steps)
                if save_state:
                    _, reconstruction_loss_, kl_loss_mean_, total_loss_, learning_rate_, rec_ = sess.run(
                        [
                            train_op, reconstruction_loss, kl_loss_mean,
                            total_loss, learning_rate, reconstructed
                        ])
                    img = facenet.put_images_on_grid(rec_, shape=(16, 8))
                    misc.imsave(
                        os.path.join(model_dir,
                                     'reconstructed_%06d.png' % step), img)
                else:
                    _, reconstruction_loss_, kl_loss_mean_, total_loss_, learning_rate_ = sess.run(
                        [
                            train_op, reconstruction_loss, kl_loss_mean,
                            total_loss, learning_rate
                        ])
                log['total_loss'] = np.append(log['total_loss'], total_loss_)
                log['reconstruction_loss'] = np.append(
                    log['reconstruction_loss'], reconstruction_loss_)
                log['kl_loss'] = np.append(log['kl_loss'], kl_loss_mean_)
                log['learning_rate'] = np.append(log['learning_rate'],
                                                 learning_rate_)

                duration = time.time() - start_time
                print(
                    'Step: %d \tTime: %.3f \trec_loss: %.3f \tkl_loss: %.3f \ttotal_loss: %.3f'
                    % (step, duration, reconstruction_loss_, kl_loss_mean_,
                       total_loss_))

                if save_state:
                    print('Saving checkpoint file')
                    checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                    saver.save(sess,
                               checkpoint_path,
                               global_step=step,
                               write_meta_graph=False)
                    print('Saving log')
                    with h5py.File(log_file_name, 'w') as f:
                        for key, value in iteritems(log):
                            f.create_dataset(key, data=value)
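kl_divergence_loss is called above but its body is not part of this excerpt. The textbook term it would compute is the closed-form KL divergence between the encoder distribution N(mean, exp(log_variance)) and a standard normal prior; a sketch under that assumption (the actual facenet helper may differ in naming or reduction):

# Hedged sketch of a per-example VAE KL term:
#   KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * sum(sigma^2 + mu^2 - 1 - log(sigma^2))
import tensorflow as tf

def kl_divergence_loss(mean, log_variance):
    return 0.5 * tf.reduce_sum(
        tf.exp(log_variance) + tf.square(mean) - 1.0 - log_variance, axis=1)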
Code example #18
def main(args):
    train_set = facenet.get_dataset(args.data_dir)
    image_list, label_list = facenet.get_image_paths_and_labels(train_set)
    # Fetch the class names (labels as strings) exactly as get_dataset does
    path_exp = os.path.expanduser(args.data_dir)
    classes = [path for path in os.listdir(path_exp)
               if os.path.isdir(os.path.join(path_exp, path))]
    classes.sort()
    # The sorted directory names are the label strings
    label_strings = classes

    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Load the model
            facenet.load_model(args.model_dir)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            nrof_images = len(image_list)
            print('Number of images: ', nrof_images)
            batch_size = args.image_batch
            if nrof_images % batch_size == 0:
                nrof_batches = nrof_images // batch_size
            else:
                nrof_batches = (nrof_images // batch_size) + 1
            print('Number of batches: ', nrof_batches)
            embedding_size = embeddings.get_shape()[1]
            emb_array = np.zeros((nrof_images, embedding_size))
            start_time = time.time()

            for i in range(nrof_batches):
                if i == nrof_batches - 1:
                    n = nrof_images
                else:
                    n = i*batch_size + batch_size
                # Get images for the batch
                images = facenet.load_data(image_list[i*batch_size:n], False, False, args.image_size)
                print('shapes of images', np.shape(images))

                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                # Use the facenet model to calculate embeddings
                embed = sess.run(embeddings, feed_dict=feed_dict)
                emb_array[i*batch_size:n, :] = embed
                print('Completed batch', i+1, 'of', nrof_batches)

            run_time = time.time() - start_time
            print('Run time: ', run_time)

            # Export embeddings and labels
            label_list = np.array(label_list)

            np.save(args.embeddings_name, emb_array)
            np.save(args.labels_name, label_list)
            label_strings = np.array(label_strings)
            np.save(args.labels_strings_name, label_strings[label_list])
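The final np.save call relies on numpy fancy indexing to expand the per-class name array into one string per image. In miniature, with toy data:

# label_strings[label_list] maps integer labels to class names per image.
import numpy as np

label_strings = np.array(['alice', 'bob', 'carol'])
label_list = np.array([0, 0, 2, 1, 2])
print(label_strings[label_list])  # ['alice' 'alice' 'carol' 'bob' 'carol']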
Code example #19
def main(args):

    with tf.Graph().as_default():

        with tf.Session() as sess:

            np.random.seed(seed=args.seed)

            if args.use_split_dataset:
                dataset_tmp = facenet.get_dataset(args.data_dir)
                train_set, test_set = split_dataset(dataset_tmp, args.min_nrof_images_per_class, args.nrof_train_images_per_class)
                if (args.mode == 'TRAIN'):
                    dataset = train_set
                elif (args.mode == 'CLASSIFY'):
                    dataset = test_set
            else:
                dataset = facenet.get_dataset(args.data_dir)

            # Check that there is at least one training image per class
            for cls in dataset:
                # An assert on a tuple is always true; keep the message as a
                # second operand of the assert statement instead
                assert len(cls.image_paths) > 0, 'There must be at least one image for each class in the dataset'

            paths, labels = facenet.get_image_paths_and_labels(dataset)

            print('Number of classes: %d' % len(dataset))
            print('Number of images: %d' % len(paths))

            # Load the model
            print('Loading feature extraction model')
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Calculating features for images')
            nrof_images = len(paths)
            nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images / args.batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_epoch):
                start_index = i * args.batch_size
                end_index = min((i + 1) * args.batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, args.image_size)
                feed_dict = {images_placeholder: images, phase_train_placeholder: False}
                emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

            classifier_filename_exp = os.path.expanduser(args.classifier_filename)

            if (args.mode == 'TRAIN'):
                # Train classifier
                print('Training classifier')
                model = SVC(kernel='linear', probability=True)
                model.fit(emb_array, labels)

                # Create a list of class names
                class_names = [cls.name.replace('_', ' ') for cls in dataset]

                # Saving classifier model
                with open(classifier_filename_exp, 'wb') as outfile:
                    pickle.dump((model, class_names), outfile)
                print('Saved classifier model to file "%s"' % classifier_filename_exp)

            elif (args.mode == 'CLASSIFY'):
                # Classify images
                print('Testing classifier')
                with open(classifier_filename_exp, 'rb') as infile:
                    (model, class_names) = pickle.load(infile)

                print('Loaded classifier model from file "%s"' % classifier_filename_exp)

                predictions = model.predict_proba(emb_array)
                best_class_indices = np.argmax(predictions, axis=1)
                best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]

                for i in range(len(best_class_indices)):
                    print('%4d  %s: %.3f' % (i, class_names[best_class_indices[i]], best_class_probabilities[i]))

                accuracy = np.mean(np.equal(best_class_indices, labels))
                print('Accuracy: %.3f' % accuracy)
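The CLASSIFY branch reduces the predict_proba matrix to one (class, confidence) pair per image with an argmax and a fancy-indexing gather; a toy illustration:

# Reducing class probabilities to the best class and its confidence (toy data).
import numpy as np

predictions = np.array([[0.1, 0.7, 0.2],
                        [0.8, 0.1, 0.1]])
best_class_indices = np.argmax(predictions, axis=1)            # [1, 0]
best_class_probabilities = predictions[
    np.arange(len(best_class_indices)), best_class_indices]    # [0.7, 0.8]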
Code example #20
File: train_vae.py Project: NickyGeorge/facenet
def main(args):
  
    img_mean = np.array([134.10714722, 102.52040863, 87.15436554])
    img_stddev = np.sqrt(np.array([3941.30175781, 2856.94287109, 2519.35791016]))
  
    vae_def = importlib.import_module(args.vae_def)
    vae = vae_def.Vae(args.latent_var_size)
    gen_image_size = vae.get_image_size()

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)
    log_file_name = os.path.join(model_dir, 'logs.h5')
    
    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(model_dir, 'arguments.txt'))
        
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, model_dir, ' '.join(sys.argv))
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        
        train_set = facenet.get_dataset(args.data_dir)
        image_list, _ = facenet.get_image_paths_and_labels(train_set)
        
        # Create the input queue
        input_queue = tf.train.string_input_producer(image_list, shuffle=True)
    
        nrof_preprocess_threads = 4
        image_per_thread = []
        for _ in range(nrof_preprocess_threads):
            file_contents = tf.read_file(input_queue.dequeue())
            image = tf.image.decode_image(file_contents, channels=3)
            image = tf.image.resize_image_with_crop_or_pad(image, args.input_image_size, args.input_image_size)
            image.set_shape((args.input_image_size, args.input_image_size, 3))
            image = tf.cast(image, tf.float32)
            #pylint: disable=no-member
            image_per_thread.append([image])
    
        images = tf.train.batch_join(
            image_per_thread, batch_size=args.batch_size,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=False)
        
        # Normalize
        images_norm = (images-img_mean) / img_stddev

        # Resize to appropriate size for the encoder 
        images_norm_resize = tf.image.resize_images(images_norm, (gen_image_size,gen_image_size))
        
        # Create encoder network
        mean, log_variance = vae.encoder(images_norm_resize, True)
        
        epsilon = tf.random_normal((tf.shape(mean)[0], args.latent_var_size))
        std = tf.exp(log_variance/2)
        latent_var = mean + epsilon * std
        
        # Create decoder network
        reconstructed_norm = vae.decoder(latent_var, True)
        
        # Un-normalize
        reconstructed = (reconstructed_norm*img_stddev) + img_mean
        
        # Create reconstruction loss
        if args.reconstruction_loss_type=='PLAIN':
            images_resize = tf.image.resize_images(images, (gen_image_size,gen_image_size))
            reconstruction_loss = tf.reduce_mean(tf.reduce_sum(tf.pow(images_resize - reconstructed,2)))
        elif args.reconstruction_loss_type=='PERCEPTUAL':
            network = importlib.import_module(args.model_def)

            reconstructed_norm_resize = tf.image.resize_images(reconstructed_norm, (args.input_image_size,args.input_image_size))

            # Stack images from both the input batch and the reconstructed batch in a new tensor 
            shp = [-1] + images_norm.get_shape().as_list()[1:]
            input_images = tf.reshape(tf.stack([images_norm, reconstructed_norm_resize], axis=0), shp)
            _, end_points = network.inference(input_images, 1.0, 
                phase_train=False, bottleneck_layer_size=128, weight_decay=0.0)

            # Get a list of feature names to use for loss terms
            feature_names = args.loss_features.replace(' ', '').split(',')

            # Calculate L2 loss between original and reconstructed images in feature space
            reconstruction_loss_list = []
            for feature_name in feature_names:
                feature_flat = slim.flatten(end_points[feature_name])
                image_feature, reconstructed_feature = tf.unstack(tf.reshape(feature_flat, [2,args.batch_size,-1]), num=2, axis=0)
                reconstruction_loss = tf.reduce_mean(tf.reduce_sum(tf.pow(image_feature-reconstructed_feature, 2)), name=feature_name+'_loss')
                reconstruction_loss_list.append(reconstruction_loss)
            # Sum up the losses for the different features
            reconstruction_loss = tf.add_n(reconstruction_loss_list, 'reconstruction_loss')
        else:
            # Fail fast instead of leaving reconstruction_loss undefined
            raise ValueError('Invalid reconstruction loss type: %s' % args.reconstruction_loss_type)
        
        # Create KL divergence loss
        kl_loss = kl_divergence_loss(mean, log_variance)
        kl_loss_mean = tf.reduce_mean(kl_loss)
        
        total_loss = args.alfa*kl_loss_mean + args.beta*reconstruction_loss
        
        learning_rate = tf.train.exponential_decay(args.initial_learning_rate, global_step,
            args.learning_rate_decay_steps, args.learning_rate_decay_factor, staircase=True)
        
        # Calculate gradients and make sure not to include parameters for the perceptual loss model
        opt = tf.train.AdamOptimizer(learning_rate)
        grads = opt.compute_gradients(total_loss, var_list=get_variables_to_train())
        
        # Apply gradients
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        with tf.control_dependencies([apply_gradient_op]):
            train_op = tf.no_op(name='train')

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
        
        facenet_saver = tf.train.Saver(get_facenet_variables_to_restore())

        # Start running operations on the Graph
        gpu_memory_fraction = 1.0
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():
            
            if args.reconstruction_loss_type=='PERCEPTUAL':
                if not args.pretrained_model:
                    raise ValueError('A pretrained model must be specified when using perceptual loss')
                pretrained_model_exp = os.path.expanduser(args.pretrained_model)
                print('Restoring pretrained model: %s' % pretrained_model_exp)
                facenet_saver.restore(sess, pretrained_model_exp)
          
            log = {
                'total_loss': np.zeros((0,), np.float64),
                'reconstruction_loss': np.zeros((0,), np.float64),
                'kl_loss': np.zeros((0,), np.float64),
                'learning_rate': np.zeros((0,), np.float64),
                }
            
            step = 0
            print('Running training')
            while step < args.max_nrof_steps:
                start_time = time.time()
                step += 1
                save_state = step>0 and (step % args.save_every_n_steps==0 or step==args.max_nrof_steps)
                if save_state:
                    _, reconstruction_loss_, kl_loss_mean_, total_loss_, learning_rate_, rec_ = sess.run(
                          [train_op, reconstruction_loss, kl_loss_mean, total_loss, learning_rate, reconstructed])
                    img = facenet.put_images_on_grid(rec_, shape=(16,8))
                    misc.imsave(os.path.join(model_dir, 'reconstructed_%06d.png' % step), img)
                else:
                    _, reconstruction_loss_, kl_loss_mean_, total_loss_, learning_rate_ = sess.run(
                          [train_op, reconstruction_loss, kl_loss_mean, total_loss, learning_rate])
                log['total_loss'] = np.append(log['total_loss'], total_loss_)
                log['reconstruction_loss'] = np.append(log['reconstruction_loss'], reconstruction_loss_)
                log['kl_loss'] = np.append(log['kl_loss'], kl_loss_mean_)
                log['learning_rate'] = np.append(log['learning_rate'], learning_rate_)

                duration = time.time() - start_time
                print('Step: %d \tTime: %.3f \trec_loss: %.3f \tkl_loss: %.3f \ttotal_loss: %.3f' % (step, duration, reconstruction_loss_, kl_loss_mean_, total_loss_))

                if save_state:
                    print('Saving checkpoint file')
                    checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)
                    print('Saving log')
                    with h5py.File(log_file_name, 'w') as f:
                        for key, value in log.items():
                            f.create_dataset(key, data=value)
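The per-channel normalization at the top of this example is an affine transform that the un-normalization step inverts exactly; in numpy terms, with the same hard-coded dataset statistics and random toy images:

# Per-channel normalization and its inverse (numpy sketch).
import numpy as np

img_mean = np.array([134.10714722, 102.52040863, 87.15436554])
img_stddev = np.sqrt(np.array([3941.30175781, 2856.94287109, 2519.35791016]))

images = np.random.uniform(0, 255, size=(2, 64, 64, 3))
images_norm = (images - img_mean) / img_stddev   # broadcasts over the channel axis
restored = images_norm * img_stddev + img_mean
assert np.allclose(images, restored)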
Code example #21
File: train_softmax.py Project: rwchan13/facenet
def main(args):
  
    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    if args.filter_filename:
        train_set = filter_dataset(train_set, os.path.expanduser(args.filter_filename), 
            args.filter_percentile, args.filter_min_nrof_images_per_class)
    nrof_classes = len(train_set)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        
        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list)>0, 'The dataset should not be empty'
        
        # Create a queue that produces indices into the image_list and label_list 
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
                             shuffle=True, seed=None, capacity=32)
        
        index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
        
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')

        labels_placeholder = tf.placeholder(tf.int64, shape=(None,1), name='labels')
        
        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                    dtypes=[tf.string, tf.int64],
                                    shapes=[(1,), (1,)],
                                    shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder], name='enqueue_op')
        
        nrof_preprocess_threads = 4
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_png(file_contents)
                if args.random_rotate:
                    image = tf.py_func(facenet.random_rotate_image, [image], tf.uint8)
                if args.random_crop:
                    image = tf.random_crop(image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)
    
                #pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])
    
        image_batch, label_batch = tf.train.batch_join(
            images_and_labels, batch_size=batch_size_placeholder, 
            shapes=[(args.image_size, args.image_size, 3), ()], enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)
        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')
        
        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))
        
        print('Building training graph')
        
        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability, 
            phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size, 
            weight_decay=args.weight_decay)
        logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None, 
                weights_initializer=tf.truncated_normal_initializer(stddev=0.1), 
                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                scope='Logits', reuse=False)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Add center loss
        if args.center_loss_factor>0.0:
            prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)
        
        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
        
        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step, 
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, 
                        embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer)
    sess.close()
    return model_dir
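Each decoded image above is passed through tf.image.per_image_standardization before batching. Its documented behavior, sketched in numpy (zero mean per image, with the stddev floored so flat images do not divide by zero):

# Numpy sketch of tf.image.per_image_standardization's documented behavior.
import numpy as np

def per_image_standardization(image):
    mean = image.mean()
    # TF floors the denominator at 1/sqrt(number of elements)
    adjusted_stddev = max(image.std(), 1.0 / np.sqrt(image.size))
    return (image - mean) / adjusted_stddev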
Code example #22
def main(args):
  
    network = importlib.import_module(args.model_def)
    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir): 
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir): 
        os.makedirs(model_dir)

    np.random.seed(seed=args.seed)
    random.seed(args.seed)        
    train_set = facenet.get_dataset(args.data_dir)
    
    if args.filter_filename:
        train_set = filter_dataset(train_set, os.path.expanduser(args.filter_filename), 
            args.filter_percentile, args.filter_min_nrof_images_per_class)
    nrof_classes = len(train_set)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list)>0, 'The dataset should not be empty'
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
                             shuffle=True, seed=None, capacity=32)
        index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
        
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')

        labels_placeholder = tf.placeholder(tf.int64, shape=(None,1), name='labels')
               
        input_queue = data_flow_ops.FIFOQueue(capacity=256000,
                                    dtypes=[tf.string, tf.int64],
                                    shapes=[(1,), (1,)],
                                    shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder], name='enqueue_op')

        nrof_preprocess_threads = 4
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.cast(tf.image.decode_image(file_contents, channels=3),tf.float32)
                # if args.random_crop:
                #     image = tf.random_crop(image, [args.image_size, args.image_size, 3])
                #     #image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
                # else:
                #     image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)
                #image = tf.image.random_brightness(image,max_delta=30)
                #image = tf.image.random_contrast(image,lower=0.8,upper=1.2)
                #image = tf.image.random_saturation(image,lower=0.8,upper=1.2)
                image.set_shape((112, 96, 3))
                images.append(tf.subtract(image,127.5) * 0.0078125)
            images_and_labels.append([images, label])
    
        image_batch, label_batch = tf.train.batch_join(
            images_and_labels, batch_size=batch_size_placeholder, 
            shapes=[(112, 96, 3), ()], enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')
        
        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))       
        print('Building training graph')

        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability, 
            phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size, 
            weight_decay=args.weight_decay)
        
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
        AM_logits = AM_logits_compute(embeddings, label_batch, args, nrof_classes)
        #AM_logits = Arc_logits(embeddings, label_batch, args, nrof_classes)

        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch, logits=AM_logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
       
        for weights in slim.get_variables_by_name('kernel'):
            kernel_regularization = tf.contrib.layers.l2_regularizer(args.weight_decay)(weights)
            print(weights)
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, kernel_regularization)

        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

        if args.weight_decay==0:
            total_loss = tf.add_n([cross_entropy_mean], name='total_loss')
        else:
            total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')
        tf.add_to_collection('losses', total_loss)

        # Define two savers to handle the 'finetuning on a different dataset' situation
        saver_load = tf.train.Saver(tf.trainable_variables(), max_to_keep=1)
        saver_save = tf.train.Saver(tf.trainable_variables(), max_to_keep=1)

        #train_op = facenet.train(total_loss, global_step, args.optimizer, 
        #    learning_rate, args.moving_average_decay, tf.trainable_variables(), args.log_histograms)
        #train_op = tf.train.AdamOptimizer(learning_rate).minimize(total_loss,global_step = global_step,var_list=tf.trainable_variables())
        train_op = tf.train.MomentumOptimizer(learning_rate,momentum=0.9).minimize(total_loss,global_step=global_step,var_list=tf.trainable_variables())
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():
            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver_load.restore(sess, pretrained_model)

            print('Running training')
            epoch = 0
            best_accuracy = 0.0
            
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step, 
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file)

                print('validation running...')
                if args.lfw_dir:
                    #best_accuracy = evaluate_double(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, embeddings, 
                    #	label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer,best_accuracy, saver_save,model_dir,subdir,image_batch,args)

                    best_accuracy = evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, embeddings, 
                        label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer,best_accuracy,saver_save,model_dir,subdir)
    return model_dir
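AM_logits_compute is referenced above but not included in this excerpt. An additive-margin softmax head of this kind typically scores L2-normalized embeddings against L2-normalized class weights and subtracts a margin from the true-class cosine before scaling; a hedged numpy sketch (the scale s and margin m are common defaults, not values read from args):

# Hedged sketch of an additive-margin (AM) softmax head.
import numpy as np

def am_logits(embeddings, weights, labels, s=30.0, m=0.35):
    # embeddings: (batch, dim), rows L2-normalized
    # weights:    (dim, classes), columns L2-normalized
    cos_theta = embeddings @ weights                # cosine similarity to each class
    one_hot = np.eye(weights.shape[1])[labels]
    return s * (cos_theta - m * one_hot)            # margin only on the true class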
Code example #23
def main(args):
  
    with tf.Graph().as_default():
      
        with tf.Session() as sess:
            
            np.random.seed(seed=args.seed)
            
            if args.use_split_dataset:
                dataset_tmp = facenet.get_dataset(args.data_dir)
                train_set, test_set = split_dataset(dataset_tmp, args.min_nrof_images_per_class, args.nrof_train_images_per_class)
                if (args.mode=='TRAIN'):
                    dataset = train_set
                elif (args.mode=='CLASSIFY'):
                    dataset = test_set
            else:
                dataset = facenet.get_dataset(args.data_dir)

            # Check that there is at least one training image per class
            for cls in dataset:
                # An assert on a tuple is always true; keep the message as a
                # second operand of the assert statement instead
                assert len(cls.image_paths) > 0, 'There must be at least one image for each class in the dataset'

            paths, labels = facenet.get_image_paths_and_labels(dataset)
            
            print('Number of classes: %d' % len(dataset))
            print('Number of images: %d' % len(paths))
            
            # Load the model
            print('Loading feature extraction model')
            facenet.load_model(args.model)
            
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]
            
            # Run forward pass to calculate embeddings
            print('Calculating features for images')
            nrof_images = len(paths)
            nrof_batches_per_epoch = int(math.ceil(1.0*nrof_images / args.batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_epoch):
                start_index = i*args.batch_size
                end_index = min((i+1)*args.batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False, args.image_size)
                feed_dict = { images_placeholder:images, phase_train_placeholder:False }
                emb_array[start_index:end_index,:] = sess.run(embeddings, feed_dict=feed_dict)
            
            classifier_filename_exp = os.path.expanduser(args.classifier_filename)

            if (args.mode=='TRAIN'):
                # Train classifier
                print('Training classifier')
                model = SVC(kernel='linear', probability=True)

                model.fit(emb_array, labels)
            
                # Create a list of class names
                class_names = [ cls.name.replace('_', ' ') for cls in dataset]
                
                predictions = model.predict_proba(emb_array)
                best_class_indices = np.argmax(predictions, axis=1)
                best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]
                
                std_cutoff = np.amin(np.std(predictions, axis =1))
                prob_cutoff = np.amin(best_class_probabilities)
                print(std_cutoff)
                print(prob_cutoff)
                np.save("std_cutoff.npy", std_cutoff)
                np.save("prob_cutoff.npy", prob_cutoff)
                #    d.loc[i] = ['good',np.std(predictions[i]), best_class_probabilities[i]]
                #print(d)
                #print(d.std)
                #print(d.prod)
                
                #sns.swarmplot(x="prob", y="std", hue="classification", data=d)
                #plt.show()
                # Saving classifier model
                with open(classifier_filename_exp, 'wb') as outfile:
                    pickle.dump((model, class_names), outfile)
                print('Saved classifier model to file "%s"' % classifier_filename_exp)
                
            elif (args.mode=='CLASSIFY'):
                # Classify images
                print('Testing classifier')
                with open(classifier_filename_exp, 'rb') as infile:
                    (model, class_names) = pickle.load(infile)

                print('Loaded classifier model from file "%s"' % classifier_filename_exp)
                
                std_cutoff = np.load("std_cutoff.npy")
                prob_cutoff = np.load("prob_cutoff.npy")
                print(std_cutoff)
                print(prob_cutoff)

                predictions = model.predict_proba(emb_array)
                best_class_indices = np.argmax(predictions, axis=1)
                best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]
                
                for i in range(len(best_class_indices)):
                    if best_class_probabilities[i] >= prob_cutoff:
                        print('%4d  %s: %.3f' % (i, class_names[best_class_indices[i]], best_class_probabilities[i]))
                    else:
                        print("unknown face")
                        best_class_indices[i] = -1
                
                accuracy = np.mean(np.equal(best_class_indices, labels))
                print('Accuracy: %.3f' % accuracy)
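
The TRAIN branch above derives its unknown-face cutoffs from the training data itself and reloads them at CLASSIFY time. A minimal standalone sketch of how the saved prob_cutoff rejects low-confidence predictions (the prediction matrix and cutoff below are made-up values, not taken from the snippet):

import numpy as np

predictions = np.array([[0.70, 0.20, 0.10],   # peaked distribution -> accepted
                        [0.40, 0.35, 0.25]])  # flat distribution -> rejected as unknown
prob_cutoff = 0.5  # assumed; the snippet uses np.amin(best_class_probabilities)

best = np.argmax(predictions, axis=1)
best_prob = predictions[np.arange(len(best)), best]
for i, p in enumerate(best_prob):
    if p >= prob_cutoff:
        print('sample %d -> class %d (%.2f)' % (i, best[i], p))
    else:
        print('sample %d -> unknown face' % i)
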
コード例 #24
0
def main(args):

    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    # Create the log and model directories
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
    if args.baihe_pack_file:
        print('load baihe dataset')
        lfw_paths, actual_issame = msgpack_numpy.load(open(args.baihe_pack_file, 'rb'))

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        # Global step counter; different learning rates can be applied at different steps
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)

        # Read data and apply label preserving distortions
        image_batch, label_batch = facenet.read_and_augment_data(image_list, label_list, args.image_size,
            args.batch_size, args.max_nrof_epochs, args.random_crop, args.random_flip, args.random_rotate,
            args.nrof_preprocess_threads)
        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))

        print('Building training graph')

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')

        # Build the inference graph; this returns the network structure
        prelogits, _ = network.inference(image_batch, args.keep_probability, phase_train=True,
                                         weight_decay=args.weight_decay)
        # Weights are initialized with truncated normal noise, stddev 0.1:
        # tf.truncated_normal_initializer(stddev=0.1)
        logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None,
                                      weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                      weights_regularizer=slim.l2_regularizer(args.weight_decay),
                                      scope='Logits', reuse=False)

        # Add DeCov regularization loss
        if args.decov_loss_factor > 0.0:
            logits_decov_loss = facenet.decov_loss(logits) * args.decov_loss_factor
            # Add the DeCov loss to the tf.GraphKeys.REGULARIZATION_LOSSES collection
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, logits_decov_loss)

        # Add center loss (added to the collection as a regularization term)
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
            # Add the center loss to the tf.GraphKeys.REGULARIZATION_LOSSES collection
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)

        # Apply exponential decay to the learning rate
        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.scalar_summary('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        # Softmax and cross entropy are computed in a single op for efficiency
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits, label_batch, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate the total losses
        # Collect the regularization losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay, tf.all_variables(), args.log_histograms)

        # Evaluation
        print('Building evaluation graph')
        lfw_label_list = range(0, len(lfw_paths))
        assert (len(lfw_paths) % args.lfw_batch_size == 0), \
            "The number of images in the LFW test set needs to be divisible by the lfw_batch_size"
        eval_image_batch, eval_label_batch = facenet.read_and_augment_data(lfw_paths, lfw_label_list, args.image_size,
                                                                            args.lfw_batch_size, None, False, False,
                                                                            False, args.nrof_preprocess_threads,
                                                                            shuffle=False)
        # Node for input images
        eval_image_batch.set_shape((None, args.image_size, args.image_size, 3))
        eval_image_batch = tf.identity(eval_image_batch, name='input')
        eval_prelogits, _ = network.inference(eval_image_batch, 1.0,
                                              phase_train=False, weight_decay=0.0, reuse=True)
        eval_embeddings = tf.nn.l2_normalize(eval_prelogits, 1, 1e-10, name='embeddings')

        # Create a saver
        saver = tf.train.Saver(tf.all_variables(), max_to_keep=10)
        # saver = tf.train.Saver(tf.global_variables(), max_to_keep=10)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        # sess.run(tf.global_variables_initializer())
        # sess.run(tf.local_variables_initializer())
        sess.run(tf.initialize_all_variables())
        sess.run(tf.initialize_local_variables())
        summary_writer = tf.train.SummaryWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        # Starting the queue runners launches the queue threads and returns them.
        # Note that the input queue must be filled before it can be drained; since the
        # enqueue inputs are placeholders, the dequeue threads block until
        # sess.run(enqueue_op) inside train() feeds values into the queue, after which
        # the dequeue ops have items to process and each pipeline starts running.

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                try:
                    step = sess.run(global_step, feed_dict=None)
                    epoch = step // args.epoch_size
                    # Train for one epoch
                    train(args, sess, epoch, learning_rate_placeholder, global_step, total_loss, train_op, summary_op,
                          summary_writer, regularization_losses, args.learning_rate_schedule_file)

                    # Save variables and the metagraph if it doesn't exist already
                    save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)

                    # Evaluate on LFW
                    if args.lfw_dir:
                        evaluate(sess, eval_embeddings, eval_label_batch, actual_issame, args.lfw_batch_size, args.seed,
                                 args.lfw_nrof_folds, log_dir, step, summary_writer)
                    # Evaluate on baihe_data
                    if args.baihe_pack_file:
                        evaluate(sess, eval_embeddings, eval_label_batch, actual_issame, args.lfw_batch_size, args.seed,
                                 args.lfw_nrof_folds, log_dir, step, summary_writer)
                except Exception:
                    # Log the error and keep training instead of aborting
                    traceback.print_exc()
                    continue
    return model_dir
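
The schedule built with tf.train.exponential_decay above uses staircase=True, which drops the learning rate by a constant factor once per decay interval rather than continuously. A pure-Python sketch of that schedule (the base rate, interval, and factor are assumed example values, not the script's arguments):

def staircase_lr(base_lr, step, decay_steps, decay_factor):
    # lr = base_lr * decay_factor ** floor(step / decay_steps)
    return base_lr * decay_factor ** (step // decay_steps)

for step in (0, 999, 1000, 2500):
    print(step, staircase_lr(0.1, step, decay_steps=1000, decay_factor=0.9))
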
コード例 #25
0
def main(sess, graph, image_dir, class_names, labels, embeds):

    batch_size = 100
    image_margin = 44
    image_size = 160
    min_nr_of_images_per_class = 1

    with graph.as_default():

        with sess.as_default():

            st = time.time()
            dataset = facenet.get_dataset(image_dir)

            # Remove faces that already exist
            print('Removing already added faces')
            dataset = [
                x for x in dataset
                if not x.name.replace('_', ' ') in class_names
            ]

            # Check that there is at least one training image per class
            for cls in dataset:
                assert len(
                    cls.image_paths
                ) > 0, 'There must be at least one image for each class in the dataset'

            # If there are no new faces to add, terminate
            if not dataset:
                print('no new faces to be added... terminating')
                return False

            paths, new_labels = facenet.get_image_paths_and_labels(dataset)
            print('Number of classes: %d' % len(dataset))
            print('Number of images: %d' % len(paths))

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Calculating features for images')
            nrof_images = len(paths)
            nrof_batches_per_epoch = int(
                math.ceil(1.0 * nrof_images / batch_size))
            new_emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_epoch):
                start_index = i * batch_size
                end_index = min((i + 1) * batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = load_and_align_data(paths_batch, 160, 44, 0.9)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                new_emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            # Adding embeds and names in database
            new_class_names = [cls.name.replace('_', ' ') for cls in dataset]
            new_labels = np.array(new_labels)
            new_labels = new_labels + class_names.size

            class_names = np.append(class_names, new_class_names)
            embeds = np.concatenate((embeds, new_emb_array), axis=0)
            labels = np.append(labels, new_labels)

            np.save('embed', embeds)
            np.save('labels', labels)
            np.save('classnames', class_names)

            print('Elapsed Time = {}'.format(time.time() - st))

    return class_names, labels, embeds
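
Note how the snippet shifts the new labels by class_names.size before appending: labels of the new classes must index past the existing entries of the concatenated class-name array. A small sketch with made-up arrays:

import numpy as np

class_names = np.array(['alice', 'bob'])
labels = np.array([0, 1, 0])
new_class_names = ['carol']
new_labels = np.array([0, 0]) + class_names.size  # shift by 2 -> [2, 2]
class_names = np.append(class_names, new_class_names)
labels = np.append(labels, new_labels)
print(list(class_names[labels]))  # ['alice', 'bob', 'alice', 'carol', 'carol']
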
コード例 #26
0
ファイル: train_softmax.py プロジェクト: Samuelsci/facenet
def main(args):
  
    network = importlib.import_module(args.model_def)
    image_size = (args.image_size, args.image_size)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    stat_file_name = os.path.join(log_dir, 'stat.h5')

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
        
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    dataset = facenet.get_dataset(args.data_dir)
    if args.filter_filename:
        dataset = filter_dataset(dataset, os.path.expanduser(args.filter_filename), 
            args.filter_percentile, args.filter_min_nrof_images_per_class)
        
    if args.validation_set_split_ratio>0.0:
        train_set, val_set = facenet.split_dataset(dataset, args.validation_set_split_ratio, args.min_nrof_val_images_per_class, 'SPLIT_IMAGES')
    else:
        train_set, val_set = dataset, []
        
    nrof_classes = len(train_set)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        
        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list)>0, 'The training set should not be empty'
        
        val_image_list, val_label_list = facenet.get_image_paths_and_labels(val_set)

        # Create a queue that produces indices into the image_list and label_list 
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
                             shuffle=True, seed=None, capacity=32)
        
        index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
        
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
        labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
        control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
        
        nrof_preprocess_threads = 4
        input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                    dtypes=[tf.string, tf.int32, tf.int32],
                                    shapes=[(1,), (1,), (1,)],
                                    shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='enqueue_op')
        image_batch, label_batch = facenet.create_input_pipeline(input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)

        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')
        
        print('Number of classes in training set: %d' % nrof_classes)
        print('Number of examples in training set: %d' % len(image_list))

        print('Number of classes in validation set: %d' % len(val_set))
        print('Number of examples in validation set: %d' % len(val_image_list))
        
        print('Building training graph')
        
        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability, 
            phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size, 
            weight_decay=args.weight_decay)
        logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None, 
                weights_initializer=slim.initializers.xavier_initializer(), 
                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                scope='Logits', reuse=False)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Norm for the prelogits
        eps = 1e-4
        prelogits_norm = tf.reduce_mean(tf.norm(tf.abs(prelogits)+eps, ord=args.prelogits_norm_p, axis=1))
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_norm * args.prelogits_norm_loss_factor)

        # Add center loss
        prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)
        
        correct_prediction = tf.cast(tf.equal(tf.argmax(logits, 1), tf.cast(label_batch, tf.int64)), tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)
        
        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer, 
            learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
        
        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            nrof_steps = args.max_nrof_epochs*args.epoch_size
            nrof_val_samples = int(math.ceil(args.max_nrof_epochs / args.validate_every_n_epochs))   # Validate every validate_every_n_epochs as well as in the last epoch
            stat = {
                'loss': np.zeros((nrof_steps,), np.float32),
                'center_loss': np.zeros((nrof_steps,), np.float32),
                'reg_loss': np.zeros((nrof_steps,), np.float32),
                'xent_loss': np.zeros((nrof_steps,), np.float32),
                'prelogits_norm': np.zeros((nrof_steps,), np.float32),
                'accuracy': np.zeros((nrof_steps,), np.float32),
                'val_loss': np.zeros((nrof_val_samples,), np.float32),
                'val_xent_loss': np.zeros((nrof_val_samples,), np.float32),
                'val_accuracy': np.zeros((nrof_val_samples,), np.float32),
                'lfw_accuracy': np.zeros((args.max_nrof_epochs,), np.float32),
                'lfw_valrate': np.zeros((args.max_nrof_epochs,), np.float32),
                'learning_rate': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_train': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_validate': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_evaluate': np.zeros((args.max_nrof_epochs,), np.float32),
                'prelogits_hist': np.zeros((args.max_nrof_epochs, 1000), np.float32),
              }
            for epoch in range(1,args.max_nrof_epochs+1):
                step = sess.run(global_step, feed_dict=None)
                # Train for one epoch
                t = time.time()
                cont = train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, global_step, 
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file,
                    stat, cross_entropy_mean, accuracy, learning_rate,
                    prelogits, prelogits_center_loss, args.random_rotate, args.random_crop, args.random_flip, prelogits_norm, args.prelogits_hist_max, args.use_fixed_image_standardization)
                stat['time_train'][epoch-1] = time.time() - t
                
                if not cont:
                    break
                  
                t = time.time()
                if len(val_image_list)>0 and ((epoch-1) % args.validate_every_n_epochs == args.validate_every_n_epochs-1 or epoch==args.max_nrof_epochs):
                    validate(args, sess, epoch, val_image_list, val_label_list, enqueue_op, image_paths_placeholder, labels_placeholder, control_placeholder,
                        phase_train_placeholder, batch_size_placeholder, 
                        stat, total_loss, regularization_losses, cross_entropy_mean, accuracy, args.validate_every_n_epochs, args.use_fixed_image_standardization)
                stat['time_validate'][epoch-1] = time.time() - t

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, epoch)

                # Evaluate on LFW
                t = time.time()
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, 
                        embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer, stat, epoch, 
                        args.lfw_distance_metric, args.lfw_subtract_mean, args.lfw_use_flipped_images, args.use_fixed_image_standardization)
                stat['time_evaluate'][epoch-1] = time.time() - t

                print('Saving statistics')
                with h5py.File(stat_file_name, 'w') as f:
                    for key, value in stat.items():  # items() works on both Python 2 and 3
                        f.create_dataset(key, data=value)
    
    return model_dir
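
The prelogits_norm term in this example penalizes large pre-bottleneck activations. The same quantity in plain numpy, assuming args.prelogits_norm_p is 1.0 (random data stands in for the real prelogits):

import numpy as np

prelogits = np.random.randn(8, 128)
eps = 1e-4
# L1 norm per row of |prelogits| + eps, averaged over the batch
prelogits_norm = np.mean(np.linalg.norm(np.abs(prelogits) + eps, ord=1, axis=1))
print(prelogits_norm)
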
コード例 #27
0
ファイル: test.py プロジェクト: pocheck-v2/Pocheck-V2-Beta
                                            gpu_options=gpu_options,
                                            log_device_placement=True))
    with sess.as_default():
        pnet, rnet, onet = detect_face.create_mtcnn(sess, '../parameter/det/')

        minsize = 35  # minimum size of face
        threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
        factor = 0.709  # scale factor
        margin = 44
        frame_interval = 3
        batch_size = 1000
        image_size = 182
        input_image_size = 160
        humans_dir = '../JB_celeb/'
        humans_dir = facenet.get_dataset(humans_dir)
        paths, labels = facenet.get_image_paths_and_labels(humans_dir)
        HumanNames = []
        Human_hash = dict()
        Human_count = dict()
        human_len = len(humans_dir)

        for cls in humans_dir:
            HumanNames.append(cls.name)
            Human_hash[cls.name] = [False, 0]
            Human_count[cls.name] = 0

        make_file(Human_hash)

        print('Loading feature extraction model')
        modeldir = '../parameter/20180402-114759/20180402-114759.pb'
        facenet.load_model(modeldir)
コード例 #28
0
import tensorflow as tf
import numpy as np
import facenet
import os
import math
import pickle
from sklearn.svm import SVC


with tf.Graph().as_default():

    with tf.Session() as sess:

        datadir = './output/'
        dataset = facenet.get_dataset(datadir)
        paths, labels = facenet.get_image_paths_and_labels(dataset)
        print('Number of classes: %d' % len(dataset))
        print('Number of images: %d' % len(paths))

        print('Loading feature extraction model')
        modeldir = './models/facenet/20190310-055158'
        facenet.load_model(modeldir)

        images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
        embedding_size = embeddings.get_shape()[1]

        # Run forward pass to calculate embeddings
        print('Calculating features for images')
        batch_size = 1000
コード例 #29
0
def train():
   # Ask for the folder names interactively; no function parameters are passed.
   # TODO: split the dataset if the user says so, then optionally call the test
   # function on the split results; otherwise test on the whole dataset.
   
   path = input("\nEnter the path to the face images directory (containing one folder per user), or press ENTER to use the default 'output' folder in this code directory: ")
   if path == "":
      path = 'output'

   gpu_fraction = input("\nEnter the GPU memory fraction you want to allocate out of 1, or press ENTER for the default 0.8: ").rstrip()
   
   ''' 
   if gpu_fraction == "":
      gpu_fraction = 0.8
   else:
      gpu_fraction = round(float(gpu_fraction), 1)
   '''

   model = input("\nEnter the folder path that contains the 20180402-114759 model folder, or press ENTER if 20180402-114759 is in this code directory itself: ").rstrip()
   if model == "":
      model = "20180402-114759/20180402-114759.pb"
   else:
      model += "/20180402-114759/20180402-114759.pb"

   batch_size = 90
   ask = input("\nEnter the batch size of images to process at once OR press ENTER for default 90: ").rstrip().lstrip()
   if ask != "":
     batch_size = int(ask)

   image_size = 160
   ask = input("\nEnter the width_size of face images OR press ENTER for default 160: ").rstrip().lstrip()
   if ask != "":
     image_size = int(ask)

   classifier_filename = input("Enter the output SVM classifier filename OR press ENTER for default name= classifier: ")
   if classifier_filename == "":
      classifier_filename = 'classifier.pkl'
   else:
      classifier_filename += '.pkl'
   classifier_filename = os.path.expanduser(classifier_filename)

   split_dataset = input("\nPress Y if you want to split the dataset for Training and Testing: ").rstrip().lstrip().lower()

   # If yes ask for the percentage of training and testing division.
   percentage = 70
   if split_dataset == 'y':
      ask = input("\nEnter the percentage of training dataset for splitting OR press ENTER for default 70: ").rstrip().lstrip()
      if ask != "":
        percentage = float(ask)

   min_nrof_images_per_class = 0
   ask = input("\nEnter the minimum number of images that must be present for a single user to be included for classification, or press ENTER for the default value 0: ")
   if ask != "":
     min_nrof_images_per_class = int(ask)

   dataset = facenet.get_dataset(path)
   train_set = []
   test_set = []
   
   if split_dataset == 'y':
     for cls in dataset:
         paths = cls.image_paths
         # Remove classes with less than min_nrof_images_per_class
         if len(paths) >= min_nrof_images_per_class:
            np.random.shuffle(paths)

            # Find the number of images in training set and testing set images for this class
            no_train_images = int(percentage * len(paths) * 0.01)

            train_set.append(facenet.ImageClass(cls.name, paths[:no_train_images]))
            test_set.append(facenet.ImageClass(cls.name, paths[no_train_images:]))
     
     # Check that there is at least one image per class in each split
     for cls in train_set:
        assert len(cls.image_paths) > 0, '\nUnable to have at least one image in the train set for one of the classes. Change parameter values.'
     for cls in test_set:
        assert len(cls.image_paths) > 0, '\nUnable to have at least one image in the test set for one of the classes. Change parameter values.'

   else:
      # Check that there is at least one image per class in the dataset
      for cls in dataset:
         assert len(cls.image_paths) > 0, '\nThere must be at least one image for each class in the dataset'
   paths_train = []
   labels_train = []
   paths_test = []
   labels_test = []
   emb_array = []
   class_names = []

   if split_dataset == 'y':
      paths_train, labels_train = facenet.get_image_paths_and_labels(train_set)
      paths_test, labels_test = facenet.get_image_paths_and_labels(test_set)
      print('\nNumber of classes: %d' % len(train_set))
      print('\nNumber of images in TRAIN set: %d' % len(paths_train))
      print('\nNumber of images in TEST set: %d' % len(paths_test))
   else:
      paths_train, labels_train = facenet.get_image_paths_and_labels(dataset)  
      print('\nNumber of classes: %d' % len(dataset))
      print('\nNumber of images: %d' % len(paths_train))

   # Find embedding
   emb_array = get_embeddings(model, paths_train, batch_size, image_size)

   # Train the classifier
   print('\nTraining classifier')
   model_svc = SVC(kernel='linear', probability=True)
   model_svc.fit(emb_array, labels_train)

   # Create a list of class names
   if split_dataset == 'y':
      class_names = [ cls.name.replace('_', ' ') for cls in train_set]
   else:
      class_names = [cls.name.replace('_', ' ') for cls in dataset]

   # Saving classifier model
   with open(classifier_filename, 'wb') as outfile:
        pickle.dump((model_svc, class_names), outfile)
  
   print('\nSaved classifier model to file: "%s"' % classifier_filename)
   
   if split_dataset == 'y':
     # Find embedding for test data
     emb_array = get_embeddings(model, paths_test, batch_size, image_size)
     
     # Call test on the test set.
     test(classifier_filename, emb_array, labels_test, model, batch_size, image_size)

   else:
     # Ask the user to test or not on the whole dataset
     ask = input("Press y if you want to run the TEST on whole dataset or press ENTER to exit: ").rstrip().lstrip().lower()
     if ask == 'y':
        test()
     else:
        sys.exit()
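
For reference, the split above keeps percentage% of each class's shuffled images for training and the rest for testing. A standalone sketch with made-up file names:

import numpy as np

paths = ['img%02d.png' % i for i in range(10)]
percentage = 70
np.random.shuffle(paths)
no_train_images = int(percentage * len(paths) * 0.01)  # 7 of 10 images
train_paths, test_paths = paths[:no_train_images], paths[no_train_images:]
print(len(train_paths), len(test_paths))  # 7 3
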
コード例 #30
0
def test_identity(args, dataset):
    with tf.Graph().as_default():

        with tf.Session() as sess:
            np.random.seed(seed=args.seed)
            # Classify images
            print('Testing classifier')
            classifier_filename_exp = os.path.expanduser(
                args.classifier_filename)
            with open(classifier_filename_exp, 'rb') as infile:
                (model, class_names) = pickle.load(infile)

            paths, labels = facenet.get_image_paths_and_labels(
                dataset, class_names)

            # Load the model
            print('Loading feature extraction model')
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Calculating features for images')
            nrof_images = len(paths)
            nrof_batches_per_epoch = int(
                math.ceil(1.0 * nrof_images / args.batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            print('Number of batches: ', nrof_batches_per_epoch)

            for i in range(nrof_batches_per_epoch):
                print('Batch: ', i)
                start_index = i * args.batch_size
                end_index = min((i + 1) * args.batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False,
                                           args.image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            print('Loaded classifier model from file "%s"' %
                  classifier_filename_exp)

            predictions = model.predict_proba(emb_array)
            best_class_indices = np.argmax(predictions, axis=1)
            best_class_probabilities = predictions[
                np.arange(len(best_class_indices)), best_class_indices]

            for i in range(len(best_class_indices)):
                print('%4d  %s: %.3f' % (i, class_names[best_class_indices[i]],
                                         best_class_probabilities[i]))

            accuracy = np.mean(np.equal(best_class_indices, labels))
            return accuracy
コード例 #31
0
def main(args):
    dataset = facenet.get_dataset(args.dataset_dir)
  
    with tf.Graph().as_default():
      
        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(dataset)
        nrof_images = len(image_list)
        image_indices = range(nrof_images)

        image_batch, label_batch = facenet.read_and_augment_data(image_list,
            image_indices, args.image_size, args.batch_size, None, 
            False, False, False, nrof_preprocess_threads=4, shuffle=False)
        
        model_exp = os.path.expanduser(args.model_file)
        with gfile.FastGFile(model_exp,'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            input_map={'input':image_batch, 'phase_train':False}
            tf.import_graph_def(graph_def, input_map=input_map, name='net')
        
        embeddings = tf.get_default_graph().get_tensor_by_name("net/embeddings:0")

        with tf.Session() as sess:
            tf.train.start_queue_runners(sess=sess)
                
            embedding_size = int(embeddings.get_shape()[1])
            nrof_batches = int(math.ceil(1.0 * nrof_images / args.batch_size))  # avoid integer division on Python 2
            nrof_classes = len(dataset)
            label_array = np.array(label_list)
            class_names = [cls.name for cls in dataset]
            nrof_examples_per_class = [ len(cls.image_paths) for cls in dataset ]
            class_variance = np.zeros((nrof_classes,))
            class_center = np.zeros((nrof_classes,embedding_size))
            distance_to_center = np.ones((len(label_list),))*np.NaN
            emb_array = np.zeros((0,embedding_size))
            idx_array = np.zeros((0,), dtype=np.int32)
            lab_array = np.zeros((0,), dtype=np.int32)
            index_arr = np.append(0, np.cumsum(nrof_examples_per_class))
            for i in range(nrof_batches):
                t = time.time()
                emb, idx = sess.run([embeddings, label_batch])
                emb_array = np.append(emb_array, emb, axis=0)
                idx_array = np.append(idx_array, idx, axis=0)
                lab_array = np.append(lab_array, label_array[idx], axis=0)
                for cls in set(lab_array):
                    cls_idx = np.where(lab_array==cls)[0]
                    if cls_idx.shape[0]==nrof_examples_per_class[cls]:
                        # We have calculated all the embeddings for this class
                        i2 = np.argsort(idx_array[cls_idx])
                        emb_class = emb_array[cls_idx,:]
                        emb_sort = emb_class[i2,:]
                        center = np.mean(emb_sort, axis=0)
                        diffs = emb_sort - center
                        dists_sqr = np.sum(np.square(diffs), axis=1)
                        class_variance[cls] = np.mean(dists_sqr)
                        class_center[cls,:] = center
                        distance_to_center[index_arr[cls]:index_arr[cls+1]] = np.sqrt(dists_sqr)
                        emb_array = np.delete(emb_array, cls_idx, axis=0)
                        idx_array = np.delete(idx_array, cls_idx, axis=0)
                        lab_array = np.delete(lab_array, cls_idx, axis=0)

                        
                print('Batch %d in %.3f seconds' % (i, time.time()-t))
                
            print('Writing filtering data to %s' % args.data_file_name)
            mdict = {'class_names':class_names, 'image_list':image_list, 'label_list':label_list, 'distance_to_center':distance_to_center }
            with h5py.File(args.data_file_name, 'w') as f:
                for key, value in iteritems(mdict):
                    f.create_dataset(key, data=value)
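
The per-class bookkeeping above finalizes a class only once all of its embeddings have arrived; the statistics themselves reduce to a few numpy lines (random embeddings stand in for the real ones):

import numpy as np

emb_sort = np.random.randn(5, 128)           # all embeddings of one class
center = np.mean(emb_sort, axis=0)           # class center
diffs = emb_sort - center
dists_sqr = np.sum(np.square(diffs), axis=1)
class_variance = np.mean(dists_sqr)          # mean squared distance to the center
distance_to_center = np.sqrt(dists_sqr)      # per-image distance, used for filtering
print(class_variance, distance_to_center.shape)
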
コード例 #32
0
def main(args):
  
    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
        
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    if args.filter_filename:
        train_set = filter_dataset(train_set, os.path.expanduser(args.filter_filename), 
            args.filter_percentile, args.filter_min_nrof_images_per_class)
    nrof_classes = len(train_set)
    
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)
    
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
    
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        
        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list)>0, 'The dataset should not be empty'
        
        # Create a queue that produces indices into the image_list and label_list 
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
                             shuffle=True, seed=None, capacity=32)
        
        index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
        
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')

        labels_placeholder = tf.placeholder(tf.int64, shape=(None,1), name='labels')
        
        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                    dtypes=[tf.string, tf.int64],
                                    shapes=[(1,), (1,)],
                                    shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder], name='enqueue_op')
        args.input_queue = input_queue
        nrof_preprocess_threads = 4
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_image(file_contents, channels=3)
                if args.random_rotate:
                    image = tf.py_func(facenet.random_rotate_image, [image], tf.uint8)
                if args.random_crop:
                    image = tf.random_crop(image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)
    
                #pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])
    
        image_batch, label_batch = tf.train.batch_join(
            images_and_labels, batch_size=batch_size_placeholder, 
            shapes=[(args.image_size, args.image_size, 3), ()], enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)
        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')
        
        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))
        
        print('Building training graph')
        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)
#        opt = tf.train.GradientDescentOptimizer(learning_rate)
        total_grads = []
        loss_reg = []
        loss_cross = []
        loss_center = []
        images_splits = tf.split(axis=0, num_or_size_splits=args.num_gpus, value=image_batch)
        labels_splits = tf.split(axis=0, num_or_size_splits=args.num_gpus, value=label_batch)
        embedding_list = []
        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(args.num_gpus):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('facenet_%d' % (i)) as scope:
                        print('Build training graph on gpu %d' % i)
                        # Build the inference graph
                        prelogits, _ = network.inference(images_splits[i], args.keep_probability, 
                            phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size, 
                            weight_decay=args.weight_decay)
                        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
                        if args.l2_softmax_alpha > 0:
                            prelogits = embeddings * args.l2_softmax_alpha
                        logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None, 
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1), 
                                weights_regularizer=slim.l2_regularizer(args.weight_decay),
                                scope='Logits', reuse=False)

                        embedding_list.append(embeddings)
                        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
                        regularization_losses = sum(regularization_losses)
                        loss_reg.append(regularization_losses)
                        tf.add_to_collection('losses', regularization_losses)
                        # Add center loss
                        if args.center_loss_factor>0.0:
                            prelogits_center_loss, _ = facenet.center_loss(prelogits, labels_splits[i], args.center_loss_alfa, nrof_classes)
                            center_loss = tf.identity(prelogits_center_loss * args.center_loss_factor, 'center_loss')
                            loss_center.append(center_loss)
                            tf.add_to_collection('losses', center_loss)

                        # Reuse variables for the next tower.
                        tf.get_variable_scope().reuse_variables()

                        # Calculate the average cross entropy loss across the batch
                        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                            labels=labels_splits[i], logits=logits, name='cross_entropy_per_example')
                        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
                        loss_cross.append(cross_entropy_mean)
                        tf.add_to_collection('losses', cross_entropy_mean)
                        
                        # Calculate the total losses
                        total_loss = cross_entropy_mean + regularization_losses
                        if args.center_loss_factor>0.0:
                            total_loss = total_loss + center_loss
                        total_loss = tf.identity(total_loss, name='total_loss')
                        opt = facenet.optimizer(total_loss, global_step, args.optimizer, 
                            learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
                        grads = opt.compute_gradients(total_loss)
                        total_grads.append(grads)
        print('Build training total graph')
        regularization_losses = tf.add_n(loss_reg) / args.num_gpus
        tf.summary.scalar('loss/regularization', regularization_losses)
        tf.summary.scalar('loss/center', tf.add_n(loss_center) / args.num_gpus)
        tf.summary.scalar('loss/cross', tf.add_n(loss_cross) / args.num_gpus)
        total_loss = tf.add_n(loss_reg + loss_cross + loss_center) / args.num_gpus
        tf.summary.scalar('loss/total', total_loss)
        embeddings = tf.concat(axis=0, values=embedding_list)
        # We must calculate the mean of each gradient. Note that this is the
        # synchronization point across all towers.
        grads = average_gradients(total_grads)

        # Apply the gradients to adjust the shared variables.
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

        # Track the moving averages of all trainable variables.
        variable_averages = tf.train.ExponentialMovingAverage(
            args.moving_average_decay, global_step)
        variables_averages_op = variable_averages.apply(tf.trainable_variables())

        # Group all updates to into a single train op.
        train_op = tf.group(apply_gradient_op, variables_averages_op)
        
        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step, 
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, 
                        embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer)
    return model_dir
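
average_gradients is called above but not included in this excerpt. A sketch of it under the assumption that it follows the standard TensorFlow multi-tower pattern, averaging each variable's gradient across the GPU towers:

import tensorflow as tf

def average_gradients(tower_grads):
    # tower_grads: list (over towers) of lists of (gradient, variable) pairs
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Stack this variable's per-tower gradients and average them
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, axis=0), 0)
        # Variables are shared across towers, so any tower's variable works
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads
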
コード例 #33
0
SVM_MODEL_PATH = os.path.join(MODEL_PATH, "svm", "lfw_svm_classifier.pkl")

# Image data directory for training/validation
IMG_IN_PATH = os.path.join(DATA_PATH, "lfw")

# Output directory of the MTCNN-aligned face images
IMG_OUT_PATH = os.path.join(DATA_PATH, "lfw_mtcnnpy_160")

# Convert each face image into a FaceNet face embedding (a 128-dimensional vector)
# using the TensorFlow FaceNet model
with tf.Graph().as_default():
    with tf.Session() as sess:
        datadir = IMG_OUT_PATH  # directory of detected, aligned & cropped face images
        dataset = facenet.get_dataset(datadir)
        # Original: get the path and label of every face image
        paths, labels, labels_dict = facenet.get_image_paths_and_labels(
            dataset)
        print('Origin: Number of classes : %d' % len(labels_dict))
        print('Origin: Number of images : %d' % len(paths))

        # Many face classes in LFW contain only a single image, which is too few
        # samples for training, so we keep only the classes with at least 5 images

        # Filtered: get the path and label of every face image (>= 5 images per class)
        paths, labels, labels_dict = facenet.get_image_paths_and_labels(
            dataset, enable_filter=True, filter_size=5)
        print('Filtered: Number of classes: %d' % len(labels_dict))
        print('Filtered: Number of images: %d' % len(paths))

        # Load the FaceNet model
        print('Loading feature extraction model')
        modeldir = FACENET_MODEL_PATH
コード例 #34
0
ファイル: train_softmax.py プロジェクト: helloyide/facenet
def main(args):
    # Dynamically import the Python module for the network architecture given as a command-line argument
    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    # os.path.expanduser expands the user path '~' in a cross-platform way
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)

    # args.data_dir can contain multiple datasets, separated by commas
    # TODO: no handling of class-name conflicts across datasets?
    data_dirs = args.data_dir.split(",")
    train_set = []
    for data_dir in data_dirs:
        if len(data_dir) > 0:
            train_set.extend(facenet.get_dataset(data_dir))

    if args.filter_filename:
        train_set = filter_dataset(train_set, os.path.expanduser(args.filter_filename),
                                   args.filter_percentile, args.filter_min_nrof_images_per_class)
    nrof_classes = len(train_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    continue_ckpt_dir = None
    if args.continue_ckpt_dir:
        continue_ckpt_dir = os.path.expanduser(args.continue_ckpt_dir)
        print('Continue training from the checkpoint: %s' % continue_ckpt_dir)

    snapshot_at_step = None
    if args.snapshot_at_step:
        snapshot_at_step = int(args.snapshot_at_step)
        print('Will take a snapshot checkpoint at step', snapshot_at_step)

    nrof_preprocess_threads = 4
    if args.nrof_preprocess_threads:
        nrof_preprocess_threads = int(args.nrof_preprocess_threads)
        print('Number of preprocess threads', nrof_preprocess_threads)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list) > 0, 'The dataset should not be empty'

        # Create a queue that produces indices into the image_list and label_list
        # https://www.tensorflow.org/api_guides/python/threading_and_queues

        # This function converts Python objects of various types to Tensor objects.
        # It accepts Tensor objects, numpy arrays, Python lists, and Python scalars.
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        # This operation returns a 1-D integer tensor representing the shape of input.
        range_size = array_ops.shape(labels)[0]
        # Produces the integers from 0 to limit-1 in a queue.
        index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
                                                    shuffle=True, seed=None, capacity=32)
        index_dequeue_op = index_queue.dequeue_many(args.batch_size * args.epoch_size, 'index_dequeue')
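        # Each dequeue_many call on this op thus yields batch_size * epoch_size
        # shuffled indices at once, i.e. exactly one epoch's worth of samples;
        # train() looks the corresponding paths/labels up in image_list and
        # label_list and pushes them into the FIFO queue below.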

        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 1), name='image_paths')

        labels_placeholder = tf.placeholder(tf.int64, shape=(None, 1), name='labels')

        # Creates a queue that dequeues elements in a first-in first-out order.
        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                              dtypes=[tf.string, tf.int64],
                                              shapes=[(1,), (1,)],
                                              shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder], name='enqueue_op')

        # Read the image files, convert them to tensors, apply data augmentation, and collect the results in images_and_labels
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            # Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors.
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                # Detects whether an image is a GIF, JPEG, or PNG, and performs the appropriate operation
                # to convert the input bytes string into a Tensor of type uint8.
                # Note: decode_gif returns a 4-D array [num_frames, height, width, 3],
                # as opposed to decode_jpeg and decode_png, which return 3-D arrays [height, width, num_channels].
                image = tf.image.decode_image(file_contents, channels=3)
                # Apply data augmentation to the training images
                # https://www.tensorflow.org/api_docs/python/tf/image
                # https://www.tensorflow.org/api_docs/python/tf/contrib/image
                if args.random_rotate:
                    image = tf.py_func(facenet.random_rotate_image, [image], tf.uint8)
                if args.random_crop:
                    # The training images (182px) are slightly larger than the requested size (160px); instead of rescaling, randomly crop them to 160
                    image = tf.random_crop(image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)
                if args.random_brightness:
                    image = tf.image.random_brightness(image, max_delta=0.2)

                # pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])

        # Runs a list of tensors to fill a queue to create batches of examples.
        image_batch, label_batch = tf.train.batch_join(
            images_and_labels, batch_size=batch_size_placeholder,
            shapes=[(args.image_size, args.image_size, 3), ()], enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)

        # https://stackoverflow.com/questions/34877523/in-tensorflow-what-is-tf-identity-used-for
        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')

        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))

        print('Building training graph')

        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability,
                                         phase_train=phase_train_placeholder,
                                         bottleneck_layer_size=args.embedding_size,
                                         weight_decay=args.weight_decay)

        logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None,
                                      weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                      weights_regularizer=slim.l2_regularizer(args.weight_decay),
                                      scope='Logits', reuse=False)

        # Normalizes along dimension dim using an L2 norm.
        # For a 1-D tensor with dim = 0, computes output = x / sqrt(max(sum(x**2), epsilon))
        # For x with more dimensions, independently normalizes each 1-D slice along dimension dim.
        # The final embedding for each face image: the core output of the algorithm
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Add center loss
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
            # Wrapper for Graph.add_to_collection() using the default graph.
            # Stores value in the collection with the given name.
            # Note that collections are not sets, so it is possible to add a value to a collection several times.
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                 # args.center_loss_factor is the lambda weight from the center loss paper
                                 prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(learning_rate_placeholder,
                                                   global_step,
                                                   args.learning_rate_decay_epochs * args.epoch_size,
                                                   args.learning_rate_decay_factor,
                                                   staircase=True)

        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)
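        # Note: sparse_softmax_cross_entropy_with_logits consumes the integer
        # class ids in label_batch directly, so no one-hot encoding is needed.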

        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        # Adds all input tensors element-wise.
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss,
                                 global_step,
                                 args.optimizer,
                                 learning_rate,
                                 args.moving_average_decay,
                                 tf.global_variables(),
                                 args.log_histograms)

        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)
            elif continue_ckpt_dir:
                files = os.listdir(continue_ckpt_dir)
                meta_files = [s for s in files if s.endswith('.meta')]
                if len(meta_files) == 0:
                    raise ValueError('No meta file found in %s' % continue_ckpt_dir)
                elif len(meta_files) > 1:
                    raise ValueError(
                        'There should not be more than one meta file in %s' % continue_ckpt_dir)
                saver = tf.train.import_meta_graph(continue_ckpt_dir + "/" + meta_files[0])
                latest_checkpoint = tf.train.latest_checkpoint(continue_ckpt_dir)
                print('Restoring checkpoint: %s' % latest_checkpoint)
                saver.restore(sess, latest_checkpoint)
                # TODO: unclear why global_step is not restored; recover it from the checkpoint filename
                last_step = int(os.path.basename(latest_checkpoint).split('-')[-1])
                print('Checkpoint restored, last step is ', str(last_step))
                sess.run(global_step.assign(last_step))

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch_size = args.epoch_size
                epoch = step // epoch_size

                if args.learning_rate > 0.0:
                    lr = args.learning_rate
                else:
                    # Re-read the schedule file every epoch; its contents can be edited while training runs
                    lr = facenet.get_learning_rate_from_file(args.learning_rate_schedule_file, epoch)
                    # Special value means stop
                    if lr == 0.0:
                        break

                # Train for one epoch
                train(args,
                      sess,
                      epoch,
                      image_list,
                      label_list,
                      index_dequeue_op,
                      enqueue_op,
                      image_paths_placeholder,
                      labels_placeholder,
                      learning_rate_placeholder,
                      phase_train_placeholder,
                      batch_size_placeholder,
                      global_step,
                      total_loss,
                      train_op,
                      summary_op,
                      summary_writer,
                      regularization_losses,
                      lr,
                      snapshot_at_step,
                      saver,
                      model_dir,
                      subdir
                      )

                # Save variables and the metagraph if it doesn't exist already (step in filename is the next step after restore)
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step + epoch_size)

                # Evaluate on LFW
                if args.lfw_dir and args.lfw_epoch_interval > 0:
                    if epoch % args.lfw_epoch_interval == 0:
                        evaluate(sess,
                                 enqueue_op,
                                 image_paths_placeholder,
                                 labels_placeholder,
                                 phase_train_placeholder,
                                 batch_size_placeholder,
                                 embeddings,
                                 label_batch,
                                 lfw_paths,
                                 actual_issame,
                                 args.lfw_batch_size,
                                 args.lfw_nrof_folds,
                                 log_dir,
                                 step,
                                 summary_writer)

                # Print current time
                print("Current date time:", datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S'))

    return model_dir
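The learning-rate schedule file read by facenet.get_learning_rate_from_file above maps epoch numbers to learning rates, one entry per line. Upstream facenet ships these files in a plain "epoch: rate" format with '#' comment lines; the values below are illustrative, and the parser is a minimal sketch under that assumed format (in this fork a rate of 0.0 stops training, per the loop above):

# learning_rate_schedule.txt
# Maps an epoch number to a learning rate
0:  0.05
60: 0.005
80: 0.0005
91: 0.0

def get_learning_rate_from_file(filename, epoch):
    # Return the rate of the last entry whose epoch is <= the current epoch
    learning_rate = 0.0
    with open(filename, 'r') as f:
        for line in f:
            line = line.split('#', 1)[0].strip()
            if not line:
                continue
            e, lr = line.split(':')
            if int(e) <= epoch:
                learning_rate = float(lr)
            else:
                break
    return learning_rate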
コード例 #35
0
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    network = importlib.import_module(args.model_def)

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir,
                                                       'arguments.txt'))

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    train_set = facenet.get_dataset(args.data_dir)

    train_set, test_set = facenet.split_dataset(train_set,
                                                0.8,
                                                mode='SPLIT_IMAGES')

    if args.filter_filename:
        train_set = filter_dataset(train_set,
                                   os.path.expanduser(args.filter_filename),
                                   args.filter_percentile,
                                   args.filter_min_nrof_images_per_class)
    nrof_classes = len(train_set)
    test_nrof_classes = len(test_set)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        # meta_file, ckpt_model=facenet.get_model_filenames(pretrained_model)
        # pretrained_model=ckpt_model
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list) > 0, 'The dataset should not be empty'

        test_image_list, test_label_list = facenet.get_image_paths_and_labels(
            test_set)
        assert len(test_image_list) > 0, 'The dataset should not be empty'

        # Create a queue that produces indices into the image_list and label_list
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size,
                                                    num_epochs=None,
                                                    shuffle=True,
                                                    seed=None,
                                                    capacity=32)

        index_dequeue_op = index_queue.dequeue_many(
            args.batch_size * args.epoch_size, 'index_dequeue')

        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')

        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        image_paths_placeholder = tf.placeholder(tf.string,
                                                 shape=(None, 1),
                                                 name='image_paths')

        labels_placeholder = tf.placeholder(tf.int64,
                                            shape=(None, 1),
                                            name='labels')

        input_queue = data_flow_ops.FIFOQueue(capacity=100000,
                                              dtypes=[tf.string, tf.int64],
                                              shapes=[(1, ), (1, )],
                                              shared_name=None,
                                              name=None)
        enqueue_op = input_queue.enqueue_many(
            [image_paths_placeholder, labels_placeholder], name='enqueue_op')

        nrof_preprocess_threads = 4
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            filenames, label = input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_image(file_contents, channels=3)
                if args.random_rotate:
                    image = tf.py_func(facenet.random_rotate_image, [image],
                                       tf.uint8)
                if args.random_crop:
                    image = tf.random_crop(
                        image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(
                        image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)

                #pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            images_and_labels.append([images, label])

        image_batch, label_batch = tf.train.batch_join(
            images_and_labels,
            batch_size=batch_size_placeholder,
            shapes=[(args.image_size, args.image_size, 3), ()],
            enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)

        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')

        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))

        # Create a queue that produces indices into the test_image_list and test_label_list
        test_labels = ops.convert_to_tensor(test_label_list, dtype=tf.int32)
        test_range_size = array_ops.shape(test_labels)[0]
        test_index_queue = tf.train.range_input_producer(test_range_size,
                                                         num_epochs=None,
                                                         shuffle=True,
                                                         seed=None,
                                                         capacity=32)

        # test_batch_size and test_epoch_size are never defined in the original
        # snippet (only hinted at in the commented-out lines); derive them from
        # the training arguments so the graph can actually be built.
        test_batch_size = args.batch_size
        test_epoch_size = args.epoch_size
        test_index_dequeue_op = test_index_queue.dequeue_many(
            test_batch_size * test_epoch_size, 'test_index_dequeue')
        test_input_queue = data_flow_ops.FIFOQueue(
            capacity=100000,
            dtypes=[tf.string, tf.int64],
            shapes=[(1, ), (1, )],
            shared_name=None,
            name=None)
        test_enqueue_op = test_input_queue.enqueue_many(
            [image_paths_placeholder, labels_placeholder],
            name='test_enqueue_op')

        test_nrof_preprocess_threads = 4
        test_images_and_labels = []
        for _ in range(test_nrof_preprocess_threads):
            filenames, label = test_input_queue.dequeue()
            images = []
            for filename in tf.unstack(filenames):
                file_contents = tf.read_file(filename)
                image = tf.image.decode_image(file_contents, channels=3)
                if args.random_rotate:
                    image = tf.py_func(facenet.random_rotate_image, [image],
                                       tf.uint8)
                if args.random_crop:
                    image = tf.random_crop(
                        image, [args.image_size, args.image_size, 3])
                else:
                    image = tf.image.resize_image_with_crop_or_pad(
                        image, args.image_size, args.image_size)
                if args.random_flip:
                    image = tf.image.random_flip_left_right(image)

                # pylint: disable=no-member
                image.set_shape((args.image_size, args.image_size, 3))
                images.append(tf.image.per_image_standardization(image))
            test_images_and_labels.append([images, label])

        test_image_batch, test_label_batch = tf.train.batch_join(
            test_images_and_labels,
            batch_size=test_batch_size,
            shapes=[(args.image_size, args.image_size, 3), ()],
            enqueue_many=True,
            capacity=4 * test_nrof_preprocess_threads * test_batch_size,
            allow_smaller_final_batch=True)

        test_image_batch = tf.identity(test_image_batch, 'test_image_batch')
        test_image_batch = tf.identity(test_image_batch, 'test_input')
        test_label_batch = tf.identity(test_label_batch, 'test_label_batch')

        print('Total number of test classes: %d' % test_nrof_classes)
        print('Total number of test examples: %d' % len(test_image_list))

        image_input_placeholder = tf.get_default_graph().get_tensor_by_name(
            "input:0")
        label_input_placeholder = tf.get_default_graph().get_tensor_by_name(
            "label_batch:0")

        print('Building training graph')

        # Build the inference graph
        print("embeddings size is: %s" % (str(args.embedding_size)))
        prelogits, _ = network.inference(
            image_batch,
            args.keep_probability,
            phase_train=phase_train_placeholder,
            bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)
        logits = slim.fully_connected(
            prelogits,
            len(train_set),
            activation_fn=None,
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(args.weight_decay),
            scope='Logits',
            reuse=False)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        # Add center loss
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, _ = facenet.center_loss(
                prelogits, label_batch, args.center_loss_alfa, nrof_classes)
            tf.add_to_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES,
                prelogits_center_loss * args.center_loss_factor)

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch,
            logits=logits,
            name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.global_variables(), args.log_histograms)

        # Create a saver
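        # When fine-tuning from a pretrained checkpoint, the 'Logits' variables
        # (whose shape depends on the number of classes) and the top
        # Inception-ResNet blocks are excluded below, so those layers are
        # retrained from scratch on the new data.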
        if args.pretrained_model:
            variables = []
            for v in tf.trainable_variables():
                if v.name.startswith("Logits") or v.name.startswith(
                        "InceptionResnetV1/Block8") or v.name.startswith(
                            "InceptionResnetV1/Block17"):
                    print("skip variable %s" % v.name)
                    continue
                else:
                    print("var name %s" % v.name)
                    variables.append(v)
                # if not v.name.startswith("Logits") and not v.name.startswith("Block8"):
                #      variables.append(v)

            saver = tf.train.Saver(variables, max_to_keep=3)
        else:
            saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():

            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                #epoch = step // args.epoch_size

                # Train for one epoch
                step = train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step,
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file,
                    test_image_list, test_label_list, test_index_dequeue_op, test_enqueue_op, image_batch, label_batch)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)
                epoch += 1

                # Evaluate on LFW
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder,
                             labels_placeholder, phase_train_placeholder,
                             batch_size_placeholder, embeddings, label_batch,
                             lfw_paths, actual_issame, args.lfw_batch_size,
                             args.lfw_nrof_folds, log_dir, step,
                             summary_writer)

            # constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ["embeddings"])
            # with tf.gfile.FastGFile(model_dir + '/video-faces_%d_model.pb'%args.max_nrof_epochs, mode='wb') as f:
            #     f.write(constant_graph.SerializeToString())
    return model_dir
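The commented-out lines above hint at freezing the trained graph into a single .pb file. A self-contained sketch of that step using the TF 1.x graph_util API, assuming a live sess and the 'embeddings' output node created earlier (freeze_embeddings_graph is a hypothetical helper):

from tensorflow.python.framework import graph_util

def freeze_embeddings_graph(sess, output_path, output_node_names=('embeddings',)):
    # Fold every variable reachable from the output nodes into constants
    # embedded in the GraphDef, then serialize the result to one .pb file
    constant_graph = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), list(output_node_names))
    with tf.gfile.GFile(output_path, 'wb') as f:
        f.write(constant_graph.SerializeToString())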
コード例 #36
0
def main(args):
  
    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    train_set = facenet.get_dataset(args.data_dir)
    result_filename = os.path.join(os.path.expanduser(args.data_dir), 'statistics.txt')
    
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        
        # Get a list of image paths and their labels
        image_list, _ = facenet.get_image_paths_and_labels(train_set)
        nrof_images = len(image_list)
        assert nrof_images>0, 'The dataset should not be empty'
        
        input_queue = tf.train.string_input_producer(image_list, num_epochs=None,
                             shuffle=False, seed=None, capacity=32)
        
        
        nrof_preprocess_threads = 4
        images = []
        for _ in range(nrof_preprocess_threads):
            filename = input_queue.dequeue()
            file_contents = tf.read_file(filename)
            image = tf.image.decode_image(file_contents)
            # The original hardcoded 160 here; use args.image_size so it matches the set_shape call below
            image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
            
            #pylint: disable=no-member
            image.set_shape((args.image_size, args.image_size, 3))
            image = tf.cast(image, tf.float32)
            images.append((image,))
    
        # The original hardcoded batch_size=100 while the loop below counts batches with args.batch_size; use args.batch_size for both
        image_batch = tf.train.batch_join(images, batch_size=args.batch_size, allow_smaller_final_batch=True)
        #mean = tf.reduce_mean(image_batch, reduction_indices=[0,1,2])
        m, v = tf.nn.moments(image_batch, [1,2])
        mean = tf.reduce_mean(m, 0)
        variance = tf.reduce_mean(v, 0)
        
        
        
        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            # Training and validation loop
            print('Running training')
            nrof_batches = nrof_images // args.batch_size
            #nrof_batches = 20
            means = np.zeros(shape=(nrof_batches, 3), dtype=np.float32)
            variances = np.zeros(shape=(nrof_batches, 3), dtype=np.float32)
            for i in range(nrof_batches):
                means[i,:], variances[i,:] = sess.run([mean, variance])
                if (i+1) % 10 == 0:
                    # means[:i] excluded the current batch in the original; include it with i+1
                    print('Batch: %5d/%5d, Mean: %s,  Variance: %s' % (i+1, nrof_batches, np.array_str(np.mean(means[:i+1,:],axis=0)), np.array_str(np.mean(variances[:i+1,:],axis=0))))
            dataset_mean = np.mean(means,axis=0)
            dataset_variance = np.mean(variances,axis=0)
            print('Final mean: %s' % np.array_str(dataset_mean))
            print('Final variance: %s' % np.array_str(dataset_variance))
            with open(result_filename, 'w') as text_file:
                print('Writing result to %s' % result_filename)
                text_file.write('Mean: %.5f, %.5f, %.5f\n' % (dataset_mean[0], dataset_mean[1], dataset_mean[2]))
                text_file.write('Variance: %.5f, %.5f, %.5f\n' % (dataset_variance[0], dataset_variance[1], dataset_variance[2]))
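A note on how such statistics are typically used: instead of tf.image.per_image_standardization, each image can be normalized with the fixed per-channel mean and variance computed above. A minimal sketch, assuming the dataset_mean and dataset_variance values written to statistics.txt (standardize_with_dataset_stats is a hypothetical helper):

def standardize_with_dataset_stats(image, dataset_mean, dataset_variance):
    # image: float32 tensor of shape (height, width, 3);
    # dataset_mean/dataset_variance: length-3 per-channel statistics
    return (image - dataset_mean) / tf.sqrt(dataset_variance)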
コード例 #37
0
import facenet
import detect_face
import os
import sys
import math
import pickle
import glob

import tensorflow as tf  # used throughout below but missing from the original snippet
from sklearn.svm import SVC

with tf.Graph().as_default():

    with tf.Session() as sess:

        datadir = '/Users/parth/facenet/demo_dataset_align'
        dataset = facenet.get_dataset(datadir)
        paths, labels = facenet.get_image_paths_and_labels(dataset)
        print('Number of classes: %d' % len(dataset))
        print('Number of images: %d' % len(paths))

        print('Loading feature extraction model')
        modeldir = '/Users/parth/facenet/models/20180402-114759/20180402-114759.pb'
        facenet.load_model(modeldir)

        images_placeholder = tf.get_default_graph().get_tensor_by_name(
            "input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
            "phase_train:0")
        embedding_size = embeddings.get_shape()[1]

        # Create a list of class names
コード例 #38
0
def test(self, dataset):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            np.random.seed(seed=666)
            paths, labels = facenet.get_image_paths_and_labels(dataset)
コード例 #39
0
def main(args):
    with tf.Graph().as_default():

        with tf.Session() as sess:

            np.random.seed(seed=args.seed)
            dataset = facenet.get_dataset(args.data_dir)

            # Check that there is at least one training image per class.
            # (The original wrapped the assert in parentheses, turning it into
            # an always-true tuple, so the check never fired.)
            for cls in dataset:
                assert len(cls.image_paths) > 0, \
                    'There must be at least one image for each class in the dataset'

            paths, labels = facenet.get_image_paths_and_labels(dataset)

            print('Number of classes: %d' % len(dataset))
            print('Number of images: %d' % len(paths))

            # Load the model
            print('Loading feature extraction model')
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Run forward pass to calculate embeddings
            print('Calculating features for images')
            nrof_images = len(paths)
            nrof_batches_per_epoch = int(
                math.ceil(1.0 * nrof_images / args.batch_size))
            emb_array = np.zeros((nrof_images, embedding_size))
            for i in range(nrof_batches_per_epoch):
                start_index = i * args.batch_size
                end_index = min((i + 1) * args.batch_size, nrof_images)
                paths_batch = paths[start_index:end_index]
                images = facenet.load_data(paths_batch, False, False,
                                           args.image_size)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                emb_array[start_index:end_index, :] = sess.run(
                    embeddings, feed_dict=feed_dict)

            classifier_filename_exp = os.path.expanduser(
                args.classifier_filename)

            if (args.mode == 'TRAIN'):
                # Train classifier
                print('Training classifier')
                model = SVC(kernel='linear', probability=True)
                model.fit(emb_array, labels)

                # Create a list of class names
                class_names = [cls.name.replace('_', ' ') for cls in dataset]

                # Saving classifier model
                with open(classifier_filename_exp, 'wb') as outfile:
                    pickle.dump((model, class_names), outfile)
                print('Saved classifier model to file "%s"' %
                      classifier_filename_exp)

            elif (args.mode == 'CLASSIFY'):
                # Classify images
                print('Testing classifier')
                with open(classifier_filename_exp, 'rb') as infile:
                    (model, class_names) = pickle.load(infile)

                print('Loaded classifier model from file "%s"' %
                      classifier_filename_exp)

                predictions = model.predict_proba(emb_array)
                best_class_indices = np.argmax(predictions, axis=1)
                best_class_probabilities = predictions[
                    np.arange(len(best_class_indices)), best_class_indices]

                for i in range(len(best_class_indices)):
                    print('%4d  %s: %.3f' %
                          (i, class_names[best_class_indices[i]],
                           best_class_probabilities[i]))

                accuracy = np.mean(np.equal(best_class_indices, labels))
                print('Accuracy: %.3f' % accuracy)
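For completeness, a minimal sketch of reusing the pickled classifier outside this script on a single new embedding (classify_embedding is a hypothetical helper, not part of facenet):

import pickle
import numpy as np

def classify_embedding(emb, classifier_filename):
    # emb: 1-D numpy array holding one face embedding
    with open(classifier_filename, 'rb') as infile:
        model, class_names = pickle.load(infile)
    probs = model.predict_proba(emb.reshape(1, -1))[0]
    best = int(np.argmax(probs))
    return class_names[best], probs[best]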
コード例 #40
0
def main(args):

    network = importlib.import_module(args.model_def, 'inference')

    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(
            log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(
            model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Store some git revision info in a text file in the log directory
    src_path, _ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))

    np.random.seed(seed=args.seed)
    train_set = facenet.get_dataset(args.data_dir)

    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(
            os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)

        # Read data and apply label preserving distortions
        image_batch, label_batch = facenet.read_and_augument_data(
            image_list, label_list, args.image_size, args.batch_size,
            args.max_nrof_epochs, args.random_crop, args.random_flip,
            args.nrof_preprocess_threads)
        print('Total number of classes: %d' % len(train_set))
        print('Total number of examples: %d' % len(image_list))

        # Node for input images
        image_batch.set_shape((None, args.image_size, args.image_size, 3))
        image_batch = tf.identity(image_batch, name='input')

        # Placeholder for the learning rate
        learning_rate_placeholder = tf.placeholder(tf.float32,
                                                   name='learning_rate')

        # Placeholder for phase_train
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

        # Build the inference graph
        prelogits, _ = network.inference(image_batch,
                                         args.keep_probability,
                                         phase_train=phase_train_placeholder,
                                         weight_decay=args.weight_decay)
        with tf.variable_scope('Logits'):
            n = int(prelogits.get_shape()[1])
            m = len(train_set)
            w = tf.get_variable(
                'w',
                shape=[n, m],
                dtype=tf.float32,
                initializer=tf.truncated_normal_initializer(stddev=0.1),
                regularizer=slim.l2_regularizer(args.weight_decay),
                trainable=True)
            b = tf.get_variable('b', [m], initializer=None, trainable=True)
            logits = tf.matmul(prelogits, w) + b

        # Add DeCov regularization loss
        if args.decov_loss_factor > 0.0:
            logits_decov_loss = facenet.decov_loss(
                logits) * args.decov_loss_factor
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                 logits_decov_loss)

        # Add center loss
        update_centers = tf.no_op('update_centers')
        if args.center_loss_factor > 0.0:
            prelogits_center_loss, update_centers = facenet.center_loss(
                prelogits, label_batch, args.center_loss_alfa)
            tf.add_to_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES,
                prelogits_center_loss * args.center_loss_factor)

        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

        learning_rate = tf.train.exponential_decay(
            learning_rate_placeholder,
            global_step,
            args.learning_rate_decay_epochs * args.epoch_size,
            args.learning_rate_decay_factor,
            staircase=True)
        tf.scalar_summary('learning_rate', learning_rate)

        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits, label_batch, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        # Calculate the total losses
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses,
                              name='total_loss')

        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
                                 learning_rate, args.moving_average_decay,
                                 tf.all_variables(), args.log_histograms)

        # Create a saver
        save_variables = list(set(tf.all_variables()) - set([w]) - set([b]))
        saver = tf.train.Saver(save_variables, max_to_keep=3)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        sess.run(tf.initialize_all_variables())
        sess.run(tf.initialize_local_variables())
        summary_writer = tf.train.SummaryWriter(log_dir, sess.graph)
        tf.train.start_queue_runners(sess=sess)

        with sess.as_default():

            if pretrained_model:
                saver.restore(sess, pretrained_model)

            # Training and validation loop
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                # Train for one epoch
                train(args, sess, epoch, phase_train_placeholder,
                      learning_rate_placeholder, global_step, total_loss,
                      train_op, summary_op, summary_writer,
                      regularization_losses, args.learning_rate_schedule_file,
                      update_centers)

                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer,
                                             model_dir, subdir, step)

                # Evaluate on LFW
                if args.lfw_dir:
                    start_time = time.time()
                    _, _, accuracy, val, val_std, far = lfw.validate(
                        sess,
                        lfw_paths,
                        actual_issame,
                        args.seed,
                        args.batch_size,
                        image_batch,
                        phase_train_placeholder,
                        embeddings,
                        nrof_folds=args.lfw_nrof_folds)
                    print('Accuracy: %1.3f+-%1.3f' %
                          (np.mean(accuracy), np.std(accuracy)))
                    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' %
                          (val, val_std, far))
                    lfw_time = time.time() - start_time
                    # Add validation loss and accuracy to summary
                    summary = tf.Summary()
                    #pylint: disable=maybe-no-member
                    summary.value.add(tag='lfw/accuracy',
                                      simple_value=np.mean(accuracy))
                    summary.value.add(tag='lfw/val_rate', simple_value=val)
                    summary.value.add(tag='time/lfw', simple_value=lfw_time)
                    summary_writer.add_summary(summary, step)
                    with open(os.path.join(log_dir, 'lfw_result.txt'),
                              'at') as f:
                        f.write('%d\t%.5f\t%.5f\n' %
                                (step, np.mean(accuracy), val))

    return model_dir