Code example #1
File: infer.py Project: n23e113/neuguen
def infer():
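    # Single-image inference: build the frozen MobileNet-v1 graph, restore
    # the latest checkpoint, and print the raw logits and the argmax class.
    # Assumes the file-level imports of infer.py (tensorflow as tf, numpy as
    # np, the project's model_build) and an argparse `args` namespace; these
    # examples use Python 2 / TF 1.x idioms (xrange, tf.Session) throughout.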

    x = read_image_file(args.input_pic)
    logit, _, _, _ = model_build.build_mobilenet_v1_debug(
        x, mobilenet_training=False, neuguen_training=False, preprocess=False)

    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True

    neuguen_saver = tf.train.Saver()
    save_path = args.model_checkpoint

    with tf.Session(config=session_config) as session:

        session.run(tf.global_variables_initializer())

        model_build.restore_last_checkpoint(session, save_path)
        logit_value = session.run([logit])
        print(logit_value)
        print(np.argmax(logit_value[0]))
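
Note: read_image_file is a project helper whose body is not shown in this
listing. Judging from the (x + 1.0) / 2.0 * 255 rescale in code example #2,
the network appears to consume pixels in [-1, 1]; a hypothetical sketch
under that assumption:

import numpy as np
from PIL import Image

def read_image_file(path):
    # Hypothetical: decode to RGB, resize to the 224x224 network input,
    # rescale uint8 [0, 255] pixels into the assumed [-1, 1] range, and
    # return a batch of one image.
    img = Image.open(path).convert("RGB").resize((224, 224))
    arr = np.asarray(img, dtype=np.float32) / 255.0 * 2.0 - 1.0
    return arr[np.newaxis, ...]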
Code example #2
def generate():
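    # Gradient-based image generation: the picture itself is the trainable
    # variable, optimized against the frozen classifier so that its logits
    # favor the requested class ("smile" or not), with constraint_loss(x)
    # added as a penalty term.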

    if args.input_pic:
        init = read_image_file(args.input_pic)
    else:
        init = random_init()

    with tf.variable_scope("neuguen_generator"):
        # h x w x rgb [0-255]
        x = tf.get_variable("picture", dtype=tf.float32, initializer=init)
        tf.summary.histogram("x", x)
    if args.cls == "smile":
        y = tf.constant([[1]], dtype=tf.int64, shape=(1, 1))
        print("smile")
    else:
        y = tf.constant([[0]], dtype=tf.int64, shape=(1, 1))
        print("not smile")

    logit, _, _, _ = model_build.build_mobilenet_v1_debug(
        x, mobilenet_training=False, neuguen_training=False, preprocess=False)

    loss = model_build.build_loss(logit, y)
    constraint = constraint_loss(x)
    tf.summary.histogram("constraint_loss", constraint)
    loss = loss + constraint

    global_step = tf.train.create_global_step()

    train_op = model_build.build_generator_train_op(loss, [x], global_step)

    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True

    neuguen_saver = tf.train.Saver()
    save_path = args.model_checkpoint

    merge_summary = tf.summary.merge_all()
    save_path_generator = "neuguen_generator"
    if not os.path.exists(save_path_generator):
        os.makedirs(save_path_generator)

    with tf.Session(config=session_config) as session:

        session.run(tf.global_variables_initializer())

        model_build.restore_last_checkpoint(session, save_path)

        summary_writer = tf.summary.FileWriter(save_path_generator,
                                               session.graph)

        for j in xrange(args.steps):
            loss_value, _, step_value, logit_value, summary = session.run(
                [loss, train_op, global_step, logit, merge_summary])
            summary_writer.add_summary(summary, step_value)
            sys.stdout.write("\r{0}--{1}    ".format(step_value, loss_value))
            sys.stdout.flush()
            #print(np.argmax(logit_value[0]))
            #print(logit_value)
        print("")
        print(logit_value[0])
        print(stable_softmax(logit_value[0]))
        generated_image = session.run([x])
        generated_image = (np.asarray(generated_image[0][0]) + 1.0) / 2.0 * 255
        generated_image = generated_image.astype(np.uint8)
        #print(generated_image.shape)
        #print(generated_image.dtype)
        result = Image.fromarray(generated_image)
        result = result.resize((178, 218), Image.ANTIALIAS)
        result.save('out.jpg')
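
Note: stable_softmax is another project helper not shown here. A minimal
sketch of the presumed implementation (the standard max-subtraction trick
for numerical stability) is:

import numpy as np

def stable_softmax(logits):
    # Shift by the max logit before exponentiating; the softmax value is
    # unchanged, but large logits can no longer overflow np.exp.
    shifted = logits - np.max(logits)
    exps = np.exp(shifted)
    return exps / np.sum(exps)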
Code example #3
def test():
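    # Evaluate the frozen model on the test split: dequeue batches from a
    # slim prefetch queue, feed them through the placeholders, and accumulate
    # a running-mean accuracy plus a summed confusion matrix.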
    dataset, testset = data_provider.config_to_slim_dataset(
        config=TRAINING_CONFIG, dataset_dir="./")

    # testing data
    prefetch_queue_test = data_provider.slim_dataset_to_prefetch_queue(
        testset, BATCH_SIZE, shuffle=False)
    face_test_batch, label_test_batch = prefetch_queue_test.dequeue()
    face_test_batch = tf.cast(face_test_batch, tf.float32)

    x = tf.placeholder(tf.uint8, shape=(None, 224, 224, 3))
    y = tf.placeholder(tf.int64, shape=(None, 1))

    logit, _, _, _ = model_build.build_mobilenet_v1_debug(
        x, mobilenet_training=False, neuguen_training=False)

    global_step = tf.train.create_global_step()
    increment_global_step_op = tf.assign(global_step, global_step + 1)

    correct_prediction = tf.equal(tf.squeeze(y), tf.argmax(logit, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    confusion_matrix_op = tf.confusion_matrix(tf.squeeze(y),
                                              tf.argmax(logit, 1))

    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True

    neuguen_saver = tf.train.Saver()
    save_path = "neuguen_test"
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    with tf.Session(config=session_config) as session:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        session.run(tf.global_variables_initializer())

        if args.model_checkpoint:
            neuguen_saver.restore(session, args.model_checkpoint)
        else:
            neuguen_saver.restore(
                session, tf.train.latest_checkpoint(DEFAULT_MODEL_PATH))

        for j in xrange(1):
            confusion_matrix = np.array([[0., 0.], [0., 0.]])
            accuracy_avg = 0.0
            for i in xrange(int(TRAINING_CONFIG["test_size"] / BATCH_SIZE)):
                faces, labels, step = session.run([
                    face_test_batch, label_test_batch, increment_global_step_op
                ])
                accuracy_value, confusion, logit_value = session.run(
                    [accuracy, confusion_matrix_op, logit],
                    feed_dict={
                        x: faces,
                        y: labels
                    })
                confusion_matrix = confusion_matrix + confusion
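                # Running mean: avg_i = avg_{i-1} + (a_i - avg_{i-1}) / i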
                accuracy_avg = accuracy_avg + (accuracy_value -
                                               accuracy_avg) / (i + 1)
                sys.stdout.write(
                    "\r{0}--{1} training accuracy(ma):{2}    ".format(
                        j, i, accuracy_avg))
                sys.stdout.flush()
            print("")
            print(confusion_matrix)

        print("thread.join")
        coord.request_stop()
        coord.join(threads)
Code example #4
def train():
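    # Train or, with args.fine_tune set, fine-tune the classifier: batches
    # come from a slim prefetch queue, summaries are written every 100 steps
    # (with a FULL_TRACE profile every 10000), and a checkpoint is saved
    # after each of the 20 passes over the training set.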
    dataset, testset = data_provider.config_to_slim_dataset(
        config=TRAINING_CONFIG, dataset_dir="./")

    # training data
    prefetch_queue = data_provider.slim_dataset_to_prefetch_queue(
        dataset, BATCH_SIZE)
    face_batch, label_batch = prefetch_queue.dequeue()
    face_batch = tf.cast(face_batch, tf.float32)

    tf.summary.image("face", face_batch[0:16], max_outputs=16)

    x = tf.placeholder(tf.uint8, shape=(None, 224, 224, 3))
    y = tf.placeholder(tf.int64, shape=(None, 1))

    if args.fine_tune:
        logit, trainable, total_reg_losses, _ = model_build.build_mobilenet_v1_debug(
            x, mobilenet_training=True, neuguen_training=True)
        print("fine tune")
    else:
        logit, trainable, total_reg_losses, _ = model_build.build_mobilenet_v1_debug(
            x)
    tf.summary.scalar("regularization_loss", tf.reduce_sum(total_reg_losses))

    loss = model_build.build_loss(logit, y)
    tf.summary.scalar("cross_entropy_loss", loss)

    loss = loss + tf.reduce_sum(total_reg_losses)
    tf.summary.scalar("total_loss", loss)

    global_step = tf.train.create_global_step()
    train_op = model_build.build_train_op(loss, trainable, global_step)
    for var in tf.global_variables():
        tf.summary.histogram(var.op.name, var)

    correct_prediction = tf.equal(tf.squeeze(y), tf.argmax(logit, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar("batch_accuracy", accuracy)
    confusion_matrix_op = tf.confusion_matrix(tf.squeeze(y),
                                              tf.argmax(logit, 1))

    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True

    neuguen_saver = tf.train.Saver(max_to_keep=10)
    merge_summary = tf.summary.merge_all()

    save_path_fine_tune = "neuguen_model_fine_tune"
    if not os.path.exists(save_path_fine_tune):
        os.makedirs(save_path_fine_tune)
    save_path = "neuguen_model"
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    with tf.Session(config=session_config) as session:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        session.run(tf.global_variables_initializer())

        if args.fine_tune:
            summary_writer = tf.summary.FileWriter(save_path_fine_tune,
                                                   session.graph)
            model_build.restore_last_checkpoint(session, save_path)
        else:
            summary_writer = tf.summary.FileWriter(save_path, session.graph)
            model_build.restore_pretrained_mobilenet(session)

        for j in xrange(20):
            confusion_matrix = np.array([[0., 0.], [0., 0.]])
            accuracy_avg = 0.0

            for i in xrange(int(TRAINING_CONFIG["training_size"] /
                                BATCH_SIZE)):
                faces, labels, step = session.run(
                    [face_batch, label_batch, global_step])
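                # Write summaries every 100 steps; every 10000 steps also
                # capture a FULL_TRACE profile via RunMetadata.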
                if step % 100 == 99:
                    if step % 10000 == 9999:
                        run_options = tf.RunOptions(
                            trace_level=tf.RunOptions.FULL_TRACE)
                        run_metadata = tf.RunMetadata()
                        summary, loss_value, accuracy_value, confusion, _ = session.run(
                            [
                                merge_summary, loss, accuracy,
                                confusion_matrix_op, train_op
                            ],
                            feed_dict={
                                x: faces,
                                y: labels
                            },
                            options=run_options,
                            run_metadata=run_metadata)
                        summary_writer.add_summary(summary, step)
                        summary_writer.add_run_metadata(
                            run_metadata, "step{0}".format(step))
                    else:
                        summary, loss_value, accuracy_value, confusion, _ = session.run(
                            [
                                merge_summary, loss, accuracy,
                                confusion_matrix_op, train_op
                            ],
                            feed_dict={
                                x: faces,
                                y: labels
                            })
                        summary_writer.add_summary(summary, step)
                else:
                    loss_value, accuracy_value, confusion, _ = session.run(
                        [loss, accuracy, confusion_matrix_op, train_op],
                        feed_dict={
                            x: faces,
                            y: labels
                        })
                confusion_matrix = confusion_matrix + confusion
                accuracy_avg = accuracy_avg + (accuracy_value -
                                               accuracy_avg) / (i + 1)
                sys.stdout.write(
                    "\r{0}--{1} training accuracy(ma):{2}    ".format(
                        j, i, accuracy_avg))
                sys.stdout.flush()
            print("")
            print(confusion_matrix)

            if args.fine_tune:
                neuguen_saver.save(session,
                                   os.path.join(save_path_fine_tune,
                                                "neuguen.ckpt"),
                                   global_step=global_step)
            else:
                neuguen_saver.save(session,
                                   os.path.join(save_path, "neuguen.ckpt"),
                                   global_step=global_step)

        print("thread.join")
        coord.request_stop()
        coord.join(threads)
Code example #5
File: test_debug.py Project: n23e113/neuguen
def test():
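    # Debug variant of test(): additionally inspects the "neuguen" BatchNorm
    # moving statistics, histograms every global variable, and swaps the
    # confusion-matrix fetch for a no-op inside the run loop.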
    dataset, testset = data_provider.config_to_slim_dataset(
        config=TRAINING_CONFIG, dataset_dir="./")

    # testing data
    prefetch_queue_test = data_provider.slim_dataset_to_prefetch_queue(
        testset, DEBUG_BATCH_SIZE, shuffle=False)
    face_test_batch, label_test_batch = prefetch_queue_test.dequeue()
    face_test_batch = tf.cast(face_test_batch, tf.float32)

    print(face_test_batch.shape)

    tf.summary.image("face", face_test_batch[0:16], max_outputs=16)

    x = tf.placeholder(tf.uint8, shape=(None, 224, 224, 3))
    y = tf.placeholder(tf.int64, shape=(None, 1))

    logit, _, _, debug_endpoints = model_build.build_mobilenet_v1_debug(
        x, mobilenet_training=False, neuguen_training=False)
    with tf.variable_scope("neuguen", reuse=True):
        moving_mean = tf.get_variable("BatchNorm/moving_mean")
        moving_var = tf.get_variable("BatchNorm/moving_variance")
    print(moving_mean.shape)
    print(moving_var.shape)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    print(update_ops)

    for var in tf.global_variables():
        tf.summary.histogram(var.op.name, var)

    global_step = tf.train.create_global_step()
    increment_global_step_op = tf.assign(global_step, global_step + 1)

    correct_prediction = tf.equal(tf.squeeze(y), tf.argmax(logit, 1))
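    # Evaluating accuracy under this control dependency forces the collected
    # UPDATE_OPS (the BatchNorm moving-average updates printed above) to run
    # on every fetch.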
    with tf.control_dependencies(update_ops):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar("batch_accuracy", accuracy)
    confusion_matrix_op = tf.confusion_matrix(tf.squeeze(y),
                                              tf.argmax(logit, 1))
    # Build the stand-in fetch once, outside the run loop, so the graph does
    # not grow on every iteration.
    no_op = tf.no_op()

    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True

    neuguen_saver = tf.train.Saver()
    merge_summary = tf.summary.merge_all()
    save_path = "neuguen_test"
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    with tf.Session(config=session_config) as session:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        summary_writer = tf.summary.FileWriter(save_path, session.graph)

        session.run(tf.global_variables_initializer())

        if args.model_checkpoint:
            neuguen_saver.restore(session, args.model_checkpoint)
        else:
            neuguen_saver.restore(
                session, tf.train.latest_checkpoint(DEFAULT_MODEL_PATH))

        for j in xrange(1):
            confusion_matrix = np.array([[0., 0.], [0., 0.]])
            accuracy_avg = 0.0
            #for i in xrange(int(TRAINING_CONFIG["test_size"] / BATCH_SIZE)):
            for i in xrange(100):
                faces, labels, step = session.run([
                    face_test_batch, label_test_batch, increment_global_step_op
                ])
                # confusion_matrix_op is swapped out for this debug run;
                # fetching no_op yields None for `confusion`.
                summary, accuracy_value, confusion, logit_value = session.run(
                    [merge_summary, accuracy, no_op, logit],
                    feed_dict={
                        x: faces,
                        y: labels
                    })
                #print(moving_mean_value)
                #confusion_matrix = confusion_matrix + confusion
                accuracy_avg = accuracy_avg + (accuracy_value -
                                               accuracy_avg) / (i + 1)
                sys.stdout.write(
                    "\r{0}--{1} training accuracy(ma):{2}    ".format(
                        j, i, accuracy_avg))
                sys.stdout.flush()
                #print("")
                #print(logit_value)
                summary_writer.add_summary(summary, step)
            print("")
            print(confusion_matrix)

        print("thread.join")
        coord.request_stop()
        coord.join(threads)