Example 1
def main(_):
    if not FLAGS.dataset_name:
        raise ValueError(
            'You must supply the dataset name with --dataset_name')
    if not FLAGS.record_dir:
        raise ValueError(
            'You must supply the TFRecord_Dir name with --record_dir')

    data_set_dummy = tf_record_dataset.TFRecordDataset(
        tfrecord_dir=FLAGS.record_dir,
        dataset_name=FLAGS.dataset_name,
        num_classes=10)

    dataset = data_set_dummy.get_split(split_name='train')

    provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
    [image, label] = provider.get(['image', 'label'])

    with tf.Session() as sess:
        with slim.queues.QueueRunners(sess):
            plt.figure()
            for i in range(4):
                np_image, np_label = sess.run([image, label])
                height, width, _ = np_image.shape
                name = dataset.labels_to_names[np_label]

                plt.subplot(2, 2, i + 1)
                plt.imshow(np_image)
                plt.title('%d, %s, %d x %d' % (np_label, name, height, width))
                plt.axis('off')
            plt.show()
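The flag definitions assumed by main() are not part of this snippet; a minimal sketch of how they might be declared with the TF 1.x tf.app.flags API (flag names and defaults here are assumptions) is:

import tensorflow as tf

# Hypothetical flag declarations matching the FLAGS used in main() above.
tf.app.flags.DEFINE_string('record_dir', None,
                           'Directory containing the TFRecord files.')
tf.app.flags.DEFINE_string('dataset_name', None,
                           'Name of the dataset, e.g. "mnist".')
FLAGS = tf.app.flags.FLAGS

if __name__ == '__main__':
    tf.app.run()  # parses the flags and calls main(_)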
Example 2
def run():
    tfrecord_dataset = tf_record_dataset.TFRecordDataset(
        tfrecord_dir=FLAGS.record_dir,
        dataset_name=FLAGS.dataset_name,
        num_classes=FLAGS.num_classes)

    dataset = tfrecord_dataset.get_split(split_name='validation')

    # Choose a model: VGG, Inception V1~V3, AlexNet, ResNet, etc.

    inception = nets.inception
    X_image = tf.placeholder(tf.float32,
                             shape=[
                                 None,
                                 inception.inception_v3.default_image_size,
                                 inception.inception_v3.default_image_size, 3
                             ])
    images, labels, _ = load_batch(
        dataset,
        height=inception.inception_v3.default_image_size,
        width=inception.inception_v3.default_image_size,
        num_classes=FLAGS.num_classes)

    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, end_points = inception.inception_v3(
            inputs=X_image, num_classes=FLAGS.num_classes)

    predictions = tf.argmax(logits, 1)
    Y_label = tf.placeholder(tf.float32, shape=[None, FLAGS.num_classes])
    targets = tf.argmax(Y_label, 1)

    provider = slim.dataset_data_provider.DatasetDataProvider(dataset)

    log_dir = FLAGS.log
    eval_dir = FLAGS.log
    if not tf.gfile.Exists(eval_dir):
        tf.gfile.MakeDirs(eval_dir)
    if not tf.gfile.Exists(log_dir):
        raise Exception("trained check point does not exist at %s " % log_dir)
    else:
        checkpoint_path = tf.train.latest_checkpoint(log_dir)

    import matplotlib.pyplot as plt
    with tf.Session() as sess:
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint_path)
        with slim.queues.QueueRunners(sess):

            for i in range(100):
                np_image, np_label = sess.run([images, labels])

                tempimage, tflabel = sess.run([predictions, targets],
                                              feed_dict={
                                                  X_image: np_image,
                                                  Y_label: np_label
                                              })

                print("Predict : ", tempimage)
                print("Answer  : ", tflabel)
                print('enter')
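As a small, hypothetical sanity check (not in the original snippet), the per-batch accuracy could be computed inside the loop from the two integer arrays:

import numpy as np

# tempimage holds the predicted class ids, tflabel the ground-truth ids.
batch_accuracy = np.mean(np.asarray(tempimage) == np.asarray(tflabel))
print("Batch accuracy: %.3f" % batch_accuracy)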
Example 3
def run():
    tfrecord_dataset = tf_record_dataset.TFRecordDataset(
        tfrecord_dir=FLAGS.record_dir,
        dataset_name=FLAGS.dataset_name,
        num_classes=FLAGS.num_classes)

    dataset = tfrecord_dataset.get_split(split_name='train')

    # Choose a model: VGG, Inception V1~V3, AlexNet, ResNet, etc.

    inception = nets.inception

    images, labels, _ = load_batch(
        dataset,
        height=inception.inception_v3.default_image_size,
        width=inception.inception_v3.default_image_size,
        num_classes=FLAGS.num_classes)

    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, end_points = inception.inception_v3(
            inputs=images, num_classes=FLAGS.num_classes)

    # Loss function
    loss = slim.losses.softmax_cross_entropy(logits, labels)
    total_loss = slim.losses.get_total_loss()

    # Optimizer
    optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)

    predictions = tf.argmax(logits, 1)
    targets = tf.argmax(labels, 1)

    correct_prediction = tf.equal(predictions, targets)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    tf.summary.scalar('losses/Total', total_loss)
    tf.summary.scalar('accuracy', accuracy)
    summary_op = tf.summary.merge_all()

    # Log directory for checkpoints and summaries
    log_dir = FLAGS.log
    if not tf.gfile.Exists(log_dir):
        tf.gfile.MakeDirs(log_dir)

    # Define the training operation
    train_op = slim.learning.create_train_op(total_loss, optimizer)

    final_loss = slim.learning.train(train_op,
                                     log_dir,
                                     number_of_steps=FLAGS.step,
                                     summary_op=summary_op,
                                     save_summaries_secs=30,
                                     save_interval_secs=30)
Example 4
# 1. Define a slim.dataset.Dataset that can read and decode the TFRecord-format data.
# 2. Create a slim.dataset_data_provider.DatasetDataProvider to feed the data.
# 3. Preprocess the images to match the network input, one-hot encode the labels for
#    convenience, and build a tf.train.batch (a sketch of such a helper follows this example).
"""

import tensorflow as tf
import tensorflow.contrib.slim as slim
from datasets import tf_record_dataset
from utils.dataset_utils import load_batch

"""
# Define the slim.dataset.Dataset class
"""
TF_RECORD_DIR = '/home/itrocks/Git/Tensorflow/tf-slim-tutorial/raw_data/mnist/tfrecord'
mnist_tfrecord_dataset = tf_record_dataset.TFRecordDataset(tfrecord_dir=TF_RECORD_DIR,
                                                           dataset_name='mnist',
                                                           num_classes=10)
# Create the 'train' dataset split
dataset = mnist_tfrecord_dataset.get_split(split_name='train')

"""
# Create the slim.dataset_data_provider.DatasetDataProvider
"""
provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
[image, label] = provider.get(['image', 'label'])

# Test
import matplotlib.pyplot as plt
with tf.Session() as sess:
    with slim.queues.QueueRunners(sess):
        plt.figure()
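The load_batch helper imported from utils.dataset_utils is not shown in these snippets; a minimal sketch of what such a helper might look like for step 3 above (signature, defaults, and channel count are assumptions, not the original implementation) is:

def load_batch(dataset, batch_size=32, height=28, width=28, num_classes=10):
    """Hypothetical sketch of the load_batch helper (the real implementation
    in utils.dataset_utils is not shown here)."""
    provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
    image, label = provider.get(['image', 'label'])

    # Resize and cast the image to match the network input.
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.image.resize_images(image, [height, width])
    image.set_shape([height, width, 3])  # assume RGB; adjust for grayscale data

    # One-hot encode the label for softmax_cross_entropy.
    label = slim.one_hot_encoding(label, num_classes)

    # Collect the preprocessed tensors into mini-batches.
    images, labels = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=1,
        capacity=2 * batch_size)
    return images, labels, dataset.num_samples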
Example 5
__author__ = '*****@*****.**'

import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets
from utils.dataset_utils import load_batch
from datasets import tf_record_dataset

tf.logging.set_verbosity(tf.logging.INFO)

'''
# Load the evaluation data
'''
batch_size = 16
tfrecord_dataset = tf_record_dataset.TFRecordDataset(
    tfrecord_dir='/home/itrocks/Git/Tensorflow/dog-breed-classification.tf/raw_data/dog/tfrecord',
    dataset_name='dog',
    num_classes=120)
# Selects the 'validation' dataset.
dataset = tfrecord_dataset.get_split(split_name='validation')
images, labels, num_samples = load_batch(dataset, batch_size=batch_size, height=224, width=224)

'''
# Load the network model: VGG-16
'''
vgg = tf.contrib.slim.nets.vgg
with slim.arg_scope(vgg.vgg_arg_scope()):
  logits, end_points = vgg.vgg_16(inputs=images, num_classes=120, is_training=False)


'''
# Define the metrics
'''
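The snippet is truncated at this point; a minimal sketch of how the metrics might be defined with slim's streaming metrics (an assumption, not the original code) is:

predictions = tf.argmax(logits, 1)
targets = tf.argmax(labels, 1)

# Streaming accuracy accumulated over all evaluated batches.
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
    'Accuracy': slim.metrics.streaming_accuracy(predictions, targets),
})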
Example 6
def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        #######################
        # Config model_deploy #
        #######################
        deploy_config = model_deploy.DeploymentConfig(
            num_clones=FLAGS.num_clones,
            clone_on_cpu=FLAGS.clone_on_cpu,
            replica_id=FLAGS.task,
            num_replicas=FLAGS.worker_replicas,
            num_ps_tasks=FLAGS.num_ps_tasks)

        # Create global_step
        with tf.device(deploy_config.variables_device()):
            global_step = slim.create_global_step()

        ######################
        # Select the dataset #
        ######################
        tfrecord_dataset = tf_record_dataset.TFRecordDataset(
            tfrecord_dir=FLAGS.dataset_dir,
            dataset_name=FLAGS.dataset_name,
            num_classes=FLAGS.num_classes)
        dataset = tfrecord_dataset.get_split(
            split_name=FLAGS.dataset_split_name)

        ######################
        # Select the network #
        ######################
        network_fn = nets_factory.get_network_fn(
            FLAGS.model_name,
            num_classes=(dataset.num_classes - FLAGS.labels_offset),
            weight_decay=FLAGS.weight_decay,
            is_training=True)

        #####################################
        # Select the preprocessing function #
        #####################################
        preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            preprocessing_name, is_training=True)

        ##############################################################
        # Create a dataset provider that loads data from the dataset #
        ##############################################################
        with tf.device(deploy_config.inputs_device()):
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=20 * FLAGS.batch_size,
                common_queue_min=10 * FLAGS.batch_size)
            [image, label] = provider.get(['image', 'label'])
            label -= FLAGS.labels_offset

            train_image_size = FLAGS.train_image_size or network_fn.default_image_size

            image = image_preprocessing_fn(image, train_image_size,
                                           train_image_size)

            images, labels = tf.train.batch(
                [image, label],
                batch_size=FLAGS.batch_size,
                num_threads=FLAGS.num_preprocessing_threads,
                capacity=5 * FLAGS.batch_size)
            labels = slim.one_hot_encoding(
                labels, dataset.num_classes - FLAGS.labels_offset)
            batch_queue = slim.prefetch_queue.prefetch_queue(
                [images, labels], capacity=2 * deploy_config.num_clones)

        ####################
        # Define the model #
        ####################
        def clone_fn(batch_queue):
            """Allows data parallelism by creating multiple clones of network_fn."""
            images, labels = batch_queue.dequeue()
            logits, end_points = network_fn(images)

            #############################
            # Specify the loss function #
            #############################
            if 'AuxLogits' in end_points:
                slim.losses.softmax_cross_entropy(
                    end_points['AuxLogits'],
                    labels,
                    label_smoothing=FLAGS.label_smoothing,
                    weights=0.4,
                    scope='aux_loss')
            slim.losses.softmax_cross_entropy(
                logits,
                labels,
                label_smoothing=FLAGS.label_smoothing,
                weights=1.0)
            return end_points

        # Gather initial summaries.
        summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

        clones = model_deploy.create_clones(deploy_config, clone_fn,
                                            [batch_queue])
        first_clone_scope = deploy_config.clone_scope(0)
        # Gather update_ops from the first clone. These contain, for example,
        # the updates for the batch_norm variables created by network_fn.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                       first_clone_scope)

        # Add summaries for end_points.
        end_points = clones[0].outputs
        for end_point in end_points:
            x = end_points[end_point]
            summaries.add(tf.summary.histogram('activations/' + end_point, x))
            summaries.add(
                tf.summary.scalar('sparsity/' + end_point,
                                  tf.nn.zero_fraction(x)))

        # Add summaries for losses.
        for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
            summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))

        # Add summaries for variables.
        for variable in slim.get_model_variables():
            summaries.add(tf.summary.histogram(variable.op.name, variable))

        #################################
        # Configure the moving averages #
        #################################
        if FLAGS.moving_average_decay:
            moving_average_variables = slim.get_model_variables()
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay, global_step)
        else:
            moving_average_variables, variable_averages = None, None

        #########################################
        # Configure the optimization procedure. #
        #########################################
        with tf.device(deploy_config.optimizer_device()):
            learning_rate = _configure_learning_rate(dataset.num_samples,
                                                     global_step)
            optimizer = _configure_optimizer(learning_rate)
            summaries.add(tf.summary.scalar('learning_rate', learning_rate))

        if FLAGS.sync_replicas:
            # If sync_replicas is enabled, the averaging will be done in the chief
            # queue runner.
            optimizer = tf.train.SyncReplicasOptimizer(
                opt=optimizer,
                replicas_to_aggregate=FLAGS.replicas_to_aggregate,
                total_num_replicas=FLAGS.worker_replicas,
                variable_averages=variable_averages,
                variables_to_average=moving_average_variables)
        elif FLAGS.moving_average_decay:
            # Update ops executed locally by trainer.
            update_ops.append(
                variable_averages.apply(moving_average_variables))

        # Variables to train.
        variables_to_train = _get_variables_to_train()

        # Compute the total loss and the gradients for each clone.
        total_loss, clones_gradients = model_deploy.optimize_clones(
            clones, optimizer, var_list=variables_to_train)
        # Add total_loss to summary.
        summaries.add(tf.summary.scalar('total_loss', total_loss))

        # Create gradient updates.
        grad_updates = optimizer.apply_gradients(clones_gradients,
                                                 global_step=global_step)
        update_ops.append(grad_updates)

        update_op = tf.group(*update_ops)
        with tf.control_dependencies([update_op]):
            train_tensor = tf.identity(total_loss, name='train_op')

        # Add the summaries from the first clone. These contain the summaries
        # created by model_fn and either optimize_clones() or _gather_clone_loss().
        summaries |= set(
            tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))

        # Merge all summaries together.
        summary_op = tf.summary.merge(list(summaries), name='summary_op')

        ###########################
        # Kicks off the training. #
        ###########################
        slim.learning.train(
            train_tensor,
            logdir=FLAGS.train_dir,
            master=FLAGS.master,
            is_chief=(FLAGS.task == 0),
            init_fn=_get_init_fn(),
            summary_op=summary_op,
            number_of_steps=FLAGS.max_number_of_steps,
            log_every_n_steps=FLAGS.log_every_n_steps,
            save_summaries_secs=FLAGS.save_summaries_secs,
            save_interval_secs=FLAGS.save_interval_secs,
            sync_optimizer=optimizer if FLAGS.sync_replicas else None)
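The helper functions referenced above (_configure_learning_rate, _configure_optimizer, _get_init_fn, _get_variables_to_train) are not included in this snippet. As a reference, the variables-to-train helper in the TF-Slim train_image_classifier script looks roughly like the sketch below, assuming a --trainable_scopes flag:

def _get_variables_to_train():
    """Returns the list of variables to optimize, optionally restricted to
    the comma-separated scopes in --trainable_scopes."""
    if FLAGS.trainable_scopes is None:
        return tf.trainable_variables()
    scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]

    variables_to_train = []
    for scope in scopes:
        variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
        variables_to_train.extend(variables)
    return variables_to_train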
Example 7
MNIST examples
"""

import tensorflow as tf
import tensorflow.contrib.slim as slim
import c01_defining_models.s04_examples.mnist_deep_step_by_step_slim as mnist_model
from utils.dataset_utils import load_batch
from datasets import tf_record_dataset

tf.logging.set_verbosity(tf.logging.INFO)

'''
# Load the training data
'''
mnist_tfrecord_dataset = tf_record_dataset.TFRecordDataset(
    tfrecord_dir='/home/itrocks/Git/Tensorflow/tf-slim-tutorial/raw_data/mnist/tfrecord',
    dataset_name='mnist',
    num_classes=10)
# Selects the 'train' dataset.
dataset = mnist_tfrecord_dataset.get_split(split_name='train')
images, labels, _ = load_batch(dataset)

'''
# Define the model
'''
logits = mnist_model.mnist_convnet(images)


'''
# Define the loss function
'''
loss = slim.losses.softmax_cross_entropy(logits, labels)
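The snippet ends at the loss definition; mirroring Example 3, a typical continuation (the log directory, learning rate, and step count below are assumptions) would create the training op and hand it to slim.learning.train:

total_loss = slim.losses.get_total_loss()
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)  # assumed learning rate
train_op = slim.learning.create_train_op(total_loss, optimizer)

final_loss = slim.learning.train(
    train_op,
    logdir='/tmp/mnist_train',  # hypothetical log directory
    number_of_steps=1000,       # assumed step count
    save_summaries_secs=30,
    save_interval_secs=30)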