def distorted_inputs():
    """
    Construct distorted input for CIRAF training using the Reder ops.

    Returns:
        images: Images. 4-D Tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels: 1-D Tensor of [batch_size] size.

    Raises:
        ValueErroe: if no data_dir.
    """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
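    # Strip a leading './' (the reader presumably expects a bare relative path).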
    if data_dir[0:2] == './':
        data_dir = data_dir[2:]
    images, labels = input.distorted_inputs(data_dir=data_dir,
                                            batch_size=FLAGS.batch_size)

    if FLAGS.use_fp16:
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)

    return images, labels
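This and the following snippets reference module-level FLAGS, tf, os, and an input module without defining them. A minimal sketch of the flag definitions the CIFAR examples assume (names follow the usage above; default values are illustrative):

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
                           'Path to the directory holding the CIFAR-10 binaries.')
tf.app.flags.DEFINE_integer('batch_size', 128, 'Number of images per batch.')
tf.app.flags.DEFINE_boolean('use_fp16', False, 'Cast inputs to float16.')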
Example 2
def main(_):
    images, labels = distorted_inputs(FLAGS.data_dir, FLAGS.train_lst)

    is_training = tf.placeholder('bool', [], name='is_training')  # placeholder for the fusion part

    logits = inference(images,
                       num_classes=FLAGS.num_classes,
                       is_training=is_training,
                       num_blocks=[3, 4, 6, 3])
    train(is_training, logits, images, labels)
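train() is not shown here, so how is_training gets fed is left implicit. A minimal sketch of the intended usage, assuming train() (or its caller) runs a session around a hypothetical train_op:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Training steps run the fusion part in training mode...
    sess.run(train_op, feed_dict={is_training: True})
    # ...while evaluation feeds False.
    predictions = sess.run(logits, feed_dict={is_training: False})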
Example 3
def distorted_inputs():
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    images, labels = input.distorted_inputs(data_dir=data_dir,
                                            batch_size=FLAGS.batch_size)
    if FLAGS.use_fp16:
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)
    return images, labels
Example 4
def distorted_inputs(data_dir):
  """Construct distorted input for CIFAR training using the Reader ops.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
  if not data_dir:
    raise ValueError('Please supply a data_dir')
  images, labels = input.distorted_inputs(data_dir=data_dir,
                                          batch_size=FLAGS.batch_size)
  if FLAGS.use_fp16:
    images = tf.cast(images, tf.float16)
    labels = tf.cast(labels, tf.float16)
  return images, labels
Example 5
def distorted_inputs():
    """

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = FLAGS.data_dir
    images, labels = input.distorted_inputs(data_dir=data_dir,
                                            batch_size=FLAGS.batch_size)
    if FLAGS.use_fp16:
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)
    return images, labels
Example 6
#-*- coding: utf-8 -*-
# Read data and feeddict
#
# *** Udacity Predict steering angle ****
#
# Copyright 2016 Zhaocheng Liu, Xiao Wang
# {zcliu,  xwang696}@gatech.edu

import tensorflow as tf
import input

images, angles = input.distorted_inputs("../data/angles_train.txt", 10)
eval_i, eval_a = input.origin_inputs("../data/angles_valid.txt", 10)

# Build the init op only after the input pipelines have created their variables.
init_op = tf.initialize_all_variables()

with tf.Session() as sess:
    sess.run(init_op)
    sess.run(tf.initialize_local_variables())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    for i in range(0, 100):
        an = sess.run(angles)
        an_e = sess.run(eval_a)
    # Shut the queue runners down cleanly.
    coord.request_stop()
    coord.join(threads)
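input.distorted_inputs and input.origin_inputs are project-specific. A minimal sketch of a queue-based loader with the same signature, assuming the list file holds one "image_path angle" pair per line and the 66x200 input size common to steering models:

def distorted_inputs(list_file, batch_size):
    # Parse the assumed "<image_path> <angle>" lines.
    paths, angles = [], []
    with open(list_file) as f:
        for line in f:
            path, angle = line.split()
            paths.append(path)
            angles.append(float(angle))

    # Shuffled queue of (path, angle) pairs.
    path_t, angle_t = tf.train.slice_input_producer(
        [tf.constant(paths), tf.constant(angles)], shuffle=True)

    image = tf.image.decode_jpeg(tf.read_file(path_t), channels=3)
    image = tf.image.resize_images(image, [66, 200])
    image = tf.image.random_brightness(image, max_delta=0.2)  # the distortion

    return tf.train.batch([image, angle_t], batch_size=batch_size)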
Example 7
with tf.Graph().as_default(), tf.device('/cpu:0'):
    global_step = tf.Variable(
        initial_value=0,
        name="global_step",
        trainable=False,
        collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES],
        dtype=tf.int64)  # the global step is an integer counter

    lr = FLAGS.learning_rate
    # opt = tf.train.RMSPropOptimizer(lr, decay=0.9, momentum=0.9, epsilon=1)
    opt = tf.train.MomentumOptimizer(lr, momentum=0.9)

    # Get images and labels
    # for train
    with tf.name_scope('train_images'):
        images, labels = input.distorted_inputs(FLAGS.batch_size)

    batch_queue = tf.contrib.slim.prefetch_queue.prefetch_queue(
        [images, labels], capacity=2 * FLAGS.num_gpus)

    tower_grads = []
    tower_losses = []
    with tf.variable_scope(tf.get_variable_scope()):
        for i in range(FLAGS.num_gpus):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('%s_%d' % ('tower', i)) as scope:

                    image_batch, label_batch = batch_queue.dequeue()

                    loss = dla_34.loss(image_batch, label_batch)
Example 8
def train():
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        global_step = tf.get_variable('global_step', [],
                                      dtype=tf.int64,
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)

        lr = FLAGS.learning_rate
        opt = tf.train.RMSPropOptimizer(lr, decay=0.9, momentum=0.9, epsilon=1)

        # Get images and labels
        # for train
        with tf.name_scope('train_images'):
            images, labels, boxes, num_objects = input.distorted_inputs(
                FLAGS.batch_size)

        batch_queue = tf.contrib.slim.prefetch_queue.prefetch_queue(
            [images, labels, boxes, num_objects], capacity=2 * FLAGS.num_gpus)

        tower_grads = []
        tower_losses = []
        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(FLAGS.num_gpus):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('%s_%d' % ('tower', i)) as scope:

                        image_batch, label_batch, box_batch, num_objects_batch = \
                            batch_queue.dequeue()

                        cls_loss, loc_loss = ssd.loss(image_batch, label_batch,
                                                      box_batch,
                                                      num_objects_batch)

                        loss = cls_loss + loc_loss
                        regularization_loss = tf.add_n(
                            tf.get_collection(
                                tf.GraphKeys.REGULARIZATION_LOSSES))

                        loss = loss + regularization_loss

                        tf.get_variable_scope().reuse_variables()

                        summaries = tf.get_collection(tf.GraphKeys.SUMMARIES,
                                                      scope)

                        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                                       scope)

                        grads = opt.compute_gradients(loss)

                        tower_grads.append(grads)
                        tower_losses.append(loss)

        grads = average_gradients(tower_grads)

        # Validation inputs
        val_images, val_labels, val_boxes, val_num_objects = input.inputs(1)
        with tf.device('/gpu:0'):
            with tf.name_scope('eval_images'):
                cls_pred, loc_pred = ssd.inference(val_images)

        summaries.extend(
            tf.get_collection(tf.GraphKeys.SUMMARIES, 'train_images'))
        summaries.extend(
            tf.get_collection(tf.GraphKeys.SUMMARIES, 'eval_images'))

        # Add a summary to track the learning rate.
        summaries.append(tf.summary.scalar('learning_rate', lr))

        for grad, var in grads:
            if grad is not None:
                summaries.append(
                    tf.summary.histogram(var.op.name + '/gradients', grad))

        with tf.control_dependencies(update_ops):
            train_op = opt.apply_gradients(grads, global_step=global_step)

        for var in tf.trainable_variables():
            print(var.name)
            summaries.append(tf.summary.histogram(var.op.name, var))

        saver = tf.train.Saver(max_to_keep=20)

        summary_op = tf.summary.merge(summaries)

        pretrained_ckpt_path = FLAGS.pretrained_ckpt_path

        if not tf.train.latest_checkpoint(FLAGS.ckpt_save_path):
            print('pretrained ckpt')
            exclude_layers = ['global_step']
            restore_variables = slim.get_variables_to_restore(
                exclude=exclude_layers)
            init_fn = slim.assign_from_checkpoint_fn(pretrained_ckpt_path,
                                                     restore_variables,
                                                     ignore_missing_vars=True)

        else:
            print('training ckpt')
            init_fn = None

        sv = tf.train.Supervisor(logdir=FLAGS.ckpt_save_path,
                                 summary_op=None,
                                 saver=saver,
                                 save_model_secs=0,
                                 init_fn=init_fn)
        config_ = tf.ConfigProto(allow_soft_placement=True)
        config_.gpu_options.per_process_gpu_memory_fraction = 0.4

        with sv.managed_session(config=config_) as sess:
            # Start the queue runners.
            sv.start_queue_runners(sess=sess)

            for step in range(FLAGS.max_steps):
                start_time = time.time()
                # Run the train op and fetch the losses in one call so the
                # reported losses correspond to the batch just trained on.
                _, loss_value, cls_loss_value, loc_loss_value = sess.run(
                    [train_op, loss, cls_loss, loc_loss])
                duration = time.time() - start_time

                assert not np.isnan(
                    loss_value), 'Model diverged with loss = NaN'

                if step % 100 == 0:
                    num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
                    examples_per_sec = num_examples_per_step / duration
                    sec_per_batch = duration / FLAGS.num_gpus

                    format_str = (
                        '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), step, loss_value,
                                        examples_per_sec, sec_per_batch))
                    print(cls_loss_value, loc_loss_value)

                if step % 100 == 0:
                    summary_str = sess.run(summary_op)
                    sv.summary_computed(sess, summary_str)

                if step % (int(FLAGS.num_train / FLAGS.batch_size) *
                           4) == 0 and step != 0:

                    print('start validation')
                    entire_TF = []
                    entire_score = []
                    entire_numGT = []
                    for val_step in range(FLAGS.num_validation):

                        if val_step % 500 == 0:
                            print(val_step, ' / ', FLAGS.num_validation)
                        val_GT_boxes, val_GT_cls, val_loc_pred, val_cls_pred, num_objects = sess.run(
                            [
                                val_boxes, val_labels, loc_pred, cls_pred,
                                val_num_objects
                            ])

                        TF_array, TF_score, num_GT = validation.one_image_validation(
                            val_GT_boxes, val_GT_cls, val_loc_pred,
                            val_cls_pred, num_objects)

                        if len(entire_TF) == 0:
                            entire_TF = TF_array
                            entire_score = TF_score
                            entire_numGT = num_GT
                        else:
                            for k_cls in range(FLAGS.num_classes - 1):
                                entire_TF[k_cls] = np.concatenate(
                                    [entire_TF[k_cls], TF_array[k_cls]],
                                    axis=0)
                                entire_score[k_cls] = np.concatenate(
                                    [entire_score[k_cls], TF_score[k_cls]],
                                    axis=0)
                                entire_numGT[k_cls] += num_GT[k_cls]

                    entire_AP_sum = validation.compute_AP(
                        entire_score, entire_TF, entire_numGT)

                    mAP = np.sum(np.array(entire_AP_sum)) / np.sum(
                        np.array(entire_AP_sum) != 0)

                    print('class AP : ', entire_AP_sum)
                    print('mAP : ', mAP)

                    checkpoint_path = os.path.join(FLAGS.ckpt_save_path,
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
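Examples 8 and 9 both call average_gradients without defining it. A sketch of the standard TF1 multi-tower implementation (as in the TensorFlow CIFAR-10 multi-GPU tutorial), which these examples presumably mirror:

def average_gradients(tower_grads):
    """Average gradients over towers.

    Args:
        tower_grads: one list of (gradient, variable) pairs per tower,
            as returned by opt.compute_gradients on each GPU.

    Returns:
        A single list of (gradient, variable) pairs, averaged across towers.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars is ((grad0_gpu0, var0), ..., (grad0_gpuN, var0)).
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, axis=0), axis=0)
        # Variables are shared across towers; the first tower's handle suffices.
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads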
Example 9
def train():
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        global_step = tf.Variable(initial_value=0,
                                  name="global_step",
                                  trainable=False,
                                  collections=[
                                      tf.GraphKeys.GLOBAL_STEP,
                                      tf.GraphKeys.GLOBAL_VARIABLES
                                  ],
                                  dtype=tf.int64)  # integer step counter

        lr = FLAGS.learning_rate
        opt = tf.train.AdamOptimizer(learning_rate=lr,
                                     beta1=0.9,
                                     beta2=0.999,
                                     epsilon=1e-8)

        with tf.name_scope('train_images'):
            images, labels, boxes, num_objects = input.distorted_inputs(
                FLAGS.batch_size)

        batch_queue = tf.contrib.slim.prefetch_queue.prefetch_queue(
            [images, labels, boxes, num_objects], capacity=2 * FLAGS.num_gpus)

        tower_grads = []
        tower_losses = []
        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(FLAGS.num_gpus):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('%s_%d' % ('tower', i)) as scope:

                        image_batch, label_batch, box_batch, num_objects_batch = \
                            batch_queue.dequeue()

                        loss_hm, loss_wh, loss_off, tmp_wh, tmp_pos = centernet.loss(
                            image_batch, label_batch, box_batch,
                            num_objects_batch)

                        loss = loss_hm + loss_wh * 0.1 + loss_off

                        regularization_loss = tf.add_n(
                            tf.get_collection(
                                tf.GraphKeys.REGULARIZATION_LOSSES))

                        loss = loss + regularization_loss

                        tf.get_variable_scope().reuse_variables()

                        summaries = tf.get_collection(tf.GraphKeys.SUMMARIES,
                                                      scope)

                        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                                       scope)

                        grads = opt.compute_gradients(loss)

                        tower_grads.append(grads)
                        tower_losses.append(loss)

        grads = average_gradients(tower_grads)

        # Validation inputs
        val_images, val_labels, val_boxes, val_num_objects = input.inputs(1)
        with tf.device('/gpu:0'):
            with tf.name_scope('eval_images'):
                hm_pred, wh_pred, offset_pred = centernet.inference(val_images)
                classes_pred, scores_pred, boxes_pred, heatmap_pred = validation.decode_(
                    hm_pred, wh_pred, offset_pred)

        summaries.extend(
            tf.get_collection(tf.GraphKeys.SUMMARIES, 'train_images'))
        summaries.extend(
            tf.get_collection(tf.GraphKeys.SUMMARIES, 'eval_images'))

        # Add a summary to track the learning rate.
        summaries.append(tf.summary.scalar('learning_rate', lr))
        #
        # for grad, var in grads:
        #     if grad is not None:
        #         summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))
        #
        with tf.control_dependencies(update_ops):
            train_op = opt.apply_gradients(grads, global_step=global_step)

        total_parameters = 0
        trainable_list = []
        for var in tf.trainable_variables():
            print(var.name)
            trainable_list.append(var.name)
            summaries.append(tf.summary.histogram(var.op.name, var))
            # shape is an array of tf.Dimension
            shape = var.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        print('total_parameters : ', total_parameters)
        saver = tf.train.Saver(max_to_keep=20)

        summary_op = tf.summary.merge(summaries)

        pretrained_ckpt_path = FLAGS.pretrained_dir

        if tf.train.latest_checkpoint(FLAGS.ckpt_dir):
            print('use latest trained check point')
            init_fn = None
        else:
            if FLAGS.pretrained_dir == "":
                print('use no ckpt')
                init_fn = None
            else:
                print('use pretrained check point')
                variables_to_restore = slim.get_variables_to_restore(
                    include=trainable_list, exclude=['global_step'])
                #        for k in variables_to_restore:
                #            print(k.name)
                init_fn = slim.assign_from_checkpoint_fn(
                    pretrained_ckpt_path,
                    variables_to_restore,
                    ignore_missing_vars=True)

        sv = tf.train.Supervisor(logdir=FLAGS.ckpt_dir,
                                 summary_op=None,
                                 saver=saver,
                                 save_model_secs=0,
                                 init_fn=init_fn)
        config_ = tf.ConfigProto(allow_soft_placement=True)
        config_.gpu_options.allow_growth = True
        # config_.gpu_options.per_process_gpu_memory_fraction = 1.0

        with sv.managed_session(config=config_) as sess:
            # Start the queue runners.
            sv.start_queue_runners(sess=sess)

            for step in range(FLAGS.max_steps):
                start_time = time.time()
                # Run the train op and fetch the step and losses in one call so
                # they correspond to the batch just trained on.
                _, sv_global_step, loss_value, loss_hm_, loss_wh_, loss_off_ = sess.run(
                    [train_op, sv.global_step, loss, loss_hm, loss_wh, loss_off])

                duration = time.time() - start_time

                assert not np.isnan(
                    loss_value), 'Model diverged with loss = NaN'

                if sv_global_step % 100 == 0:
                    num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
                    examples_per_sec = num_examples_per_step / duration
                    sec_per_batch = duration / FLAGS.num_gpus
                    epochs = sv_global_step * FLAGS.batch_size / FLAGS.num_train

                    format_str = (
                        'epochs %.2f, step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (epochs, step, loss_value,
                                        examples_per_sec, sec_per_batch))
                    print('loss_hm : ', loss_hm_, 'loss_wh : ', loss_wh_,
                          'loss_off : ', loss_off_)

                if sv_global_step % 10 == 0:
                    summary_str = sess.run(summary_op)
                    sv.summary_computed(sess, summary_str)

                if sv_global_step % (int(FLAGS.num_train / FLAGS.batch_size) *
                                     1) == 0 and sv_global_step != 0:

                    print('start validation')
                    entire_TF = []
                    entire_score = []
                    entire_numGT = []
                    for val_step in range(FLAGS.num_validation):

                        if val_step % 500 == 0:
                            print(val_step, ' / ', FLAGS.num_validation)

                        val_img, \
                        val_GT_boxes, \
                        val_GT_cls, \
                        val_loc_pred, \
                        val_cls_pred, \
                        val_score_pred, \
                        num_objects = sess.run([val_images,
                                                val_boxes,
                                                val_labels,
                                                boxes_pred,
                                                classes_pred,
                                                scores_pred,
                                                val_num_objects])

                        TF_array, TF_score, num_GT = validation.one_image_validation(
                            val_GT_boxes, val_GT_cls, val_loc_pred,
                            val_cls_pred, val_score_pred, num_objects)

                        if len(entire_TF) == 0:
                            entire_TF = TF_array
                            entire_score = TF_score
                            entire_numGT = num_GT
                        else:
                            for k_cls in range(FLAGS.num_classes - 1):
                                entire_TF[k_cls] = np.concatenate(
                                    [entire_TF[k_cls], TF_array[k_cls]],
                                    axis=0)
                                entire_score[k_cls] = np.concatenate(
                                    [entire_score[k_cls], TF_score[k_cls]],
                                    axis=0)
                                entire_numGT[k_cls] += num_GT[k_cls]

                    entire_AP_sum = validation.compute_AP(
                        entire_score, entire_TF, entire_numGT)

                    mAP = np.sum(np.array(entire_AP_sum)) / np.sum(
                        np.array(entire_AP_sum) != 0)

                    print('class AP : ', entire_AP_sum)
                    print(len(entire_AP_sum))
                    print('mAP : ', mAP)

                    checkpoint_path = os.path.join(FLAGS.ckpt_dir,
                                                   'model.ckpt')
                    saver.save(sess,
                               checkpoint_path,
                               global_step=sv.global_step)
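validation.compute_AP is likewise project-specific. A sketch of the standard precision/recall AP computation it presumably performs, given the per-class true-positive flags, confidence scores, and ground-truth counts accumulated above (the mAP line then averages over classes with nonzero AP):

def compute_AP(entire_score, entire_TF, entire_numGT):
    # One VOC-style average precision per class, from ranked detections.
    aps = []
    for scores, tp, n_gt in zip(entire_score, entire_TF, entire_numGT):
        if n_gt == 0 or len(scores) == 0:
            aps.append(0.0)
            continue
        order = np.argsort(-np.asarray(scores))  # descending confidence
        tp = np.asarray(tp, dtype=np.float64)[order]
        cum_tp = np.cumsum(tp)
        recall = cum_tp / float(n_gt)
        precision = cum_tp / (np.arange(len(tp)) + 1.0)
        # Pad, enforce a non-increasing precision envelope, integrate over recall.
        mrec = np.concatenate(([0.0], recall, [1.0]))
        mpre = np.concatenate(([0.0], precision, [0.0]))
        for i in range(len(mpre) - 2, -1, -1):
            mpre[i] = max(mpre[i], mpre[i + 1])
        idx = np.where(mrec[1:] != mrec[:-1])[0] + 1
        aps.append(np.sum((mrec[idx] - mrec[idx - 1]) * mpre[idx]))
    return aps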