Code Example #1
def main(_):
    with tf.Graph().as_default() as g:
        with tf.device("/cpu:0"):
            images_eval_train, _ = inputs(batch_size=FLAGS.finetune_batch_size,
                                          validation=FLAGS.validation,
                                          shuffle=True)
            images_eval_test, labels_eval_test = inputs(
                batch_size=FLAGS.eval_batch_size,
                train=False,
                validation=FLAGS.validation,
                shuffle=False,
                num_epochs=1)

        with tf.device(FLAGS.device):
            with tf.variable_scope("CNN") as scope:
                # Build graph of finetuning BN stats
                finetune_op = build_finetune_graph(images_eval_train)
                scope.reuse_variables()
                # Build eval graph
                n_correct, m = build_eval_graph(images_eval_test,
                                                labels_eval_test)

        init_op = tf.global_variables_initializer()
        saver = tf.train.Saver(tf.global_variables())
        sess = tf.Session()
        sess.run(init_op)
        ckpt = tf.train.get_checkpoint_state(FLAGS.log_dir)
        print("Checkpoints:", ckpt)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        # Local variables back the num_epochs counter used by the eval input queue.
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        print("Finetuning...")
        for _ in range(FLAGS.finetune_iter):
            sess.run(finetune_op)

        sum_correct_examples = 0
        sum_m = 0
        try:
            while not coord.should_stop():
                _n_correct, _m = sess.run([n_correct, m])
                sum_correct_examples += _n_correct
                sum_m += _m
        except tf.errors.OutOfRangeError:
            print('Done evaluation -- epoch limit reached')
        finally:
            # When done, ask the threads to stop and wait for them to finish.
            coord.request_stop()
            coord.join(threads)
        print("Test: num_test_examples:{}, num_correct_examples:{}, accuracy:{}"
              .format(sum_m, sum_correct_examples,
                      sum_correct_examples / float(sum_m)))
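
The snippet assumes module-level flag definitions and an entry point that are not shown. A minimal sketch of what they might look like in TensorFlow 1.x follows; the default values are illustrative placeholders, not the original project's settings.

import tensorflow as tf

# Hypothetical definitions for the FLAGS fields referenced in main();
# defaults are placeholders, not the original project's values.
tf.app.flags.DEFINE_integer('finetune_batch_size', 100, "batch size for finetuning BN statistics")
tf.app.flags.DEFINE_integer('eval_batch_size', 100, "batch size for evaluation")
tf.app.flags.DEFINE_integer('finetune_iter', 100, "number of BN finetuning iterations")
tf.app.flags.DEFINE_bool('validation', False, "evaluate on the validation split")
tf.app.flags.DEFINE_string('device', '/gpu:0', "device for the compute-heavy part of the graph")
tf.app.flags.DEFINE_string('log_dir', '', "directory holding the checkpoint to restore")
FLAGS = tf.app.flags.FLAGS

if __name__ == "__main__":
    tf.app.run()  # parses flags, then calls main(_)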
Code Example #2
    def get_tfrecords(self):
        """
        xtrain: all records (served as an unlabeled stream)
        *_l   : labeled subset of records
        """
        if self.dataset == 'SVHN':
            from svhn import inputs, unlabeled_inputs
            xtrain_l, ytrain_l = inputs(batch_size=self.batch_size, train=True, validation=False, shuffle=True)
            xtrain = unlabeled_inputs(batch_size=self.batch_size, validation=False, shuffle=True)
            xtest, ytest = inputs(batch_size=self.batch_size, train=False, validation=False, shuffle=True)
        else:
            sys.exit('[ERROR] not implemented yet')
        return (xtrain_l, ytrain_l), xtrain, (xtest, ytest)
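
A hedged sketch of how the three returned batch tuples might be consumed; 'loader' stands in for whatever object defines get_tfrecords, and the session plumbing is an illustrative assumption, not part of the original file.

import tensorflow as tf

# Hypothetical consumer; assumes loader.dataset == 'SVHN' and loader.batch_size are set.
(x_l, y_l), x_u, (x_te, y_te) = loader.get_tfrecords()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # Pull one labeled batch and one unlabeled batch, e.g. for a semi-supervised step.
    xb_l, yb_l, xb_u = sess.run([x_l, y_l, x_u])
    coord.request_stop()
    coord.join(threads)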
Code Example #3
def evaluate():
    """Eval CIFAR-10 for a number of steps."""
    with tf.Graph().as_default() as g:
        # Get images and labels for SVHN.
        eval_data = FLAGS.eval_data == 'test'
        images, labels = svhn.inputs(eval_data=eval_data)

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = svhn.inference(images)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            svhn.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
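
eval_once is not shown above. A minimal sketch of a compatible implementation, modeled on the standard TF1 CIFAR-10 eval loop; FLAGS.checkpoint_dir, FLAGS.num_examples, and FLAGS.batch_size are assumed flags, not confirmed from the original project.

import math
import numpy as np
import tensorflow as tf

def eval_once(saver, summary_writer, top_k_op, summary_op):
    """Restore the latest checkpoint and run one pass over the eval set."""
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if not (ckpt and ckpt.model_checkpoint_path):
            print('No checkpoint file found')
            return
        saver.restore(sess, ckpt.model_checkpoint_path)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
            true_count = 0
            for _ in range(num_iter):
                true_count += np.sum(sess.run(top_k_op))
            precision = true_count / (num_iter * FLAGS.batch_size)
            print('precision = %.3f' % precision)
            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            summary.value.add(tag='precision', simple_value=precision)
            summary_writer.add_summary(summary)
        finally:
            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)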
Code Example #4
def evaluate():
  images, labels = svhn.inputs()

  # Each example carries up to 6 digit labels; reshape to [batch, 6].
  sparse_labels = tf.reshape(labels, [FLAGS.batch_size, 6])

  # Build a Graph that computes the logits predictions from the
  # inference model (one head per digit position; keep_prob=1.0 for eval).
  logits1, logits2, logits3, logits4, logits5, logits6 = svhn.net_1(images, 1.)

  # Stack the per-digit argmax predictions into a [batch, 6] tensor.
  # (softmax is monotonic, so argmax over softmax equals argmax over logits.)
  predictions = tf.stack([tf.argmax(tf.nn.softmax(logits1), 1),
                          tf.argmax(tf.nn.softmax(logits2), 1),
                          tf.argmax(tf.nn.softmax(logits3), 1),
                          tf.argmax(tf.nn.softmax(logits4), 1),
                          tf.argmax(tf.nn.softmax(logits5), 1),
                          tf.argmax(tf.nn.softmax(logits6), 1)], axis=1)

  # Restore the moving average version of the learned variables for eval.
  variable_averages = tf.train.ExponentialMovingAverage(
      svhn.MOVING_AVERAGE_DECAY)
  variables_to_restore = variable_averages.variables_to_restore()
  saver = tf.train.Saver(variables_to_restore)
  eval_once(predictions, sparse_labels, saver)
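
With predictions and sparse_labels both shaped [batch, 6], a natural metric counts a sample as correct only when all six digit positions match. A minimal sketch of that comparison (sequence_accuracy is a hypothetical helper, not part of the project; the cast assumes integer labels):

import tensorflow as tf

def sequence_accuracy(predictions, sparse_labels):
  """Fraction of samples whose entire 6-digit prediction matches the label."""
  # tf.argmax returns int64, so cast the labels to match before comparing.
  matches = tf.equal(predictions, tf.cast(sparse_labels, tf.int64))
  all_correct = tf.reduce_all(matches, axis=1)  # [batch] bool, True iff all 6 match
  return tf.reduce_mean(tf.cast(all_correct, tf.float32))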
Code Example #5
File: train_svhn.py  Project: xiaopingzeng/adanet
def main(_):
    numpy.random.seed(seed=FLAGS.seed)
    tf.set_random_seed(numpy.random.randint(1234))
    with tf.Graph().as_default() as g:
        with tf.device("/cpu:0"):
            images, labels = inputs(batch_size=FLAGS.batch_size,
                                    train=True,
                                    validation=FLAGS.validation,
                                    shuffle=True)
            ul_images = unlabeled_inputs(batch_size=FLAGS.ul_batch_size,
                                         validation=FLAGS.validation,
                                         shuffle=True)

            images_eval_train, labels_eval_train = inputs(
                batch_size=FLAGS.eval_batch_size,
                train=True,
                validation=FLAGS.validation,
                shuffle=True)
            ul_images_eval_train = unlabeled_inputs(
                batch_size=FLAGS.eval_batch_size,
                validation=FLAGS.validation,
                shuffle=True)

            images_eval_test, labels_eval_test = inputs(
                batch_size=FLAGS.eval_batch_size,
                train=False,
                validation=FLAGS.validation,
                shuffle=True)

        with tf.device(FLAGS.device):
            lr = tf.placeholder(tf.float32, shape=[], name="learning_rate")
            mom = tf.placeholder(tf.float32, shape=[], name="momentum")
            with tf.variable_scope("CNN") as scope:
                # Build training graph
                loss, train_op, global_step = build_training_graph(
                    images, labels, ul_images, lr, mom)
                scope.reuse_variables()
                # Build eval graph
                losses_eval_train = build_eval_graph(images_eval_train,
                                                     labels_eval_train,
                                                     ul_images_eval_train)
                # At test time the labeled images also serve as the unlabeled batch.
                losses_eval_test = build_eval_graph(images_eval_test,
                                                    labels_eval_test,
                                                    images_eval_test)

            init_op = tf.global_variables_initializer()

        if not FLAGS.log_dir:
            logdir = None
            writer_train = None
            writer_test = None
        else:
            logdir = FLAGS.log_dir
            writer_train = tf.summary.FileWriter(FLAGS.log_dir + "/train", g)
            writer_test = tf.summary.FileWriter(FLAGS.log_dir + "/test", g)

        saver = tf.train.Saver(tf.global_variables())
        sv = tf.train.Supervisor(is_chief=True,
                                 logdir=logdir,
                                 init_op=init_op,
                                 init_feed_dict={
                                     lr: FLAGS.learning_rate,
                                     mom: FLAGS.mom1
                                 },
                                 saver=saver,
                                 global_step=global_step,
                                 summary_op=None,
                                 summary_writer=None,
                                 save_model_secs=150,
                                 recovery_wait_secs=0)

        print("Training...")
        with sv.managed_session(config=tf.ConfigProto(
                allow_soft_placement=True)) as sess:
            for ep in range(FLAGS.num_epochs):
                if sv.should_stop():
                    break

                if ep < FLAGS.epoch_decay_start:
                    feed_dict = {lr: FLAGS.learning_rate, mom: FLAGS.mom1}
                else:
                    decayed_lr = (
                        (FLAGS.num_epochs - ep) /
                        float(FLAGS.num_epochs -
                              FLAGS.epoch_decay_start)) * FLAGS.learning_rate
                    feed_dict = {lr: decayed_lr, mom: FLAGS.mom2}

                sum_loss = 0
                start = time.time()
                for i in range(FLAGS.num_iter_per_epoch):
                    _, batch_loss, _ = sess.run([train_op, loss, global_step],
                                                feed_dict=feed_dict)
                    sum_loss += batch_loss
                end = time.time()
                print("Epoch:", ep, "CE_loss_train:",
                      sum_loss / FLAGS.num_iter_per_epoch, "elapsed_time:",
                      end - start)

                if (ep + 1) % FLAGS.eval_freq == 0 or ep + 1 == FLAGS.num_epochs:
                    # Eval on training data
                    act_values_dict = {}
                    for key, _ in losses_eval_train.items():
                        act_values_dict[key] = 0
                    n_iter_per_epoch = NUM_EVAL_EXAMPLES // FLAGS.eval_batch_size
                    for i in range(n_iter_per_epoch):
                        values = list(losses_eval_train.values())
                        act_values = sess.run(values)
                        for key, value in zip(list(act_values_dict.keys()),
                                              act_values):
                            act_values_dict[key] += value
                    summary = tf.Summary()
                    current_global_step = sess.run(global_step)
                    for key, value in act_values_dict.items():
                        print("train-" + key, value / n_iter_per_epoch)
                        summary.value.add(tag=key,
                                          simple_value=value /
                                          n_iter_per_epoch)
                    if writer_train is not None:
                        writer_train.add_summary(summary, current_global_step)

                    # Eval on test data
                    act_values_dict = {}
                    for key, _ in losses_eval_test.items():
                        act_values_dict[key] = 0
                    n_iter_per_epoch = NUM_EVAL_EXAMPLES // FLAGS.eval_batch_size
                    for i in range(n_iter_per_epoch):
                        values = list(losses_eval_test.values())
                        act_values = sess.run(values)
                        for key, value in zip(list(act_values_dict.keys()),
                                              act_values):
                            act_values_dict[key] += value
                    summary = tf.Summary()
                    current_global_step = sess.run(global_step)
                    for key, value in act_values_dict.items():
                        print("test-" + key, value / n_iter_per_epoch)
                        summary.value.add(tag=key,
                                          simple_value=value /
                                          n_iter_per_epoch)
                    if writer_test is not None:
                        writer_test.add_summary(summary, current_global_step)

            saver.save(sess, sv.save_path, global_step=global_step)
        sv.stop()
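
The learning-rate schedule in the loop above holds the base rate constant until epoch_decay_start and then decays it linearly to zero at num_epochs (switching to the second momentum value at the same point). A standalone restatement, with parameter names mirroring the flags:

def decayed_learning_rate(ep, base_lr, num_epochs, epoch_decay_start):
    """Constant up to epoch_decay_start, then linear decay to 0 at num_epochs."""
    if ep < epoch_decay_start:
        return base_lr
    return base_lr * (num_epochs - ep) / float(num_epochs - epoch_decay_start)

For example, with base_lr=0.001, num_epochs=120, and epoch_decay_start=80, epoch 90 uses 0.001 * 30/40 = 0.00075.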