Code example #1
def main():
    fraction = 0.4
    min_after_dequeue = int(mnist.NUM_EXAMPLES_PER_EPOCH * fraction)
    images, labels = mnist_inputs(min_after_dequeue)
    validation_images, validation_labels = mnist_inputs(2000,
                                                        train=False,
                                                        num_epochs=None)
    with tf.variable_scope("inference") as scope:
        logits = mnist.inference(images)
        scope.reuse_variables()
        validation_logits = mnist.inference(validation_images)
    loss = mnist.loss(logits, labels)
    tf.scalar_summary("cross_entropy", loss)
    accuracy = mnist.accuracy(validation_logits, validation_labels)
    tf.scalar_summary("validation_accuracy", accuracy)
    train_op = mnist.train(loss)
    sess = tf.Session()
    sess.run(tf.initialize_local_variables())
    sess.run(tf.initialize_all_variables())
    tf.train.start_queue_runners(sess=sess)
    merge = tf.merge_all_summaries()
    writer = tf.train.SummaryWriter(
        "/home/windows98/PycharmProjects/mnist/Summary/")
    for index in range(NUM_STEPS):
        _, loss_value, summary = sess.run([train_op, loss, merge])
        writer.add_summary(summary, index + 1)
        # accuracy_score, summary = sess.run([accuracy, summary])
        # writer.add_summary(summary, index+1)
        print("step:" + str(index + 1) + " loss: " + str(loss_value))
Code example #2
def run_training():
    with tf.Graph().as_default():
        images, labels = inputs(train=True,
                                batch_size=FLAGS.batch_size,
                                num_epochs=FLAGS.num_epochs)
        logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
        loss = mnist.loss(logits, labels)
        train_op = mnist.training(loss, FLAGS.learning_rate)
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess = tf.Session()
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            step = 0
            while not coord.should_stop():
                start_time = time.time()
                _, loss_value = sess.run([train_op, loss])
                duration = time.time() - start_time
                if step % 100 == 0:
                    print('Step %d: loss = %.2f (%.3f sec)' %
                          (step, loss_value, duration))
                step += 1
        except tf.errors.OutOfRangeError:
            print('Done training for %d epochs, %d steps.' %
                  (FLAGS.num_epochs, step))
        finally:
            coord.request_stop()
            coord.join(threads)
            sess.close()
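Examples #1 and #2 lean on queue-based input helpers (`mnist_inputs()`, `inputs()`) that are not shown. A minimal sketch of `inputs()` in the style of TensorFlow 1.x's classic fully_connected_reader.py follows; the TFRecord file names, feature keys, and pixel normalization are assumptions, not the original project's code.

import tensorflow as tf

IMAGE_PIXELS = 28 * 28

def read_and_decode(filename_queue):
    # Read one serialized Example from the queue and decode it.
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image.set_shape([IMAGE_PIXELS])
    # Scale pixel values from [0, 255] to [-0.5, 0.5].
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)
    return image, label

def inputs(train, batch_size, num_epochs):
    # string_input_producer creates the epoch-counting local variable that
    # makes the tf.local_variables_initializer() call above necessary.
    filename = 'train.tfrecords' if train else 'validation.tfrecords'
    filename_queue = tf.train.string_input_producer([filename],
                                                    num_epochs=num_epochs)
    image, label = read_and_decode(filename_queue)
    images, labels = tf.train.shuffle_batch(
        [image, label], batch_size=batch_size, num_threads=2,
        capacity=1000 + 3 * batch_size, min_after_dequeue=1000)
    return images, labels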
Code example #3
File: mnist_eval.py  Project: oranshayer/LRnets
def evaluate():
  tf.gfile.DeleteRecursively(FLAGS.eval_dir)
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  with tf.Graph().as_default() as g:
    mnist_dataset = input_data.read_data_sets(FLAGS.data_dir)
    images = tf.placeholder(tf.float32, [FLAGS.batch_size, 784])
    labels = tf.placeholder(tf.int64, [FLAGS.batch_size])

    # Build a Graph that computes the logits predictions from the
    # inference model.
    W1 = tf.placeholder(tf.float32, [5, 5, 1, 32])
    W2 = tf.placeholder(tf.float32, [5, 5, 32, 64])
    W_fc = tf.placeholder(tf.float32, [3136, 512])
    logits = mnist.inference(images, W1, W2, W_fc)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Restore the moving average version of the learned variables for eval.
#    variable_averages = tf.train.ExponentialMovingAverage(
#        mnist.MOVING_AVERAGE_DECAY)
#    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver()

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

    while True:
      precision = eval_once(saver, summary_writer, top_k_op, summary_op,
                            W1, W2, W_fc, mnist_dataset, images, labels)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
  return precision
Code example #4
def main():
    images, labels = inputs()
    reshaped_images = tf.reshape(images, [
        mnist.BATCH_SIZE, mnist.IMAGE_HEIGHT, mnist.IMAGE_WIDTH,
        mnist.IMAGE_DEPTH
    ])
    logits = mnist.inference(reshaped_images)
    loss = mnist.loss(logits, labels)
    accuracy = mnist.accuracy(logits, labels)
    train_op = mnist.train(loss)
    init = tf.initialize_all_variables()
    with tf.Session() as sess:
        sess.run(init)
        for index in range(NUM_STEPS):
            batch_x, batch_y = mnist_data.train.next_batch(mnist.BATCH_SIZE)
            _, loss_value = sess.run([train_op, loss],
                                     feed_dict={
                                         images: batch_x,
                                         labels: batch_y
                                     })
            print("step:" + str(index + 1) + " loss: " + str(loss_value))
            if (index + 1) % 10 == 0:
                validation_x, validation_y = mnist_data.validation.next_batch(
                    mnist.BATCH_SIZE)
                accuracy_score = sess.run(accuracy,
                                          feed_dict={
                                              images: validation_x,
                                              labels: validation_y
                                          })
                print("accuracy : " + str(accuracy_score))
Code example #5
File: fully_connected_feed.py  Project: rdefeo/tensor
def run_training():
    """Train MNIST for a number of steps."""
    data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)

    with tf.Graph().as_default():
        images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
        logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
        loss = mnist.calculate_loss(logits, labels_placeholder)
        train_op = mnist.training(loss, FLAGS.learning_rate)
        eval_correct = mnist.evaluation(logits, labels_placeholder)
        summary_op = tf.merge_all_summaries()  # Collect all summaries generated by the default graph
        saver = tf.train.Saver()  # Create a saver for writing training checkpoints.

        sess = tf.Session()
        init = tf.initialize_all_variables()
        sess.run(init)
        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
                                                graph_def=sess.graph_def)
        # Training loop
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            # Fill a feed dictionary with the actual set of images and labels
            # for this particular training step.
            feed_dict = fill_feed_dict(data_sets.train,
                                       images_placeholder,
                                       labels_placeholder)
            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.
            _, loss_value = sess.run([train_op, loss],
                                     feed_dict=feed_dict)
            duration = time.time() - start_time
            # Write the summaries and print an overview fairly often.
            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                # Update the events file.
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                saver.save(sess, FLAGS.train_dir, global_step=step)
                print('Training Data Evaluation:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        data_sets.train)

                print('Validation Data Evaluation:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        data_sets.validation)
                print('Test Data Evaluation:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        data_sets.test)
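Many of these examples call `placeholder_inputs()` and `fill_feed_dict()` without defining them. A minimal sketch in the spirit of the official fully_connected_feed.py tutorial follows; the default batch size is an assumption.

import tensorflow as tf

IMAGE_PIXELS = 28 * 28

def placeholder_inputs(batch_size):
    # Placeholders sized to hold one batch of flattened images and labels.
    images_placeholder = tf.placeholder(tf.float32,
                                        shape=(batch_size, IMAGE_PIXELS))
    labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size,))
    return images_placeholder, labels_placeholder

def fill_feed_dict(data_set, images_pl, labels_pl, batch_size=100):
    # Pull the next batch from the DataSet object and map it onto the
    # placeholders created above.
    images_feed, labels_feed = data_set.next_batch(batch_size)
    return {images_pl: images_feed, labels_pl: labels_feed}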
Code example #6
def train():
    filenames = tf.placeholder(tf.string, [None])
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.map(mnist.parse_data)
    dataset = dataset.shuffle(buffer_size=50000)
    dataset = dataset.batch(FLAGS.batch_size)
    dataset = dataset.repeat()

    iterator = dataset.make_initializable_iterator()

    global_step = tf.train.get_or_create_global_step()
    images, labels = iterator.get_next()
    logits, pred = mnist.inference(images, training=True)
    loss = mnist.loss(logits, labels)
    train_op = mnist.train(loss, global_step)

    with tf.train.MonitoredTrainingSession(
        checkpoint_dir=FLAGS.train_dir,
        hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_step), tf.train.NanTensorHook(loss)],
        save_checkpoint_steps=100
    ) as mon_sess:
        mon_sess.run(iterator.initializer, feed_dict={filenames: ['train_img.tfrecords']})
        while not mon_sess.should_stop():
            _, train_loss, train_step, label = mon_sess.run([train_op, loss, global_step, labels])
            if train_step % 100 == 0:
                print('step: {}, loss: {}'.format(train_step, train_loss))
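The `mnist.parse_data` map function used with `tf.data.TFRecordDataset` above is not shown. A plausible sketch, assuming each record stores a raw image string under 'image_raw' and an int64 'label' (both feature names are assumptions):

import tensorflow as tf

def parse_data(serialized_example):
    # Decode one serialized Example into an (image, label) pair.
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.reshape(image, [28, 28, 1])
    image = tf.cast(image, tf.float32) / 255.0
    label = tf.cast(features['label'], tf.int32)
    return image, label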
Code example #7
def run_training():
  data_sets = data_mnist.read_data_sets()
  with tf.Graph().as_default():
    images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
    logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
    loss = mnist.loss(logits, labels_placeholder)
    train_op = mnist.training(loss, FLAGS.learning_rate)
    eval_correct = mnist.evaluation(logits, labels_placeholder)

    summary_op = tf.merge_all_summaries()
    saver = tf.train.Saver()
    
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

    # Start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder)
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
      duration = time.time() - start_time

      if step % 100 == 0:
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()

      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint')
        saver.save(sess, checkpoint_file, global_step=step)
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train)
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation)
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test)
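`do_eval()` likewise goes undefined throughout this page. A minimal sketch in the spirit of the official tutorial, reusing the `fill_feed_dict` helper sketched earlier (the batch size is again an assumption):

def do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
            data_set, batch_size=100):
    # Run one epoch of evaluation and report precision @ 1.
    true_count = 0
    steps_per_epoch = data_set.num_examples // batch_size
    num_examples = steps_per_epoch * batch_size
    for _ in range(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set, images_placeholder,
                                   labels_placeholder, batch_size)
        true_count += sess.run(eval_correct, feed_dict=feed_dict)
    precision = float(true_count) / num_examples
    print('Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
          (num_examples, true_count, precision))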
Code example #8
def evaluate(dataset):
    """Evaluate model on Dataset for a number of steps."""
    with tf.Graph().as_default():
        # Graph creation
        batch_size = dataset.num_examples
        images_placeholder, labels_placeholder = mnist.placeholder_inputs(
            batch_size)
        logits = mnist.inference(images_placeholder, train=False)
        validation_accuracy = tf.reduce_sum(
            mnist.evaluation(logits,
                             labels_placeholder)) / tf.constant(batch_size)
        validation_loss = mnist.loss(logits, labels_placeholder)

        # Reference to sess and saver
        sess = tf.Session()
        saver = tf.train.Saver()

        # Create summary writer
        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                               graph_def=graph_def)
        step = -1
        while True:
            step = do_eval(saver,
                           summary_writer,
                           validation_accuracy,
                           validation_loss,
                           images_placeholder,
                           labels_placeholder,
                           dataset,
                           prev_global_step=step)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Code example #9
def train():
    images, labels = mnist.inputs(['train_img.tfrecords'], mnist.TRAIN_EXAMPLES_NUM,
                                  FLAGS.batch_size, shuffle=True)
    global_step = tf.train.get_or_create_global_step()

    logits, pred = mnist.inference(images, training=True)
    loss = mnist.loss(logits, labels)
    train_op = mnist.train(loss, global_step)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        init_op = tf.group(
            tf.local_variables_initializer(),
            tf.global_variables_initializer())
        sess.run(init_op)
        ckpt = os.path.join(FLAGS.train_dir, 'model.ckpt')

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess, coord=coord)

        for i in range(1, FLAGS.max_step + 1):
            _, train_loss, predict, label = sess.run([train_op, loss, pred, labels])
            # print(predict, '\n', label)
            if i % 100 == 0:
                print('step: {}, loss: {}'.format(i, train_loss))
                # print(predict, '\n', label)
                saver.save(sess, ckpt, global_step=i)

        coord.request_stop()
        coord.join(threads)
Code example #10
def run_test():
    """Train MNIST for a number of steps."""
    # Get the sets of images and labels for training, validation, and
    # test on MNIST.
    train, validation, test = datasets_mnist.read_data_sets(
        FLAGS.input_data_dir, FLAGS.fake_data)
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder, phase_pl = placeholder_inputs(
            FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        logits = mnist.inference(images_placeholder, FLAGS.hidden1,
                                 FLAGS.hidden2, phase_pl)

        eval_correct = mnist.evaluation(logits, labels_placeholder)
        # Collect all global variables so the moving averages can be inspected.
        all_variable = tf.global_variables()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        with tf.Session() as sess:

            saver.restore(sess, "log/model.ckpt-1999")
            for variable in all_variable:
                if "moving" in variable.name:
                    print(variable.name, variable.eval())
            do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                    phase_pl, test)
Code example #11
def evaluate(payload=None):
    if payload is None:
        return error('Payload was not found.')

    image = payload.get('data', None)
    step = payload.get('step', 1000)

    if image is None:
        return error("The 'data' parameter is required.")

    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, shape=[None, 784])
        keep_prob = tf.placeholder(tf.float32)
        inference = mnist.inference(x, keep_prob)
        saver = tf.train.Saver()
        session = tf.Session()
        session.run(tf.global_variables_initializer())
        ckpt = os.path.join(ckpt_dir, 'ckpt-%d' % step)
        if os.path.isfile(ckpt):
            saver.restore(session, ckpt)
        else:
            return error('Checkpoint file for %d step is not found.' % step)
        results = session.run(inference,
                              feed_dict={
                                  x: [image],
                                  keep_prob: 1.0
                              })[0]
        result = np.argmax(results)
        return {'inference': result, 'results': results.tolist()}
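Example #11 calls an `error()` helper and a module-level `ckpt_dir` that example #12 happens to define inline. For #11 to run standalone it needs something like the following; the checkpoint directory location is an assumption.

import os

ckpt_dir = os.path.join(os.getcwd(), 'var')  # assumed checkpoint location

def error(message):
    # Wrap an error message in the dict format the caller returns.
    return {'error': message}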
Code example #12
        def _impl(payload):
            def error(m):
                return {'error': m}

            ckpt_dir = os.path.join(pwd, 'var', self.uuid.hex)
            if payload is None:
                return error('Payload was not found.')

            image = payload.get('image', None)
            step = payload.get('step', None)

            if image is None:
                return error("The 'image' parameter is required.")
            if step is None:
                return error("The 'step' parameter is required.")

            with tf.Graph().as_default():
                x = tf.placeholder(tf.float32, shape=[None, 784])
                keep_prob = tf.placeholder(tf.float32)
                inference = mnist.inference(x, keep_prob)
                saver = tf.train.Saver()
                session = tf.Session()
                session.run(tf.global_variables_initializer())
                ckpt = os.path.join(ckpt_dir, 'ckpt-%d' % step)
                saver.restore(session, ckpt)
                #return error('Checkpoint file for %d step is not found.' % step)
                results = session.run(inference,
                                      feed_dict={
                                          x: [image],
                                          keep_prob: 1.0
                                      })[0]
                result = np.argmax(results)
                return {'inference': result, 'results': results.tolist()}
Code example #13
File: mnist_eval.py  Project: oranshayer/LRnets
def evaluate():
    """Eval CIFAR-10 for a number of steps."""
    with tf.Graph().as_default() as g:
        # Get images and labels for MNIST.
        mnist_dataset = input_data.read_data_sets(FLAGS.data_dir)
        images = tf.placeholder(tf.float32, [FLAGS.batch_size, 784])
        labels = tf.placeholder(tf.int64, [FLAGS.batch_size])
        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = mnist.inference(images)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op,
                      mnist_dataset, images, labels)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
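Examples #3 and #13 loop over an `eval_once()` helper. The sketch below is modeled on the CIFAR-10 evaluation tutorial, adapted to feed the placeholders from the in-memory dataset; the FLAGS names and checkpoint layout are assumptions.

import numpy as np
import tensorflow as tf

def eval_once(saver, summary_writer, top_k_op, summary_op,
              mnist_dataset, images, labels):
    # Restore the newest checkpoint and measure precision @ 1 on the test set.
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if not (ckpt and ckpt.model_checkpoint_path):
            print('No checkpoint file found')
            return
        saver.restore(sess, ckpt.model_checkpoint_path)
        global_step = int(ckpt.model_checkpoint_path.split('-')[-1])

        num_iter = mnist_dataset.test.num_examples // FLAGS.batch_size
        true_count = 0
        for _ in range(num_iter):
            image_batch, label_batch = mnist_dataset.test.next_batch(
                FLAGS.batch_size)
            predictions = sess.run(top_k_op, feed_dict={images: image_batch,
                                                        labels: label_batch})
            true_count += np.sum(predictions)
        precision = float(true_count) / (num_iter * FLAGS.batch_size)
        print('precision @ 1 = %.3f (global step %d)' % (precision, global_step))

        # Record the result so TensorBoard can plot it over time.
        summary = tf.Summary()
        summary.value.add(tag='Precision @ 1', simple_value=precision)
        summary_writer.add_summary(summary, global_step)
        return precision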
Code example #14
def train():
    """Train CIFAR-10 for a number of steps."""
    with tf.Graph().as_default():
        global_step = tf.train.get_or_create_global_step()

        # Get images and labels for MNIST.
        # Force input pipeline to CPU:0 to avoid operations sometimes ending up on
        # GPU and resulting in a slow down.
        with tf.device('/cpu:0'):
            images, labels = mnist.distorted_inputs()

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = mnist.inference(images)

        # Calculate loss.
        loss = mnist.loss(logits, labels)

        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        train_op = mnist.train(loss, global_step)

        class _LoggerHook(tf.train.SessionRunHook):
            """Logs loss and runtime."""
            def begin(self):
                self._step = -1
                self._start_time = time.time()

            def before_run(self, run_context):
                self._step += 1
                return tf.train.SessionRunArgs(loss)  # Asks for loss value.

            def after_run(self, run_context, run_values):
                if self._step % FLAGS.log_frequency == 0:
                    current_time = time.time()
                    duration = current_time - self._start_time
                    self._start_time = current_time

                    loss_value = run_values.results
                    examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
                    sec_per_batch = float(duration / FLAGS.log_frequency)

                    format_str = (
                        '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), self._step, loss_value,
                                        examples_per_sec, sec_per_batch))

        with tf.train.MonitoredTrainingSession(
                checkpoint_dir=FLAGS.train_dir,
                hooks=[
                    tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
                    tf.train.NanTensorHook(loss),
                    _LoggerHook()
                ],
                config=tf.ConfigProto(
                    log_device_placement=FLAGS.log_device_placement)) as mon_sess:
            while not mon_sess.should_stop():
                mon_sess.run(train_op)
Code example #15
def run_training():
    data_sets = input_data.read_data_sets(FLAGS.input_data_dir,
                                          FLAGS.fake_data)

    with tf.Graph().as_default():
        images_placeholder, labels_placeholder = placeholder_inputs(
            FLAGS.batch_size)
        logits = mnist.inference(images_placeholder, FLAGS.hidden1,
                                 FLAGS.hidden2)
        loss = mnist.loss(logits, labels_placeholder)
        train_op = mnist.training(loss, FLAGS.learning_rate)
        eval_correct = mnist.evaluation(logits, labels_placeholder)

        # Build the summary tensor based on the TF collection of Summaries
        summary = tf.summary.merge_all()

        init = tf.global_variables_initializer()

        # Create a saver for writing training checkpoints
        saver = tf.train.Saver()

        sess = tf.Session()

        # Instantiate a SummaryWriter to output summaries and the Graph
        summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

        sess.run(init)

        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                       labels_placeholder)
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            duration = time.time() - start_time

            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' %
                      (step, loss_value, duration))
                # Update the events file
                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                print('Training Data Eval:')
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.train)
                # Evaluate against the validation set.
                print('Validation Data Eval:')
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.validation)
                # Evaluate against the test set.
                print('Test Data Eval:')
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.test)
Code example #16
def run_training():
    data_sets = input_data.read_data_sets(fake_data)

    with tf.Graph().as_default():
        image_placeholder, label_placeholder = placeholder_inputs(batch_size)

        logits = mnist.inference(image_placeholder, hidden1_unit, hidden2_unit)
        # Compute the loss.
        loss = mnist.loss(logits, label_placeholder)
        # Training op.
        train_op = mnist.training(loss, 0.01)
        # Count the correctly classified examples.
        eval_correct = mnist.evaluation(logits, label_placeholder)
        # Merge all summary ops in the default graph.
        summary_op = tf.merge_all_summaries()
        # Saver for the network's variables.
        saver = tf.train.Saver()

        sess = tf.Session()
        # Initialize all variables.
        sess.run(tf.initialize_all_variables())

        summary_writer = tf.train.SummaryWriter(train_dir, sess.graph)

        for step in xrange(max_steps):
            start_time = time.time()
            # Build the feed_dict.
            feed_dict = fill_placeholder(data_sets.train, image_placeholder,
                                         label_placeholder)

            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

            duration = time.time() - start_time

            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' %
                      (step, loss_value, duration))
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

            if step % 1000 == 0:
                saver.save(sess, train_dir, global_step=step)

                print('Training Data Eval:')
                do_eval(sess, eval_correct, image_placeholder,
                        label_placeholder, data_sets.train)

                print("Validation Data Eval")
                do_eval(sess, eval_correct, image_placeholder,
                        label_placeholder, data_sets.validation)

                print("Test Data Eval:")
                do_eval(sess, eval_correct, image_placeholder,
                        label_placeholder, data_sets.test)
Code example #17
def run_training():
    """Train MNIST for a number of steps."""
    data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)

    with tf.Graph().as_default():
        images_placeholder, labels_placeholder = placeholder_inputs(
            FLAGS.batch_size)
        logits = mnist.inference(images_placeholder, FLAGS.hidden1,
                                 FLAGS.hidden2)
        loss = mnist.calculate_loss(logits, labels_placeholder)
        train_op = mnist.training(loss, FLAGS.learning_rate)
        eval_correct = mnist.evaluation(logits, labels_placeholder)
        # Collect all summaries generated by the default graph.
        summary_op = tf.merge_all_summaries()
        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        sess = tf.Session()
        init = tf.initialize_all_variables()
        sess.run(init)
        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
                                                graph_def=sess.graph_def)
        # Training loop
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            # Fill a feed dictionary with the actual set of images and labels
            # for this particular training step.
            feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                       labels_placeholder)
            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            duration = time.time() - start_time
            # Write the summaries and print an overview fairly often.
            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' %
                      (step, loss_value, duration))
                # Update the events file.
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                saver.save(sess, FLAGS.train_dir, global_step=step)
                print('Training Data Evaluation:')
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.train)

                print('Validation Data Evaluation:')
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.validation)
                print('Test Data Evaluation:')
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.test)
Code example #18
def main():
    with tf.Graph().as_default(), tf.device("/cpu:0"):
        tower_gradients = []
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=LEARNING_RATE)
        for gpu_index in range(NUM_GPUS):
            gpu_name = "/gpu:" + str(gpu_index)
            with tf.device(gpu_name):
                with tf.name_scope("%s_%d" % (TOWER_NAME, gpu_index)) as scope:
                    loss = tower_loss(scope)
                    tf.get_variable_scope().reuse_variables()
                    gradient = optimizer.compute_gradients(loss)
                    tower_gradients.append(gradient)
        validation_x, validation_y = mnist_inputs(MIN_AFTER_DEQUEUE,
                                                  train=False,
                                                  num_epochs=None)
        validation_logits = mnist.inference(validation_x)
        validation_accuracy = mnist.accuracy(validation_logits, validation_y)
        gradient_mean = averaged_gradients(tower_gradients)
        train_step = optimizer.apply_gradients(gradient_mean)
        init = tf.group(tf.initialize_all_variables(),
                        tf.initialize_local_variables())
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        start_time = datetime.now()
        try:
            index = 1
            while not coord.should_stop():
                _, loss_value = sess.run([train_step, loss])
                print("step: " + str(index) + " loss:" + str(loss_value))
                if index % 10 == 0:
                    accuracy = sess.run(validation_accuracy)
                    print("validation accuracy: " + str(accuracy))
                index += 1
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
            end_time = datetime.now()
            print("Time Consumption: " + str(end_time - start_time))
        except KeyboardInterrupt:
            print("keyboard interrupt detected, stop running")

        finally:
            # When done, ask the threads to stop.
            coord.request_stop()

        # Wait for threads to finish.
        coord.join(threads)

        sess.close()
        del sess
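The multi-GPU example above relies on an `averaged_gradients()` helper. Below is a sketch following the standard CIFAR-10 multi-tower pattern; the function name matches the call above, but the body is a reconstruction, not the original project's code.

import tensorflow as tf

def averaged_gradients(tower_gradients):
    # tower_gradients is a list (one entry per GPU) of lists of
    # (gradient, variable) pairs, as returned by compute_gradients().
    average_grads = []
    for grad_and_vars in zip(*tower_gradients):
        # grad_and_vars pairs the same variable across all towers:
        # ((grad0_gpu0, var0), (grad0_gpu1, var0), ...)
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        # Average the per-tower gradients along the stacked axis.
        grad = tf.reduce_mean(tf.concat(grads, 0), 0)
        # Variables are shared across towers, so the first copy suffices.
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads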
Code example #19
def train():

    with tf.Graph().as_default():
        global_step = tf.train.get_or_create_global_step()

        with tf.device('/cpu:0'):
            images, labels = mnist.distorted_inputs()
        print(global_step)

        #with tf.device('/gpu:0'):
        logits = mnist.inference(images)
        #with tf.device('/gpu:0'):
        loss = mnist.loss(logits, labels)
        #with tf.device('/gpu:0'):
        train_op = mnist.train(loss, global_step)

        class _LoggerHook(tf.train.SessionRunHook):
            def begin(self):
                self._step = -1
                self._start_time = time.time()

            def before_run(self, run_context):
                self._step += 1
                return tf.train.SessionRunArgs(loss)

            def after_run(self, run_context, run_values):
                if self._step % FLAGS.log_frequency == 0:
                    current_time = time.time()
                    duration = current_time - self._start_time
                    self._start_time = current_time

                    loss_value = run_values.results
                    examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
                    sec_per_batch = float(duration / FLAGS.log_frequency)

                    format_str = (
                        '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), self._step, loss_value,
                                        examples_per_sec, sec_per_batch))

        with tf.train.MonitoredTrainingSession(
                checkpoint_dir=FLAGS.train_dir,
                hooks=[
                    tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
                    tf.train.NanTensorHook(loss),
                    _LoggerHook()
                ],
                config=tf.ConfigProto(
                    log_device_placement=FLAGS.log_device_placement)) as mon_sess:
            while not mon_sess.should_stop():
                mon_sess.run(train_op)
Code example #20
def evaluation():
    images, labels = mnist.inputs(['./validation_img.tfrecords'], mnist.VALIDATION_EXAMPLES_NUM,
                                  batch_size=FLAGS.batch_size, shuffle=False)
    logits, pred = mnist.inference(images, training=False)
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    saver = tf.train.Saver()

    while True:
        eval_once(saver, top_k_op)
        if FLAGS.run_once:
            break
        time.sleep(FLAGS.eval_interval_secs)
Code example #21
def run_training():
    data_sets = input_data.read_data_sets('MNIST_data', FLAGS.fake_data)

    with tf.Graph().as_default():
        image_placeholder, label_placeholder = placeholder_inputs(FLAGS.batch_size)

        logits = mnist.inference(image_placeholder, FLAGS.hidden1_unit, FLAGS.hidden2_unit)

        loss = mnist.loss(logits, label_placeholder)

        train_op = mnist.training(loss, FLAGS.learning_rate)

        eval_correct = mnist.evaluation(logits, label_placeholder)

        summary_op = tf.summary.merge_all()

        saver = tf.train.Saver()

        sess = tf.Session()

        sess.run(tf.global_variables_initializer())

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

        for step in range(FLAGS.max_steps):
            start_time = time.time()

            feed_dict = fill_placeholder(data_sets.train, image_placeholder, label_placeholder)

            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

            duration = time.time() - start_time

            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))

                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

            if step % 1000 == 0:
                saver.save(sess, FLAGS.train_dir, global_step=step)

                print('Training Data Eval: ')
                do_eval(sess, eval_correct, image_placeholder, label_placeholder, data_sets.train)

                print('Validation Data Eval: ')
                do_eval(sess, eval_correct, image_placeholder, label_placeholder, data_sets.validation)

                print('Test Data Eval:')
                do_eval(sess, eval_correct, image_placeholder, label_placeholder, data_sets.test)
Code example #22
def run_training():
    data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
    with tf.Graph().as_default():
        images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
        logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)  # The inference part of the model.

        loss = mnist.loss(logits, labels_placeholder)

        train_op = mnist.training(loss, FLAGS.learning_rate)

        eval_correct = mnist.evaluation(logits, labels_placeholder)

        summary = tf.summary.merge_all()

        init = tf.global_variables_initializer()

        saver = tf.train.Saver()

        sess = tf.Session()

        summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

        sess.run(init)

        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder)
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

            duration = time.time() - start_time

            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                summary_str = sess.run(summary, feed_dict=feed_dict)

                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)

                print('Training Data Eval:')
                do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train)

                print "Validation Data Eval:"
                do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation)

                print "Test Data Eval:"
                do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test)
Code example #23
def train_and_validation():
    training_dataset = tf.data.TFRecordDataset(['./train_img.tfrecords'])
    validation_dataset = tf.data.TFRecordDataset(['./validation_img.tfrecords'])
    test_dataset = tf.data.TFRecordDataset(['./test_img.tfrecords'])

    training_dataset = training_dataset.map(mnist.parse_data)
    training_dataset = training_dataset.shuffle(50000).batch(FLAGS.batch_size).repeat()
    validation_dataset = validation_dataset.map(mnist.parse_data).batch(FLAGS.batch_size)
    test_dataset = test_dataset.map(mnist.parse_data).batch(FLAGS.batch_size)

    iterator = tf.data.Iterator.from_structure(output_types=training_dataset.output_types,
                                               output_shapes=training_dataset.output_shapes)

    training_init_op = iterator.make_initializer(training_dataset)
    validation_init_op = iterator.make_initializer(validation_dataset)
    test_init_op = iterator.make_initializer(test_dataset)
    images, labels = iterator.get_next()

    training = tf.placeholder(dtype=tf.bool)
    logits, pred = mnist.inference(images, training=training)
    loss = mnist.loss(logits, labels)
    top_k_op = tf.nn.in_top_k(logits, labels, 1)
    global_step = tf.train.get_or_create_global_step()
    train_op = mnist.train(loss, global_step)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(training_init_op)
        print('begin to train!')
        ckpt = os.path.join(FLAGS.train_dir, 'model.ckpt')
        train_step = 0
        while train_step < FLAGS.max_step:
            _, train_loss, step, label = sess.run([train_op, loss, global_step, labels], feed_dict={training: True})
            train_step += 1
            if train_step % 100 == 0:
                saver.save(sess, ckpt, train_step)
                if train_step % 1000 == 0:
                    precision = evaluate(sess, top_k_op, training, mnist.TRAIN_EXAMPLES_NUM)
                    print('step: {}, loss: {}, training precision: {}'.format(train_step, train_loss, precision))
                sess.run(validation_init_op)
                precision = evaluate(sess, top_k_op, training, mnist.VALIDATION_EXAMPLES_NUM)
                print('step: {}, loss: {}, validation precision: {}'.format(train_step, train_loss, precision))
                sess.run(training_init_op)
        sess.run(test_init_op)
        precision = evaluate(sess, top_k_op, training, mnist.TEST_EXAMPLES_NUM)
        print('finally test precision: {}'.format(precision))
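The `evaluate()` helper used above is not shown. Since the shared iterator is already initialized on the split being measured, a plausible sketch only needs to drain it batch by batch, bounded by the example count passed in (the FLAGS name is an assumption):

import numpy as np

def evaluate(sess, top_k_op, training, examples_num):
    # Run top_k_op over the currently initialized dataset split and
    # return the fraction of correctly classified examples.
    true_count = 0
    batch_num = examples_num // FLAGS.batch_size
    for _ in range(batch_num):
        predictions = sess.run(top_k_op, feed_dict={training: False})
        true_count += np.sum(predictions)
    return float(true_count) / (batch_num * FLAGS.batch_size)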
Code example #24
File: feed.py  Project: zengxinlu/DL-Homework
def run_training():
    data_sets = input_data.read_data_sets(FLAGS.input_data_dir,
                                          FLAGS.fake_data)
    with tf.Graph().as_default():
        # Create the input placeholders.
        images_placeholder, labels_placeholder = placeholder_inputs(
            FLAGS.batch_size)
        # Build the network.
        logits = mnist.inference(images_placeholder, FLAGS.hidden1,
                                 FLAGS.hidden2)
        # Add the loss function.
        loss = mnist.loss(logits, labels_placeholder)
        # Training op.
        train_op = mnist.training(loss, FLAGS.learning_rate)
        # Accuracy computation.
        eval_correct = mnist.evaluation(logits, labels_placeholder)

        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)

        start_time = time.time()

        for step in range(FLAGS.max_steps):

            feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                       labels_placeholder)
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            if step % 100 == 0:
                duration = time.time() - start_time
                print('Step %d: loss = %.2f (%.3f sec)' %
                      (step, loss_value, duration))
                start_time = time.time()

            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                print('Training Data Eval:')
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.train)
                print('Validation Data Eval:')
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.validation)
                print('Test Data Eval:')
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.test)

    print(FLAGS)
Code example #25
def run_training():
    with tf.Graph().as_default():
        # Input images and labels.
        images, labels = inputs(train=True,
                                batch_size=FLAGS.batch_size,
                                num_epochs=FLAGS.num_epochs)
        # Build a graph that computes predictions from the inference model.
        logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
        loss = mnist.loss(logits, labels)  # Define the loss function.

        # Add to the Graph operations that train the model.
        train_op = mnist.training(loss, FLAGS.learning_rate)

        # Initialize the variables. string_input_producer internally creates an
        # epoch counter that lives in the tf.GraphKeys.LOCAL_VARIABLES collection,
        # so it must be initialized separately with local_variables_initializer().
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        sess = tf.Session()
        sess.run(init_op)

        # Start input enqueue threads
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            step = 0
            while not coord.should_stop():  # Loop until the queue raises OutOfRangeError.
                start_time = time.time()
                _, loss_value = sess.run([train_op, loss])
                duration = time.time() - start_time

                # Print a result every 100 training steps.
                if step % 100 == 0:
                    print('Step %d: loss = %.2f (%.3f sec)' %
                          (step, loss_value, duration))
                step += 1
        except tf.errors.OutOfRangeError:
            print('Done training for %d epochs, %d steps.' %
                  (FLAGS.num_epochs, step))
        finally:
            coord.request_stop()  # Ask the other threads to stop.

        coord.join(threads)
        sess.close()
Code example #26
def pred(filename, train_dir):
    img = cv2.imread(filename, flags=cv2.IMREAD_GRAYSCALE)
    img = tf.cast(img, tf.float32)
    img = tf.reshape(img, [-1, 28, 28, 1])

    logits, predict = mnist.inference(img, training=False)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('no checkpoint file')
            return
        pre = sess.run(predict)
        print('model:{}, file:{}, label: {} ({:.2f}%)'.format(
            ckpt.model_checkpoint_path, filename, np.argmax(pre[0]),
            np.max(pre[0]) * 100))
Code example #27
def test(data_set):
    with tf.Graph().as_default():
        images_placeholder, labels_placeholder = placeholder_inputs(
            FLAGS.batch_size)
        logits = mnist.inference(images_placeholder, FLAGS.hidden1,
                                 FLAGS.hidden2)
        saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver.restore(
                sess,
                "./tmp/tensorflow/mnist/logs/fully_connected_feed/model.ckpt")
            prediction = tf.argmax(logits, 1)
            feed_dict = fill_feed_dict(data_set, images_placeholder,
                                       labels_placeholder)
            predint = prediction.eval(feed_dict=feed_dict, session=sess)

            print('识别结果:', predint[0])
Code example #28
def run_training():
    data_sets = input_data.read_data_sets(data_dir)
    with tf.Graph().as_default():
        input_holder, label_holder = generate_placeholder(50, input_size)
        logits = mnist.inference(input_holder, input_size, 128, 32,
                                 label_classes)
        loss = mnist.loss(logits, label_holder)
        train_op = mnist.training(loss, 0.01)
        eval_correct = mnist.evaluation(logits, label_holder)
        summary = tf.summary.merge_all()

        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        sess = tf.Session()
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)

        sess.run(init)
        for step in range(2000):
            start_time = time.time()
            feed_dict = fill_feed_dict(data_sets.train, input_holder,
                                       label_holder, 50)
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            duration = time.time() - start_time

            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' %
                      (step, loss_value, duration))
                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

            if (step + 1) % 1000 == 0 or (step + 1) == 2000:
                checkpoint_file = os.path.join(log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                print('Training Data Eval:')
                do_eval(sess, eval_correct, input_holder, label_holder,
                        data_sets.train, 50)
                print('Validation Data Eval:')
                do_eval(sess, eval_correct, input_holder, label_holder,
                        data_sets.validation, 50)
                print('Test Data Eval:')
                do_eval(sess, eval_correct, input_holder, label_holder,
                        data_sets.test, 50)
Code example #29
def run_training():
    data_sets = data_mnist.read_data_sets()
    with tf.Graph().as_default():
        images_placeholder, labels_placeholder = placeholder_inputs(
            FLAGS.batch_size)
        logits = mnist.inference(images_placeholder, FLAGS.hidden1,
                                 FLAGS.hidden2)
        loss = mnist.loss(logits, labels_placeholder)
        train_op = mnist.training(loss, FLAGS.learning_rate)
        eval_correct = mnist.evaluation(logits, labels_placeholder)

        summary_op = tf.merge_all_summaries()
        saver = tf.train.Saver()

        sess = tf.Session()
        sess.run(tf.initialize_all_variables())
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

        # Start the training loop.
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                       labels_placeholder)
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            duration = time.time() - start_time

            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' %
                      (step, loss_value, duration))
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint')
                saver.save(sess, checkpoint_file, global_step=step)
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.train)
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.validation)
                do_eval(sess, eval_correct, images_placeholder,
                        labels_placeholder, data_sets.test)
Code example #30
def evaluate_channel(channel_index, log_dir):
    '''
    Evaluate test pixels with related network
    channel_index: start from 1, three channels of image
    '''
    with tf.Graph().as_default():

        image_channel1 = np.load('data/test/test_channel'+str(channel_index)+'.npy')
        test_pixel_num = image_channel1.shape[0]
        BATCH_SIZE = 1000
        TEST_BATCH_NUM = test_pixel_num//BATCH_SIZE
        OBSERVATION_NUM = 96
        image_placeholder = tf.placeholder(tf.float32,
                                           shape=(BATCH_SIZE, OBSERVATION_NUM))
        logits, keep_prob, shadow_prob = mnist.inference(image_placeholder)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(log_dir+'channel'+str(channel_index))

            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('_')[-1]
                print('Model restored.')
            else:
                print('No checkpoint file found')
                return

            normal_outputs = np.ndarray((TEST_BATCH_NUM,BATCH_SIZE,3))

            for i in range(TEST_BATCH_NUM):
                feed_dict = {image_placeholder: image_channel1[i*BATCH_SIZE:(i+1)*BATCH_SIZE], keep_prob: 1, shadow_prob: 1}
                outputs = sess.run(logits, feed_dict=feed_dict)
                normal_outputs[i,...] = outputs


    predict_outputs = expand(normal_outputs)
    print(predict_outputs.shape)
    np.save('predict_outputs_'+str(channel_index)+'.npy', predict_outputs)
    gts = np.load('data/test/test_normals.npy')
    degree_error = calculate_normal_error_in_degree(predict_outputs,gts)
    avg_error = np.sum(degree_error)/degree_error.shape[0]
    print('channel_index: %s, avg_error = %s' % (channel_index, avg_error))
    return predict_outputs
Code example #31
def run_training():
    data_sets = read_data_sets('/tmp/tensorflow/mnist/input_data', False)
    with tf.Graph().as_default():

        images_placeholder, labels_placeholder = placeholder_inputs(
            100)  # Compute on 100 images at a time, so the dimension here is 100.

        logits = mnist.inference(images_placeholder, 128, 32)

        loss = mnist.loss(logits, labels_placeholder)

        train_op = mnist.training(loss, 0.01)

        summary = tf.summary.merge_all()

        saver = tf.train.Saver()

        init = tf.global_variables_initializer()  # The lines above only define variables; this op initializes them in the graph.

        sess = tf.Session()

        summary_writer = tf.summary.FileWriter("logs/", sess.graph)

        sess.run(init)

        for step in xrange(2000):  # 100 images per step, 2000 steps in all: 200,000 images.

            feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                       labels_placeholder)

            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            # sess.run() evaluates loss so loss_value can be used to check whether
            # the fit improves; the real work is executing train_op, whose return
            # value is discarded into _ (train_op is an operation with no output).
            if step % 100 == 0:
                print(loss_value)
                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

            #    print(train_op)
        save_path = saver.save(sess, "myMnistNet/save_net.ckpt")
        print(save_path)
Code example #32
def mnist_training():
    x = tf.placeholder(tf.float32,
                       shape=[None, IMG_SIZE * IMG_SIZE],
                       name='x_ph')
    y_ = tf.placeholder(tf.float32, shape=[None, NUM_CLASSES], name='y_ph')

    logits = mnist.inference(x)
    loss = mnist.loss(logits, y_)
    train_op = mnist.training(loss, LR)
    eval_correct = mnist.evaluation(logits, y_)
    summary = tf.summary.merge_all()

    init = tf.global_variables_initializer()
    saver = tf.train.Saver(max_to_keep=cfg.MNIST.RUN.models_to_save)
    sess = tf.Session()

    # Instantiate a SummaryWriter to output summaries and the Graph.
    #summary_writer = tf.summary.FileWriter(, sess.graph)

    sess.run(init)
    print "*****TRAINING STARTED*******"
    for i in range(MAX_ITER):
        if i % 100 == 0 and i > 0:
            print('Step %d: loss = %.2f' % (i, loss_val))
            #targets=sess.run(tf.cast(mnist_db.test.labels,tf.int32))
            #prediction=sess.run(eval_correct,feed_dict={x:mnist_db.test.images,y_:targets})
            #print('Step %d: loss = %.2f, accuracy %.4f' % (i, loss_val,prediction))
            saver.save(sess,
                       os.path.join(cfg.MNIST.RUN.models_dir, 'model'),
                       global_step=i)
        batch = mnist_db.train.next_batch(BATCH_SIZE)
        _, loss_val = sess.run([train_op, loss],
                               feed_dict={
                                   x: batch[0],
                                   y_: batch[1]
                               })
    saver.save(
        sess,
        os.path.join(cfg.MNIST.RUN.models_dir, cfg.MNIST.RUN.last_model_name))
Code example #33
def tower_loss(scope, images, labels):
    # Build inference Graph.
    logits = mnist.inference(images, None, None, None, train=True)

    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    _ = mnist.loss(logits, labels)

    # Assemble all of the losses for the current tower only.
    losses = tf.get_collection('losses', scope)

    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')

    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
        # session. This helps the clarity of presentation on tensorboard.
        loss_name = re.sub('%s_[0-9]*/' % mnist.TOWER_NAME, '', l.op.name)
        tf.summary.scalar(loss_name, l)

    return total_loss
Code example #34
File: eval.py  Project: PFCM/weightnorm
def main(_):
    # build a model
    data = input_data.read_data_sets(FLAGS.data_dir, one_hot=False,
                                     fake_data=False)
    # make placeholders
    images = tf.placeholder(tf.float32, [FLAGS.batch_size, mnist.IMAGE_PIXELS],
                            name='inputs')
    labels = tf.placeholder(tf.int32, [FLAGS.batch_size], name='labels')

    # build model up to inference
    logits = mnist.inference(images, FLAGS.hidden_size, FLAGS.num_layers,
                             do_weightnorm=FLAGS.weightnorm,
                             do_batchnorm=FLAGS.batchnorm,
                             train=True)
    if not FLAGS.batchnorm:
        eval_logits = logits
    else:
        eval_logits = mnist.inference(images, FLAGS.hidden_size, FLAGS.num_layers,
                                      do_weightnorm=FLAGS.weightnorm,
                                      do_batchnorm=FLAGS.batchnorm,
                                      train=False)

    # get a loss function
    loss = mnist.loss(logits, labels, 'train_xent')
    eval_loss = mnist.loss(eval_logits, labels, 'eval_xent')
    # add a summary of this to track the training loss

    # get training ops
    train_op, gstep = mnist.training(loss, FLAGS.learning_rate, FLAGS.momentum)

    # get an op to return precision on a batch
    eval_op = mnist.evaluation(eval_logits, labels)

    valid_var = tf.Variable(0, name='validation_performance')
    valid_summ = tf.scalar_summary('validation accuracy', valid_var)

    # get summary op
    summarise = tf.merge_all_summaries()
    with tf.Session() as sess:
        writer = tf.train.SummaryWriter(FLAGS.logdir, sess.graph_def)
        tf.initialize_all_variables().run()
        # do some training
        print('nb: {} steps per epoch'.format(
            data.train.num_examples // FLAGS.batch_size))
        print('Step 0/{}.'.format(FLAGS.max_steps), end='')
        for i in range(FLAGS.max_steps):
            if (i+1) % 5 == 0:
                # write summaries, check on validation set
                if (i+1) % 100 == 0:
                    valid_perf = evaluate(sess,
                                          data.validation,
                                          logits,
                                          [eval_op, eval_loss],
                                          FLAGS.batch_size,
                                          images,
                                          labels,
                                          gstep,
                                          writer)
                    print()
                summ_str, _, _ = sess.run([summarise, loss, train_op],
                                          fill_feed(data.train, images, labels,
                                                    FLAGS.batch_size))
                writer.add_summary(summ_str, gstep.eval(session=sess))
            else:
                # do a step of training
                loss_val, _ = sess.run([loss, train_op],
                                   fill_feed(data.train, images, labels,
                                             FLAGS.batch_size))
                print('\rStep {} (loss {})'.format(i+1, loss_val), end='', flush=True)
        print('\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
        print('Test evaluation:')
        evaluate(sess, data.test, logits, [eval_op, eval_loss], FLAGS.batch_size, images, labels, gstep, writer)
        print('\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
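
fill_feed is called throughout this example but not shown. A minimal sketch of what it plausibly does, inferred from its call sites and the tutorial-style feed helpers (not the project's actual code):

def fill_feed(data_set, images_pl, labels_pl, batch_size):
    # Pull one batch and bind it to the two placeholders.
    images_feed, labels_feed = data_set.next_batch(batch_size)
    return {images_pl: images_feed, labels_pl: labels_feed}

The evaluate helper it pairs with is project-specific and is not reconstructed here.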
Code example #35
def run_training():
  """Train MNIST for a number of steps."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder = placeholder_inputs(
        FLAGS.batch_size)
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images_placeholder,
                             FLAGS.hidden1,
                             FLAGS.hidden2)
    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)
    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()
    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()
    # Create a session for running Ops on the Graph.
    sess = tf.Session()
    # Run the Op to initialize the variables.
    init = tf.initialize_all_variables()
    sess.run(init)
    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
                                            graph_def=sess.graph_def)
    # And then after everything is built, start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train,
                                 images_placeholder,
                                 labels_placeholder)
      # Run one step of the model.  The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op.  To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      _, loss_value = sess.run([train_op, loss],
                               feed_dict=feed_dict)
      duration = time.time() - start_time
      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        saver.save(sess, FLAGS.train_dir, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.test)
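
This example leans on the standard tutorial helpers placeholder_inputs and fill_feed_dict. For reference, a sketch matching the upstream fully_connected_feed.py versions (reproduced from memory, so treat it as approximate):

def placeholder_inputs(batch_size):
  # Placeholders sized for one batch of flattened MNIST images and labels.
  images_placeholder = tf.placeholder(tf.float32,
                                      shape=(batch_size, mnist.IMAGE_PIXELS))
  labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
  return images_placeholder, labels_placeholder

def fill_feed_dict(data_set, images_pl, labels_pl):
  # Fetch the next batch and bind it to the placeholders.
  images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
                                                 FLAGS.fake_data)
  return {images_pl: images_feed, labels_pl: labels_feed}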
Code example #36
def run_training():
    # Fetch the data
    data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
    # Run under the default Graph.
    with tf.Graph().as_default():
        # Configure the graph
        images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
        # logits has shape (batch_size, NUM_CLASSES): the unnormalized
        # class scores predicted for each example
        logits = mnist.inference(images_placeholder,
                                    FLAGS.hidden1,
                                    FLAGS.hidden2)
        # Loss function
        loss = mnist.loss(logits, labels_placeholder)
        train_op = mnist.training(loss, FLAGS.learning_rate)
        eval_correct = mnist.evaluation(logits, labels_placeholder)

        # Merge all summary tensors
        summary = tf.summary.merge_all()
        # Set up variable initialization
        init = tf.global_variables_initializer()
        # Set up checkpoint saving
        saver = tf.train.Saver()
        # Create the Session
        sess = tf.Session()

        # Create a SummaryWriter to write out the merged summaries
        summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

        # Start running

        # Initialize the variables
        sess.run(init)

        # Start training: 2000 iterations
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()

            # Fetch the data for this iteration
            feed_dict = fill_feed_dict(data_sets.train,
                                        images_placeholder,
                                        labels_placeholder)

            # The train_op output is discarded and only the loss value is kept;
            # sess.run(train_op) alone would be the simplest training step.
            # run(self, fetches, feed_dict=None, options=None, run_metadata=None)
            # fetches accepts quite flexible formats; see help(tf.Session.run)
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            duration = time.time() - start_time

            # Every 100 training steps, print the current loss and record summaries
            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

            # Every 1000 steps, checkpoint and evaluate the model
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                print('Training Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        data_sets.train)
                # Evaluate against the validation set.
                print('Validation Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        data_sets.validation)
                # Evaluate against the test set.
                print('Test Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        data_sets.test)
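
do_eval, used here and in the neighboring examples, is also a tutorial helper. A sketch of its usual shape (approximate reconstruction, not this file's code):

def do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
            data_set):
    # Count correct predictions over one full epoch of whole batches.
    true_count = 0
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    num_examples = steps_per_epoch * FLAGS.batch_size
    for _ in xrange(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set, images_placeholder,
                                   labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=feed_dict)
    precision = float(true_count) / num_examples
    print('Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
          (num_examples, true_count, precision))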
Code example #37
def run_training():
    """
    Train MNIST for a number of steps
    """

    data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)

    with tf.Graph().as_default():
        images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
        logits = mnist.inference(
            images_placeholder,
            FLAGS.hidden1,
            FLAGS.hidden2
        )
        loss = mnist.loss(logits, labels_placeholder)
        train_op = mnist.training(loss, FLAGS.learning_rate)
        eval_correct = mnist.evaluation(logits, labels_placeholder)
        
        summary_op = tf.merge_all_summaries()

        saver = tf.train.Saver()

        sess = tf.Session()

        init = tf.initialize_all_variables()
        sess.run(init)

        summary_writer = tf.train.SummaryWriter(
            FLAGS.train_dir,
            graph_def=sess.graph_def
        )

        for step in xrange(FLAGS.max_steps):
            start_time = time.time()

            feed_dict = fill_feed_dict(
                data_sets.train,
                images_placeholder,
                labels_placeholder
            )

            _, loss_value = sess.run([train_op, loss], feed_dict = feed_dict)

            duration = time.time() - start_time

            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)

            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                saver.save(sess, FLAGS.train_dir, global_step = step)

                print('Evaluating on training data...')
                do_eval(
                    sess,
                    eval_correct,
                    images_placeholder,
                    labels_placeholder,
                    data_sets.train
                )

                print('Evaluating on validation data...')
                do_eval(
                    sess,
                    eval_correct,
                    images_placeholder,
                    labels_placeholder,
                    data_sets.validation
                )

                print('Evaluating on test data...')
                do_eval(
                    sess,
                    eval_correct,
                    images_placeholder,
                    labels_placeholder,
                    data_sets.test
                )
Code example #38
def run_training(learning_rate=FLAGS.learning_rate,
        momentum=FLAGS.momentum,
        max_norm=FLAGS.max_norm,
        weight_decay=FLAGS.weight_decay,
        keep_prob=FLAGS.keep_prob,
        keep_input=FLAGS.keep_input,
        beta2=FLAGS.beta2,
        num_layers=FLAGS.num_layers):
  """Train MNIST for a number of steps."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)

  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
    images_placeholder = tf.placeholder(tf.float32, shape=(None,
                                                         mnist.IMAGE_PIXELS), name='images')
    labels_placeholder = tf.placeholder(tf.int32, shape=[None], name='labels')

    keep_prob_pl = tf.placeholder(tf.float32, name='keep_prob_pl')
    keep_input_pl = tf.placeholder(tf.float32, name='keep_input_pl')
    learning_rate_pl = tf.placeholder(tf.float32, name='learning_rate_pl')
    
    def fill_feed_dict(data_set, batch_size=FLAGS.batch_size):
      # Create the feed_dict for the placeholders filled with the next
      # `batch size ` examples.
      images_feed, labels_feed = data_set.next_batch(batch_size,
                                                     FLAGS.fake_data)
      feed_dict = {
          images_placeholder: images_feed,
          labels_placeholder: labels_feed,
          keep_prob_pl: keep_prob,
          keep_input_pl: keep_input,
          learning_rate_pl: learning_rate
      }
      return feed_dict
    
    def fill_feed_dict_eval(data_set):
      return {
        images_placeholder: data_set._images,
        labels_placeholder: data_set._labels,
        keep_prob_pl: 1.0,
        keep_input_pl: 1.0,
      }

    # Build a Graph that computes predictions from the inference model.
    with tf.variable_scope('feed_forward_model') as scope:
      logits, bn = mnist.inference(images_placeholder,
                         FLAGS.hidden1,
                         num_layers,
                         weight_decay,
                         keep_prob_pl,
                         keep_input_pl,
                         max_norm)
                     
    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)
    #loss_eval = mnist.loss( logits_eval, labels_placeholder)
    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, learning_rate_pl, momentum, beta2)
    
    with tf.control_dependencies([train_op]):
      train_op = tf.group(*[b.get_assigner() for b in bn])           
    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    results = tf.placeholder(tf.float32, [4])

    summarize_evaluation = tf.scalar_summary(['correct_train', 'loss_train', 'correct_test', 'loss_test'], results)
    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()
  
    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver(max_to_keep=2)

    train_loss = test_loss = 0
    train_cor = test_cor = 0.97
    previous_test_loss = None

    first_step = 0
    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph_def)
    restore_path = tf.train.latest_checkpoint("/Users/mikowals/projects/mnist")
    if restore_path:
      saver.restore(sess, restore_path)
      first_step = int(restore_path.split('/')[-1].split('-')[-1])
      print('restored variables from', restore_path)
    else:
      # Run the Op to initialize the variables.
      print('initializing variables')
      init = tf.initialize_all_variables()
      sess.run(init)

    # And then after everything is built, start the training loop.
    for step in range(first_step, FLAGS.max_steps):
      start_time = time.time()

      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train)
      
      # Run one step of the model.  The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op.  To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      _, loss_value = sess.run([train_op, loss],
                               feed_dict=feed_dict)
      
      duration = time.time() - start_time
      # Write the summaries and print an overview fairly often.
      
      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        saver.save(sess, FLAGS.train_dir, global_step=step)
        # Evaluate against the training set.
        
        # Evaluate against the validation set.
        print('training Data Eval:')
        feed_dict = fill_feed_dict_eval(data_sets.train)
        train_cor, train_loss = sess.run([eval_correct, loss], feed_dict=feed_dict)
        train_cor = train_cor / data_sets.train.num_examples
        print(train_cor, train_loss)
  
        print('Validation Data Eval:')
        feed_dict = fill_feed_dict_eval(data_sets.validation)
        test_cor, test_loss = sess.run([eval_correct, loss], feed_dict=feed_dict)
        test_cor = test_cor / data_sets.validation.num_examples
        print(test_cor, test_loss)
        #if previous_test_loss and test_loss > previous_test_loss:
        #  learning_rate = learning_rate * 0.6
        #if previous_test_loss and test_loss < previous_test_loss:
        #  learning_rate = learning_rate * 1.02
        #previous_test_loss = test_loss
        

      if step > 1000 and step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        feed_dict[results] = [
          train_cor, 
          train_loss, 
          test_cor, 
          test_loss]
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)

  return -test_cor 
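
The commented-out block above hints at loss-driven learning-rate adaptation. A hedged sketch making that idea runnable inside the evaluation branch (the 0.6 and 1.02 factors come from the comments; the rest is an assumption):

if previous_test_loss is not None:
    if test_loss > previous_test_loss:
        learning_rate *= 0.6   # back off when validation loss worsens
    else:
        learning_rate *= 1.02  # gently accelerate when it improves
previous_test_loss = test_loss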
Code example #39
File: fully_connected_feed.py  Project: yuhao/verdict
def run_training():
    """Train MNIST for a number of steps."""
    # Get the sets of images and labels for training, validation, and
    # test on MNIST.
    data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)

    # labels and images are properties of the DataSet class, which is
    # defined at tensorflow/contrib/learn/python/learn/datasets/mnist.py
    # numpy.savetxt("/tmp/xx.csv", data_sets.train.labels, delimiter=",")
    # numpy.savetxt("/tmp/yy.csv", data_sets.test.images, delimiter=",")
    # sys.exit(1)

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)

        # Add to the Graph the Ops for loss calculation.
        loss = mnist.loss(logits, labels_placeholder)

        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = mnist.training(loss, FLAGS.learning_rate)

        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = mnist.evaluation(logits, labels_placeholder)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Add the variable initializer Op.
        init = tf.initialize_all_variables()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

        # And then after everything is built:

        # Run the Op to initialize the variables.
        sess.run(init)

        # Start the training loop.
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()

            # Fill a feed dictionary with the actual set of images and labels
            # for this particular training step.
            feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder)

            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

            duration = time.time() - start_time

            # Write the summaries and print an overview fairly often.
            if step % 100 == 0:
                # Print status to stdout.
                print("Step %d: loss = %.2f (%.3f sec)" % (step, loss_value, duration))
                # Update the events file.
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.train_dir, "checkpoint")
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                print("Training Data Eval:")
                do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train)
                # Evaluate against the validation set.
                print("Validation Data Eval:")
                do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation)
                # Evaluate against the test set.
                print("Test Data Eval:")
                do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test)

            if (step + 1) == FLAGS.max_steps:
                float_formatter = lambda x: "%.2f" % x
                numpy.set_printoptions(formatter={"float_kind": float_formatter})
                feed_dict = fill_feed_dict(data_sets.test, images_placeholder, labels_placeholder)
                #  output with softmax
                #  output = sess.run(tf.nn.softmax(logits), feed_dict=feed_dict)
                #  output without softmax
                output = sess.run(tf.argmax(logits, dimension=1), feed_dict=feed_dict)
                numpy.savetxt("/tmp/outputX.csv", output, delimiter=",")