Example #1
 def start_train(self):
     y_ = tf.placeholder(tf.float32, [None, 2])
     avg_image = calu_average_train_set(
         config.TRAIN_ORIGINAL_IMAGE_DIR,
         [net_config.IMAGE_W, net_config.IMAGE_H])
     y = self.vgg.fcs_output
     # compute accuracy
     accuracy_tensor = calculate_accuracy(logits=y, labels=y_)
     saver = tf.train.Saver()
     labels = []
     logits = []
     with tf.Session() as sess:
         sess.run(tf.global_variables_initializer())
         saver.restore(sess, self.load_model_path)
         image_names = self.dataset.get_val_merged_image_name()
         while self.dataset.get_val_has_next():
             cur_images, cur_labels = self.dataset.get_val_merged_next_batch(
                 net_config.TRAIN_BATCH_SIZE)
             labels.extend(cur_labels)
             cur_images = resize_images(
                 cur_images, [net_config.IMAGE_W, net_config.IMAGE_H])
             cur_images = np.asarray(cur_images, np.float32)
             cur_images -= avg_image
             cur_labels = self.one_hot_encoding(cur_labels)
             feed_dict = {self.vgg.imgs: cur_images, y_: cur_labels}
             y_value, accuracy_value = sess.run([y, accuracy_tensor],
                                                feed_dict=feed_dict)
             logits.extend(np.argmax(y_value, 1))
             print 'accuracy is ', accuracy_value
             del cur_images
             gc.collect()
         _, _, acc = calculate_acc_error(logits,
                                         labels,
                                         images_name=image_names)
         # calculate_acc_error returns an error rate, hence 1 - acc
         print 'accuracy is %g' % (1 - acc)
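
All of these examples call a project-local calculate_accuracy helper whose definition is not shown. A minimal sketch consistent with the call sites (logits and one-hot labels in, a scalar accuracy tensor out) could look like the following; the repository's actual implementation may differ:

import tensorflow as tf

def calculate_accuracy(logits, labels):
    # assumed semantics: fraction of samples whose argmax prediction
    # matches the argmax of the one-hot label
    correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    return tf.reduce_mean(tf.cast(correct, tf.float32))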
Example #2
 def start_train(self):
     y_ = tf.placeholder(tf.float32, [None, 2])
     avg_image = calu_average_train_set(
         config.TRAIN_DATA_DIR, [net_config.IMAGE_W, net_config.IMAGE_H])
     y = self.vgg.fcs_output
     # compute accuracy
     accuracy_tensor = calculate_accuracy(logits=y, labels=y_)
     saver = tf.train.Saver()
     labels = []
     logits = []
     with tf.Session() as sess:
         sess.run(tf.global_variables_initializer())
         if self.load_model:
             saver.restore(sess, net_config.MODEL_LOAD_PATH)
         image_names = self.dataset.get_val_merged_image_name()
         while self.dataset.get_val_has_next():
         cur_images, cur_labels = self.dataset.get_val_merged_next_batch(
             net_config.TRAIN_BATCH_SIZE)
             labels.extend(cur_labels)
             cur_images = resize_images(
                 cur_images,
                 [net_config.IMAGE_W, net_config.IMAGE_H]
             )
             cur_images = np.asarray(cur_images, np.float32)
             cur_images -= avg_image
             cur_labels = self.one_hot_encoding(cur_labels)
         feed_dict = {self.vgg.imgs: cur_images, y_: cur_labels}
             y_value, accuracy_value = sess.run(
                 [y, accuracy_tensor],
                 feed_dict=feed_dict
             )
             logits.extend(np.argmax(y_value, 1))
             print 'accuracy is ', accuracy_value
             del cur_images
             gc.collect()
         recall, precision, f1_score = get_game_evaluate(logits=logits,
                                                         labels=labels)
         print 'recall is %g\nprecision is %g\nf1_score is %g\n' % (recall, precision, f1_score)
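
get_game_evaluate is another helper from this project. Judging by the call site, it takes flat lists of predicted and true class indices and returns recall, precision, and F1 for the positive class. A plain-Python sketch under that assumption (treating class 1 as positive):

def get_game_evaluate(logits, labels):
    # assumed semantics: logits/labels are flat lists of class indices,
    # with class 1 taken as the positive class
    tp = sum(1 for p, t in zip(logits, labels) if p == 1 and t == 1)
    fp = sum(1 for p, t in zip(logits, labels) if p == 1 and t == 0)
    fn = sum(1 for p, t in zip(logits, labels) if p == 0 and t == 1)
    recall = tp / float(tp + fn) if (tp + fn) else 0.0
    precision = tp / float(tp + fp) if (tp + fp) else 0.0
    f1_score = (2 * precision * recall / (precision + recall)
                if (precision + recall) else 0.0)
    return recall, precision, f1_score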
Example #3
 def start_train(self):
     y_ = tf.placeholder(tf.float32, [None, net_config.OUTPUT_NODE])
     y = self.vgg.fcs_output
     global_step = tf.Variable(0, trainable=False)
     variable_averages = tf.train.ExponentialMovingAverage(
         net_config.MOVEING_AVERAGE_DECAY, global_step)
     variable_averages_op = variable_averages.apply(tf.trainable_variables())
     loss = calculate_loss(logits=y, labels=y_)
     tf.summary.scalar('loss', loss)
     train_step = tf.train.GradientDescentOptimizer(
         learning_rate=self.learning_rate).minimize(loss=loss,
                                                    global_step=global_step)
     with tf.control_dependencies([train_step, variable_averages_op]):
         train_op = tf.no_op(name='train')
     # compute accuracy
     accuracy_tensor = calculate_accuracy(logits=y, labels=y_)
     saver = tf.train.Saver()
     with tf.Session() as sess:
         sess.run(tf.global_variables_initializer())
         if self.load_model:
             saver.restore(sess, self.load_model)
             print 'load model successful', self.load_model
         all_labels = []
         all_logits = []
         while True:
             val_images, labels, flag = self.dataset.get_val_merged_next_batch(
                 net_config.TRAIN_BATCH_SIZE)
             if not flag:
                 break
             all_labels.extend(labels)
             labels = self.one_hot_encoding(labels)
             feed_dict = {self.vgg.imgs: val_images, y_: labels}
             global_step_value, val_loss, val_accuracy, logits = sess.run(
                 [global_step, loss, accuracy_tensor, y],
                 feed_dict=feed_dict
             )
             all_logits.extend(np.argmax(logits, 1))
             calculate_acc_error(
                 logits=np.argmax(logits, 1),
                 label=np.argmax(labels, 1)
             )
             print '-' * 15, 'val accuracy is %g' % val_accuracy, '-' * 15
         recall, precision, f1_score = get_game_evaluate(
             logits=all_logits,
             labels=all_labels
         )
         print 'recall is %g, precision is %g, f1_score is %g' % (recall, precision, f1_score)
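
calculate_loss is also not shown. For logits paired with one-hot labels, the conventional choice, and a plausible sketch of the helper, is mean softmax cross-entropy:

def calculate_loss(logits, labels):
    # assumed semantics: mean softmax cross-entropy over the batch
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=labels)
    return tf.reduce_mean(cross_entropy)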
Example #4
def train(is_training, logits, images, labels, save_model_path=None):
    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)
    val_step = tf.get_variable('val_step', [],
                               initializer=tf.constant_initializer(0),
                               trainable=False)

    loss_ = loss(logits, labels)
    predictions = tf.nn.softmax(logits)
    print 'predictions shape is ', predictions
    print 'label is ', labels
    top1_error = top_k_error(predictions, labels, 1)
    labels_onehot = tf.one_hot(labels, net_config.OUTPUT_NODE)
    print 'one hot label is ', labels_onehot
    accuracy_tensor = calculate_accuracy(predictions, labels_onehot)

    # loss_avg
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, ema.apply([loss_]))
    tf.summary.scalar('loss_avg', ema.average(loss_))

    # validation stats
    ema = tf.train.ExponentialMovingAverage(0.9, val_step)
    val_op = tf.group(val_step.assign_add(1), ema.apply([top1_error]))
    top1_error_avg = ema.average(top1_error)
    tf.summary.scalar('val_top1_error_avg', top1_error_avg)

    tf.summary.scalar('learning_rate', FLAGS.learning_rate)

    opt = tf.train.MomentumOptimizer(FLAGS.learning_rate, MOMENTUM)
    grads = opt.compute_gradients(loss_)
    for grad, var in grads:
        if grad is not None and not FLAGS.minimal_summaries:
            tf.summary.histogram(var.op.name + '/gradients', grad)
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    if not FLAGS.minimal_summaries:
        # Display the training images in the visualizer.
        tf.summary.image('images', images)

        for var in tf.trainable_variables():
            # weight tensors are not 4-D images; histogram is the valid summary
            tf.summary.histogram(var.op.name, var)

    batchnorm_updates = tf.get_collection(UPDATE_OPS_COLLECTION)
    batchnorm_updates_op = tf.group(*batchnorm_updates)
    train_op = tf.group(apply_gradient_op, batchnorm_updates_op)

    saver = tf.train.Saver(tf.global_variables())

    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()

    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)

    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(FLAGS.log_val_dir, sess.graph)
    if FLAGS.resume:
        latest = tf.train.latest_checkpoint(
            '/home/give/PycharmProjects/StomachCanner/classification/Net/ResNet/models/instance/5500.0/'
        )
        if not latest:
            print "No checkpoint to continue from in", FLAGS.train_dir
            sys.exit(1)
        print "resume", latest
        saver.restore(sess, latest)

    for x in xrange(FLAGS.max_steps + 1):
        start_time = time.time()

        step = sess.run(global_step)
        i = [train_op, loss_]

        write_summary = step % 100 == 0 and step > 1  # write summaries every 100 steps
        if write_summary:
            i.append(summary_op)
        o = sess.run(i, {is_training: True})

        loss_value = o[1]

        duration = time.time() - start_time

        assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

        if step % 5 == 0:
            top1_error_value, accuracy_value, labels_values, predictions_values = sess.run(
                [top1_error, accuracy_tensor, labels, predictions],
                feed_dict={is_training: True})
            predictions_values = np.argmax(predictions_values, axis=1)
            examples_per_sec = FLAGS.batch_size / float(duration)
            accuracy = eval_accuracy(predictions_values,
                                     labels_values,
                                     num_category=net_config.OUTPUT_NODE)
            format_str = (
                'step %d, loss = %.2f, top1 error = %g, accuracy value = %g, 0 rate = %g, 1 rate = %g, 2 rate = %g  (%.1f examples/sec; %.3f '
                'sec/batch)')

            print(format_str % (step, loss_value, top1_error_value,
                                accuracy_value, accuracy[0], accuracy[1],
                                accuracy[2], examples_per_sec, duration))
        if write_summary:
            summary_str = o[2]
            summary_writer.add_summary(summary_str, step)

        # Save the model checkpoint periodically.
        if step > 1 and step % 100 == 0:
            checkpoint_path = os.path.join(save_model_path, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=global_step)
            save_dir = os.path.join(save_model_path, str(step))
            if not os.path.exists(save_dir):
                os.mkdir(save_dir)
            filenames = glob(
                os.path.join(save_model_path,
                             '*-' + str(int(step + 1)) + '.*'))
            for filename in filenames:
                shutil.copy(filename,
                            os.path.join(save_dir, os.path.basename(filename)))
        # Run validation periodically
        if step > 1 and step % 100 == 0:
            _, top1_error_value, summary_value, accuracy_value, labels_values, predictions_values = sess.run(
                [
                    val_op, top1_error, summary_op, accuracy_tensor, labels,
                    predictions
                ], {is_training: False})
            predictions_values = np.argmax(predictions_values, axis=1)
            accuracy = eval_accuracy(predictions_values, labels_values)
            print(
                'Validation top1 error %.2f, accuracy value %f,  0 rate = %g, 1 rate = %g  '
                % (top1_error_value, accuracy_value, accuracy[0], accuracy[1]))
            val_summary_writer.add_summary(summary_value, step)
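
top_k_error is project-local as well. Given softmax predictions and integer class labels, a sketch matching its apparent meaning (the share of the batch whose true class is not among the top-k predictions) is:

def top_k_error(predictions, labels, k):
    # assumed semantics: 1 - top-k accuracy; labels are int32/int64 class ids
    in_top_k = tf.cast(tf.nn.in_top_k(predictions, labels, k), tf.float32)
    return 1.0 - tf.reduce_mean(in_top_k)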
Example #5
    def start_train(self):
        y_ = tf.placeholder(tf.float32, [None, 2])
        avg_image = calu_average_train_set(
            config.TRAIN_DATA_DIR, [net_config.IMAGE_W, net_config.IMAGE_H])
        y = self.vgg.fcs_output
        global_step = tf.Variable(0, trainable=False)
        variable_averages = tf.train.ExponentialMovingAverage(
            net_config.MOVEING_AVERAGE_DECAY, global_step)
        variable_averages_op = variable_averages.apply(
            tf.trainable_variables())
        loss = calculate_loss(logits=y, labels=y_)
        tf.summary.scalar('loss', loss)
        train_step = tf.train.GradientDescentOptimizer(
            learning_rate=self.learning_rate).minimize(loss,
                                                       global_step=global_step)
        with tf.control_dependencies([train_step, variable_averages_op]):
            train_op = tf.no_op(name='train')
        # compute accuracy
        accuracy_tensor = calculate_accuracy(logits=y, labels=y_)
        merged = tf.summary.merge_all()
        max_accuracy = 0.0
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            log_path = './log/train'
            val_log_path = './log/val'
            if self.load_model:
                saver.restore(sess, net_config.MODEL_LOAD_PATH)
            writer = tf.summary.FileWriter(log_path, tf.get_default_graph())
            val_writer = tf.summary.FileWriter(val_log_path,
                                               tf.get_default_graph())
            for i in range(self.iterator_number):
                train_images, labels = self.dataset.get_next_train_batch(
                    net_config.TRAIN_BATCH_SIZE,
                    net_config.TRAIN_BATCH_DISTRIBUTION)
                train_images = resize_images(
                    train_images, [net_config.IMAGE_W, net_config.IMAGE_H])
                train_images = np.asarray(train_images, np.float32)
                train_images -= avg_image

                labels = self.one_hot_encoding(labels)
                feed_dict = {self.vgg.imgs: train_images, y_: labels}
                _, loss_value, accuracy_value, summary, y_value, global_step_value = sess.run(
                    [train_op, loss, accuracy_tensor, merged, y, global_step],
                    feed_dict=feed_dict)
                if i % 500 == 0 and i != 0:
                    # save the model
                    saver.save(sess, net_config.MODEL_SAVE_PATH)
                    print 'model saved successfully'
                writer.add_summary(summary, i)
                if (i % 40) == 0 and i != 0:
                    val_images, labels = self.dataset.get_next_val_batch(
                        net_config.TRAIN_BATCH_SIZE,
                        net_config.TRAIN_BATCH_DISTRIBUTION)
                    val_images = resize_images(
                        val_images, [net_config.IMAGE_W, net_config.IMAGE_H])
                    val_images = np.asarray(val_images, np.float32)
                    val_images -= avg_image

                    labels = self.one_hot_encoding(labels)
                    feed_dict = {self.vgg.imgs: val_images, y_: labels}
                    val_loss, val_accuracy, summary = sess.run(
                        [loss, accuracy_tensor, merged], feed_dict=feed_dict)
                    val_writer.add_summary(summary, i)
                    print '-'*15, 'global_step is %d, train loss value is %g, accuracy is %g, val loss is %g, val accuracy is %g' % \
                                  (global_step_value, loss_value, accuracy_value, val_loss, val_accuracy), '-'*15
                del train_images, labels
                gc.collect()
        writer.close()
        val_writer.close()
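
Examples #1, #2, and #5 subtract a mean image computed by calu_average_train_set, whose implementation is not included. A hypothetical version that averages every resized training image in a directory might look like this (the directory layout, RGB files, and PIL-based loading are all assumptions):

import os
import numpy as np
from PIL import Image

def calu_average_train_set(image_dir, size):
    # hypothetical reimplementation: per-pixel mean over the training set;
    # size is [width, height], matching the calls above
    names = os.listdir(image_dir)
    total = np.zeros([size[1], size[0], 3], np.float64)
    for name in names:
        image = Image.open(os.path.join(image_dir, name)).resize(tuple(size))
        total += np.asarray(image, np.float64)
    return (total / len(names)).astype(np.float32)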
Example #6
    def start_train(self):
        y_ = tf.placeholder(tf.float32, [None, net_config.OUTPUT_NODE])
        y = self.vgg.fcs_output
        global_step = tf.Variable(0, trainable=False)
        variable_averages = tf.train.ExponentialMovingAverage(
            net_config.MOVEING_AVERAGE_DECAY, global_step)
        variable_averages_op = variable_averages.apply(
            tf.trainable_variables())
        loss = calculate_loss(logits=y, labels=y_)
        tf.summary.scalar('loss', loss)
        train_step = tf.train.GradientDescentOptimizer(
            learning_rate=self.learning_rate).minimize(loss=loss,
                                                       global_step=global_step)
        with tf.control_dependencies([train_step, variable_averages_op]):
            train_op = tf.no_op(name='train')
        # compute accuracy
        accuracy_tensor = calculate_accuracy(logits=y, labels=y_)
        merged = tf.summary.merge_all()
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            log_path = './log/' + str(self.up_threshold) + '_' + str(
                self.down_threhold) + '/train'
            val_log_path = './log/' + str(self.up_threshold) + '_' + str(
                self.down_threhold) + '/val'
            if self.load_model:
                saver.restore(sess, self.load_model)
                print 'load model successful', self.load_model
            writer = tf.summary.FileWriter(log_path, tf.get_default_graph())
            val_writer = tf.summary.FileWriter(val_log_path,
                                               tf.get_default_graph())
            for i in range(self.iterator_number):
                if i % 500 == 0 and i != 0:
                    saver.save(sess, self.save_model_path)
                    print 'model saved successfully'
                train_images, labels = self.dataset.get_next_train_batch(
                    net_config.TRAIN_BATCH_SIZE,
                    net_config.TRAIN_BATCH_DISTRIBUTION)

                labels = self.one_hot_encoding(labels)
                feed_dict = {self.vgg.imgs: train_images, y_: labels}
                _, loss_value, accuracy_value, summary, y_value, global_step_value = sess.run(
                    [train_op, loss, accuracy_tensor, merged, y, global_step],
                    feed_dict=feed_dict)
                writer.add_summary(summary, global_step_value)
                if (i % 10) == 0 and i != 0:
                    val_images, labels = self.dataset.get_next_val_batch(
                        net_config.TRAIN_BATCH_SIZE,
                        net_config.TRAIN_BATCH_DISTRIBUTION)
                    labels = self.one_hot_encoding(labels)
                    feed_dict = {self.vgg.imgs: val_images, y_: labels}
                    global_step_value, val_loss, val_accuracy, summary, logits = sess.run(
                        [global_step, loss, accuracy_tensor, merged, y],
                        feed_dict=feed_dict)
                    # print 'will save, accuracy is %g' % val_accuracy
                    # print np.shape(y_value)
                    calculate_acc_error(logits=np.argmax(logits, 1),
                                        label=np.argmax(labels, 1))
                    val_writer.add_summary(summary, global_step_value)
                    print '-' * 15, 'step is %d, train loss value is %g, accuracy is %g, val loss is %g, val accuracy is %g' % \
                                    (global_step_value, loss_value, accuracy_value, val_loss, val_accuracy), '-' * 15
                del train_images, labels
                gc.collect()
        writer.close()
        val_writer.close()
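
self.one_hot_encoding converts a batch of integer labels into the float one-hot matrix fed to y_. A minimal NumPy sketch, assuming two classes as in most of these examples:

def one_hot_encoding(self, labels, num_classes=2):
    # assumed semantics: integer class labels -> float32 one-hot rows
    labels = np.asarray(labels, np.int32)
    encoded = np.zeros([len(labels), num_classes], np.float32)
    encoded[np.arange(len(labels)), labels] = 1.0
    return encoded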
Example #7
def val(image_tensor,
        logits,
        image_values,
        labels,
        label_values,
        save_model_path=None):
    is_training = tf.placeholder('bool', [], name='is_training')
    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)
    val_step = tf.get_variable('val_step', [],
                               initializer=tf.constant_initializer(0),
                               trainable=False)

    loss_ = loss(logits, labels)
    predictions = tf.nn.softmax(logits)
    top1_error = top_k_error(predictions, labels, 1)
    labels_onehot = tf.one_hot(labels, 2)
    accuracy_tensor = calculate_accuracy(predictions, labels_onehot)

    # loss_avg
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, ema.apply([loss_]))
    tf.summary.scalar('loss_avg', ema.average(loss_))

    # validation stats
    ema = tf.train.ExponentialMovingAverage(0.9, val_step)
    val_op = tf.group(val_step.assign_add(1), ema.apply([top1_error]))
    top1_error_avg = ema.average(top1_error)
    tf.summary.scalar('val_top1_error_avg', top1_error_avg)

    opt = tf.train.MomentumOptimizer(0.01, MOMENTUM)
    grads = opt.compute_gradients(loss_)
    for grad, var in grads:
        # 'not True' keeps this branch dead, so gradient histograms are
        # never written in this validation variant
        if grad is not None and not True:
            tf.summary.histogram(var.op.name + '/gradients', grad)
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    batchnorm_updates = tf.get_collection(UPDATE_OPS_COLLECTION)
    batchnorm_updates_op = tf.group(*batchnorm_updates)
    train_op = tf.group(apply_gradient_op, batchnorm_updates_op)

    saver = tf.train.Saver(tf.global_variables())

    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()

    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)

    if FLAGS.resume:
        latest = tf.train.latest_checkpoint(
            '/home/give/PycharmProjects/StomachCanner/classification/Net/ResNetHeatMap/models/method5-512'
        )
        if not latest:
            print "No checkpoint to continue from in", FLAGS.train_dir
            sys.exit(1)
        print "resume", latest
        saver.restore(sess, latest)

    for x in xrange(FLAGS.max_steps + 1):
        start_time = time.time()

        step = sess.run(global_step)
        i = [loss_]

        write_summary = step % 100 == 0 and step > 1  # write summaries every 100 steps
        if write_summary:
            i.append(summary_op)
        o = sess.run(i, {
            image_tensor: image_values,
            labels: np.asarray(label_values, np.int32)
        })

        loss_value = o[0]

        duration = time.time() - start_time

        assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

        top1_error_value, accuracy_value, predictions_values = sess.run(
            [top1_error, accuracy_tensor, predictions],
            feed_dict={
                image_tensor: image_values,
                labels: np.asarray(label_values, np.int32)
            })
        predictions_values = np.argmax(predictions_values, axis=1)
        examples_per_sec = FLAGS.batch_size / float(duration)
        format_str = (
            'step %d, loss = %.2f, top1 error = %g, accuracy value = %g, 0 rate = %g, 1 rate = %g  (%.1f examples/sec; %.3f '
            'sec/batch)')
        print predictions_values
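
Finally, eval_accuracy in Examples #4 and #7 produces the per-class rates printed as '0 rate', '1 rate', and so on. A sketch matching that usage (per-class accuracy over the batch, returned as a list indexed by class) would be:

def eval_accuracy(predictions, labels, num_category=2):
    # assumed semantics: per-class accuracy ("rate") for each category
    predictions = np.asarray(predictions)
    labels = np.asarray(labels)
    rates = []
    for category in range(num_category):
        mask = labels == category
        rates.append(float(np.mean(predictions[mask] == category))
                     if mask.any() else 0.0)
    return rates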