Example #1
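All four examples below target the TensorFlow 1.x API and lean on project-local helpers (sub_Config/net_config, inference and inference_small, loss, changed_shape, calculate_acc_error, acc_binary_acc). A minimal import header they would share is sketched here; the commented module paths are placeholders, not the project's actual layout:

import os

import numpy as np
import tensorflow as tf  # 1.x API: tf.placeholder, tf.app.flags, tf.Session

# Project-local helpers used below (module paths are placeholders):
# from config import sub_Config, net_config
# from resnet import inference, inference_small, loss
# from utils import changed_shape, calculate_acc_error, acc_binary_acc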
def val(val_data_set, load_model_path, phases_names):
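    """Evaluate the multi-phase model on one full validation batch.

    Restores the checkpoint at load_model_path (if given), runs the whole
    validation set through inference_small, and reports overall accuracy plus
    the per-class and binary accuracies computed by the project helpers.
    """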
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL * len(phases_names)
                       ],
                       name='input_x')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    global_step = tf.Variable(0, trainable=False)
    is_training = tf.placeholder('bool', [], name='is_training')
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean(
        'use_bn', True, 'use batch normalization. otherwise use biases')
    y = inference_small(x,
                        is_training=is_training,
                        num_classes=sub_Config.OUTPUT_NODE,
                        use_bias=not FLAGS.use_bn,  # per the flag's help text, biases replace batch norm
                        phase_names=phases_names,
                        num_blocks=3)

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        if load_model_path:
            saver.restore(sess, load_model_path)

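        # passing (None, None) for batch size and distribution is assumed to
        # make get_next_batch return the entire validation set at once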
        validation_images, validation_labels = val_data_set.get_next_batch(
            None, None)

        validation_accuracy, logits = sess.run([accuracy_tensor, y],
                                               feed_dict={
                                                   x: validation_images,
                                                   y_: validation_labels
                                               })
        calculate_acc_error(logits=np.argmax(logits, 1),
                            label=validation_labels,
                            show=True)
        binary_acc = acc_binary_acc(
            logits=np.argmax(logits, 1),
            label=validation_labels,
        )
        print('accuracy is %g, binary_acc is %g' %
              (validation_accuracy, binary_acc))
Example #2
def train(train_data_set, val_data_set, load_model_path, save_model_path):
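    """Train the small ResNet with plain SGD, checkpointing every 500 steps.

    Writes train and validation summaries under ./log/fine_tuning/ and prints
    validation metrics (including the helper-computed binary accuracy) every
    100 steps.
    """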
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL
                       ],
                       name='input_x')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # vaeriable_average_op = variable_average.apply(tf.trainable_variables())
    # regularizer = tf.contrib.layers.l2_regularizer(sub_Config.REGULARIZTION_RATE)
    is_training = tf.placeholder('bool', [], name='is_training')
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean(
        'use_bn', True, 'use batch normalization. otherwise use biases')
    y = inference_small(x,
                        is_training=is_training,
                        num_classes=sub_Config.OUTPUT_NODE,
                        use_bias=not FLAGS.use_bn,  # per the flag's help text, biases replace batch norm
                        num_blocks=3)
    tf.summary.histogram('logits', tf.argmax(y, 1))
    loss_ = loss(logits=y, labels=tf.cast(y_, np.int32))
    tf.summary.scalar('loss', loss_)
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE).minimize(
            loss=loss_, global_step=global_step)
    # with tf.control_dependencies([train_step, vaeriable_average_op]):
    #     train_op = tf.no_op(name='train')

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        if load_model_path:
            saver.restore(sess, load_model_path)
        writer = tf.summary.FileWriter('./log/fine_tuning/train',
                                       tf.get_default_graph())
        val_writer = tf.summary.FileWriter('./log/fine_tuning/val',
                                           tf.get_default_graph())
        for i in range(sub_Config.ITERATOE_NUMBER):
            images, labels = train_data_set.get_next_batch(
                sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
            images = changed_shape(images, [
                len(images), sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                sub_Config.IMAGE_CHANNEL
            ])  # match the input_x placeholder shape
            _, loss_value, accuracy_value, summary, global_step_value = sess.run(
                [train_op, loss_, accuracy_tensor, merge_op, global_step],
                feed_dict={
                    x: images,
                    y_: labels
                })
            writer.add_summary(summary=summary, global_step=global_step_value)
            if i % 500 == 0 and i != 0 and save_model_path is not None:
                # save a checkpoint into a per-step subdirectory
                saved_path = os.path.join(save_model_path,
                                          str(global_step_value))
                if not os.path.exists(saved_path):
                    os.mkdir(saved_path)
                saved_path = os.path.join(saved_path, 'model.ckpt')
                saver.save(sess, saved_path, global_step=global_step_value)
            if i % 100 == 0:
                validation_images, validation_labels = val_data_set.get_next_batch(
                    sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
                validation_images = changed_shape(validation_images, [
                    len(validation_images), sub_Config.IMAGE_W,
                    sub_Config.IMAGE_H, sub_Config.IMAGE_CHANNEL
                ])  # match the input_x placeholder shape
                validation_accuracy, validation_loss, summary, logits = sess.run(
                    [accuracy_tensor, loss_, merge_op, y],
                    feed_dict={
                        x: validation_images,
                        y_: validation_labels
                    })
                calculate_acc_error(logits=np.argmax(logits, 1),
                                    label=validation_labels,
                                    show=True)
                binary_acc = acc_binary_acc(
                    logits=np.argmax(logits, 1),
                    label=validation_labels,
                )
                val_writer.add_summary(summary, global_step_value)
                print('step is %d, training loss is %g, training accuracy is %g, '
                      'validation loss is %g, validation accuracy is %g, binary_acc is %g' %
                      (global_step_value, loss_value, accuracy_value,
                       validation_loss, validation_accuracy, binary_acc))
        writer.close()
        val_writer.close()
Example #3
def train(train_data_set, val_data_set, load_model_path, save_model_path):
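    """Train the L2-regularized inference() network.

    Unlike Example #2, every step feeds the full dataset rather than a
    mini-batch, and a single checkpoint is overwritten every 1000 steps.
    Summaries go under ./log/fine_tuning/ as before.
    """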
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL
                       ],
                       name='input_x')
    if sub_Config.NEED_MUL:
        tf.summary.image('input_x', x * 120, max_outputs=5)
    else:
        tf.summary.image('input_x', x)
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    # global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # vaeriable_average_op = variable_average.apply(tf.trainable_variables())
    regularizer = tf.contrib.layers.l2_regularizer(
        sub_Config.REGULARIZTION_RATE)
    y = inference(x, regularizer)
    tf.summary.histogram('logits', tf.argmax(y, 1))
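    # total loss: cross-entropy plus the L2 penalties that inference() is
    # assumed to register in the 'losses' collection via the regularizer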
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=y, labels=tf.cast(y_, tf.int32))) + tf.add_n(
                tf.get_collection('losses'))
    tf.summary.scalar('loss', loss)
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE).minimize(
            loss=loss,
            # global_step=global_step
        )
    # with tf.control_dependencies([train_step, vaeriable_average_op]):
    #     train_op = tf.no_op(name='train')

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        if load_model_path:
            saver.restore(sess, load_model_path)
        writer = tf.summary.FileWriter('./log/fine_tuning/train',
                                       tf.get_default_graph())
        val_writer = tf.summary.FileWriter('./log/fine_tuning/val',
                                           tf.get_default_graph())
        for i in range(sub_Config.ITERATOE_NUMBER):
            images, labels = train_data_set.images, train_data_set.labels
            images = changed_shape(images, [
                len(images), sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                sub_Config.IMAGE_CHANNEL
            ])  # match the input_x placeholder shape
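            # on the first step, display the first training image as a quick
            # visual sanity check (requires a local image viewer)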
            if i == 0:
                from PIL import Image
                image = Image.fromarray(
                    np.asarray(images[0, :, :, 0], np.uint8))
                image.show()
            _, loss_value, accuracy_value, summary = sess.run(
                [train_op, loss, accuracy_tensor, merge_op],
                feed_dict={
                    x: images,
                    y_: labels
                })
            writer.add_summary(summary=summary, global_step=i)
            if i % 1000 == 0 and i != 0 and save_model_path is not None:
                # save the model checkpoint
                saver.save(sess, save_model_path)
            if i % 100 == 0:
                validation_images, validation_labels = val_data_set.images, val_data_set.labels
                validation_images = changed_shape(validation_images, [
                    len(validation_images), sub_Config.IMAGE_W,
                    sub_Config.IMAGE_H, sub_Config.IMAGE_CHANNEL
                ])  # match the input_x placeholder shape
                validation_accuracy, validation_loss, summary, logits = sess.run(
                    [accuracy_tensor, loss, merge_op, y],
                    feed_dict={
                        x: validation_images,
                        y_: validation_labels
                    })
                calculate_acc_error(logits=np.argmax(logits, 1),
                                    label=validation_labels,
                                    show=True)
                binary_acc = acc_binary_acc(
                    logits=np.argmax(logits, 1),
                    label=validation_labels,
                )
                val_writer.add_summary(summary, i)
                print('step is %d, training loss is %g, training accuracy is %g, '
                      'validation loss is %g, validation accuracy is %g, binary_acc is %g' %
                      (i, loss_value, accuracy_value, validation_loss,
                       validation_accuracy, binary_acc))
        writer.close()
        val_writer.close()
Example #4
def val(val_data_set, load_model_path, phases_names):
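    """Evaluate the two-input (ROI plus expanded-ROI) model on the validation set.

    Restores the checkpoint at load_model_path (if given) and reports
    validation loss, overall accuracy, and the helper-computed binary accuracy.
    """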

    x_ROI = tf.placeholder(tf.float32,
                           shape=[
                               None, net_config.ROI_SIZE_W,
                               net_config.ROI_SIZE_H,
                               net_config.IMAGE_CHANNEL * len(phases_names)
                           ],
                           name='input_x')

    x_EXPAND = tf.placeholder(tf.float32,
                              shape=[
                                  None, net_config.EXPAND_SIZE_W,
                                  net_config.EXPAND_SIZE_H,
                                  net_config.IMAGE_CHANNEL * len(phases_names)
                              ])
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # vaeriable_average_op = variable_average.apply(tf.trainable_variables())
    # regularizer = tf.contrib.layers.l2_regularizer(sub_Config.REGULARIZTION_RATE)
    is_training = tf.placeholder('bool', [], name='is_training')
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean(
        'use_bn', True, 'use batch normalization. otherwise use biases')
    y = inference_small([x_ROI, x_EXPAND],
                        is_training=is_training,
                        num_classes=net_config.OUTPUT_NODE,
                        use_bias=not FLAGS.use_bn,  # per the flag's help text, biases replace batch norm
                        phase_names=phases_names,
                        num_blocks=3)
    tf.summary.histogram('logits', tf.argmax(y, 1))
    loss_ = loss(logits=y, labels=tf.cast(y_, np.int32))
    tf.summary.scalar('loss', loss_)

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        if load_model_path:
            saver.restore(sess, load_model_path)

        validation_images, validation_images_expand, validation_labels = \
            val_data_set.get_next_batch()

        validation_accuracy, validation_loss, summary, logits = sess.run(
            [accuracy_tensor, loss_, merge_op, y],
            feed_dict={
                x_ROI: validation_images,
                x_EXPAND: validation_images_expand,
                y_: validation_labels
            })
        calculate_acc_error(logits=np.argmax(logits, 1),
                            label=validation_labels,
                            show=True)
        binary_acc = acc_binary_acc(
            logits=np.argmax(logits, 1),
            label=validation_labels,
        )

        print('validation loss value is %g, accuracy is %g, binary_acc is %g' %
              (validation_loss, validation_accuracy, binary_acc))