Example #1
import tensorflow as tf

from tools import visualize

FLAGS = tf.app.flags.FLAGS  # flag values (IMG_WIDTH, cls, ...) are defined elsewhere in the project


def train_act(features_train, labels_train, features_test, labels_test):
    sess = tf.InteractiveSession()
    x = tf.placeholder(
        dtype=tf.float32,
        shape=[None, FLAGS.IMG_WIDTH, FLAGS.IMG_HEIGHT, FLAGS.IMG_CHANNEL],
        name='features')
    y = tf.placeholder(dtype=tf.float32,
                       shape=[None, FLAGS.cls],
                       name='labels')
    pred = model(data=x)
    with tf.name_scope('loss'):
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
    tf.summary.scalar('loss', loss)
    tf.summary.histogram('loss', loss)
    with tf.name_scope('train'):
        train = tf.train.MomentumOptimizer(
            learning_rate=FLAGS.LEARNING_RATE,
            momentum=FLAGS.MOMENTUM).minimize(loss)
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)), tf.float32))
    tf.summary.scalar('accuracy', accuracy)
    tf.summary.histogram('accuracy', accuracy)

    merge = tf.summary.merge_all()
    logwriter = tf.summary.FileWriter(FLAGS.LOG_DIR, sess.graph)
    initial = tf.global_variables_initializer()

    sess.run(initial)
    data_size = features_train.shape[0]
    iterations = int(data_size / FLAGS.batch_size)
    for epoch in range(FLAGS.epoches):
        for i in range(iterations):
            if i == iterations - 1:
                data = features_train[i * FLAGS.batch_size:data_size, :, :, :]
                labels = labels_train[i * FLAGS.batch_size:data_size]
            else:
                data = features_train[i * FLAGS.batch_size:(i + 1) *
                                      FLAGS.batch_size, :, :, :]
                labels = labels_train[i * FLAGS.batch_size:(i + 1) *
                                      FLAGS.batch_size]
            sess.run(train, feed_dict={x: data, y: labels})
            if i % 10 == 0:
                summary, accuracy_res = sess.run([merge, accuracy],
                                                 feed_dict={
                                                     x: features_test,
                                                     y: labels_test
                                                 })
                # Use a step that grows across epochs so TensorBoard points are not overwritten.
                logwriter.add_summary(summary, epoch * iterations + i)
                print(visualize.get_time() +
                      '   epoch %d, train_iteration at %d, test score: %f ' %
                      (epoch, i, accuracy_res))
    sess.close()
    logwriter.close()
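
This example assumes a model() function defined elsewhere in the project that maps the features placeholder to class logits of shape [None, FLAGS.cls]. A minimal hypothetical stand-in (a single fully connected layer, not the original network), reusing the imports and flags referenced above, could look like this:

def model(data):
    # Hypothetical stand-in: flatten the image batch and apply one fully
    # connected layer producing unscaled logits for the softmax cross-entropy.
    with tf.name_scope('model'):
        dim = FLAGS.IMG_WIDTH * FLAGS.IMG_HEIGHT * FLAGS.IMG_CHANNEL
        flat = tf.reshape(data, [-1, dim])
        weights = tf.Variable(
            tf.truncated_normal([dim, FLAGS.cls], stddev=1e-2))
        bias = tf.Variable(tf.zeros([FLAGS.cls]))
        return tf.matmul(flat, weights) + bias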
Example #2
import tensorflow as tf

from tools import visualize, dataset

NUMBER_CLASSES = 10
IMAGE_WIDTH = 32
IMAGE_HEIGHT = 32
IMAGE_CHANNELS = 3
DATASET_PATH = '/home/workspace/cifar10/pythonver'
LEARNING_RATE = 0.001
MOMENTUM = 0.9
batch_size = 128
epoches = 5
KEEP_PROB = 0.6
LOG_DIR = 'Log--' + visualize.get_time()


def convolution(data, kernel, strides, name, bias, padding='SAME'):
    with tf.name_scope(name):
        conv = tf.nn.conv2d(input=data,
                            filter=kernel,
                            strides=[1, strides, strides, 1],
                            padding=padding,
                            name=name)
        with tf.name_scope(name + '_relu'):
            conv = tf.nn.bias_add(conv, bias)
            relu = tf.nn.relu(conv, name=name + '_relu')
        return relu
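
For context, this helper expects the caller to create the kernel and bias variables. A short illustrative call, reusing the constants defined above (the 3x3 kernel and 64 output channels are arbitrary choices, not taken from the original model):

images = tf.placeholder(tf.float32,
                        [None, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS])
# 3x3 kernel mapping the 3 input channels to 64 feature maps, plus its bias.
kernel = tf.Variable(
    tf.truncated_normal([3, 3, IMAGE_CHANNELS, 64], stddev=1e-2))
bias = tf.Variable(tf.zeros([64]))
conv1 = convolution(images, kernel, strides=1, name='conv1', bias=bias)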
Example #3
import tensorflow as tf

from tools import visualize

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_boolean('train_mode', True, 'True--train, False--eval')
tf.app.flags.DEFINE_boolean('bottleneck_residual_flag', True,
                            'True--bottleneck residual, False--residual')
tf.app.flags.DEFINE_integer('residual_numbers', 50,
                            'number of residual modules')
tf.app.flags.DEFINE_integer('cls', 10, 'number of classes')
tf.app.flags.DEFINE_integer('IMG_WIDTH', 32, 'image width')
tf.app.flags.DEFINE_integer('IMG_HEIGHT', 32, 'image height')
tf.app.flags.DEFINE_integer('IMG_CHANNEL', 3, 'image channel')
tf.app.flags.DEFINE_integer('batch_size', 12, 'batch size')
tf.app.flags.DEFINE_integer('epoches', 1, 'number of epochs')
tf.app.flags.DEFINE_float('LEARNING_RATE', 0.1,
                          'learning rate for momentum GD')
tf.app.flags.DEFINE_float('MOMENTUM', 0.9, 'momentum rate for momentum GD')
tf.app.flags.DEFINE_string('data_path', 'cifar10-pythonver', 'path of dataset')
tf.app.flags.DEFINE_string('LOG_DIR', 'Log--' + visualize.get_time(),
                           'tensorboard log dir')


def convolution(data, kernel, strides, name, padding='SAME'):
    with tf.name_scope(name):
        conv = tf.nn.conv2d(input=data,
                            filter=kernel,
                            strides=[1, strides, strides, 1],
                            padding=padding,
                            name=name)
        return conv


def pooling(data, ksize, strides, name, padding='VALID'):
    with tf.name_scope(name):
        # Max pooling over a square window, wrapped in its own name scope.
        pool = tf.nn.max_pool(value=data,
                              ksize=[1, ksize, ksize, 1],
                              strides=[1, strides, strides, 1],
                              padding=padding,
                              name=name)
        return pool
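
As in the previous example, the kernel is supplied by the caller. A hypothetical conv-then-pool chain built on the flags defined above (layer shapes are illustrative only):

images = tf.placeholder(
    tf.float32,
    [None, FLAGS.IMG_WIDTH, FLAGS.IMG_HEIGHT, FLAGS.IMG_CHANNEL])
# 3x3 convolution with 16 output maps, followed by 2x2 max pooling.
kernel1 = tf.Variable(
    tf.truncated_normal([3, 3, FLAGS.IMG_CHANNEL, 16], stddev=1e-2))
conv1 = convolution(images, kernel1, strides=1, name='conv1')
pool1 = pooling(conv1, ksize=2, strides=2, name='pool1')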
Example #4

import tensorflow as tf
import six
from tools import visualize


FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('cls', 10, 'number of classes')
tf.app.flags.DEFINE_integer('IMG_WIDTH', 299, 'image width')
tf.app.flags.DEFINE_integer('IMG_HEIGHT', 299, 'image height')
tf.app.flags.DEFINE_integer('IMG_CHANNEL', 3, 'image channel')
tf.app.flags.DEFINE_integer('batch_size', 5, 'batch size')
tf.app.flags.DEFINE_integer('epoches', 1, 'number of epochs')
tf.app.flags.DEFINE_float('LEARNING_RATE', 0.0001, 'learning rate for momentum GD')
tf.app.flags.DEFINE_float('MOMENTUM', 0.9, 'momentum rate for momentum GD')
tf.app.flags.DEFINE_string('data_path', '/home/workspace/cc', 'path of dataset')
tf.app.flags.DEFINE_string('LOG_DIR', 'Log--' + visualize.get_time(), 'tensorboard log dir')


def convolution(data, kernel, strides, padding='SAME', name=None):
    with tf.name_scope(name):
        conv = tf.nn.conv2d(input=data,
                            filter=tf.Variable(tf.truncated_normal(shape=kernel, stddev=1e-2, dtype=tf.float32)),
                            strides=[1, strides, strides, 1], padding=padding, name=name)
        return conv


def pooling(data, ksize, strides, padding='SAME', type='max', name=None):
    with tf.name_scope(name):
        if type == 'max':
            pool = tf.nn.max_pool(value=data, ksize=[1, ksize, ksize, 1],
                                  strides=[1, strides, strides, 1], padding=padding, name=name)