Example #1
import logging

import tensorflow as tf
from data import cifar10, utilities

from . import vgg

logger = logging.getLogger(__name__)

# Config:
BATCH_SIZE = 64

# Data:
data_generator = utilities.finite_generator(cifar10.get_test(), BATCH_SIZE)
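# (Assumed from usage: finite_generator yields (input, label) batches of
# BATCH_SIZE and is exhausted after one pass over the test set.)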

# Define the model:
n_input = tf.placeholder(tf.float32,
                         shape=cifar10.get_shape_input(),
                         name="input")
n_label = tf.placeholder(tf.int64,
                         shape=cifar10.get_shape_label(),
                         name="label")

# Build the model
n_output = vgg.build(n_input)
# Summed count of correct predictions in the batch (not a mean accuracy):
accuracy = tf.reduce_sum(
    tf.cast(tf.equal(tf.argmax(n_output, axis=1), n_label), tf.int32))
global_step = tf.Variable(0, trainable=False, name='global_step')

# Model loader
pre_train_saver = tf.train.Saver()
def load_pretrain(sess):
    # Saver.restore expects a checkpoint path, not a directory, so resolve it:
    pre_train_saver.restore(sess, tf.train.latest_checkpoint("cnn/train_logs/"))
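
# The original example stops after defining the loader. A minimal evaluation
# loop is sketched below as an illustration, not part of the original; the
# generator protocol ((input, label) batches) and the use of `accuracy` as a
# per-batch count of correct predictions are assumptions taken from the code
# above.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    load_pretrain(sess)
    total_correct, total_seen = 0, 0
    for batch_input, batch_label in data_generator:
        total_correct += sess.run(accuracy, feed_dict={n_input: batch_input,
                                                       n_label: batch_label})
        total_seen += len(batch_label)
    logger.info("Test accuracy: %.4f", total_correct / total_seen)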
Example #2
STEP = 20             # batches between logged summaries (assumed purpose)
DATASET_SIZE = 50000  # number of images in the CIFAR-10 training set

LEARNING_RATE = 0.0001
OPTIMIZER = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)

network = 'vgg'
mode = 'normal'
logdir = 'cnn_{}_{}/train_logs/'.format(network, mode)

# Set up training data:
NUM_EPOCHS = 10  # assumed value; NUM_EPOCHS is not defined in the original snippet
NUM_BATCHES = int(NUM_EPOCHS * DATASET_SIZE / BATCH_SIZE)
data_generator = utilities.infinite_generator(cifar10.get_train(), BATCH_SIZE)
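# (Assumed: infinite_generator cycles the training set indefinitely, so
# training must be bounded by NUM_BATCHES rather than by exhaustion.)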

# Define the placeholders:
n_input = tf.placeholder(tf.float32, shape=cifar10.get_shape_input(), name="input")
n_label = tf.placeholder(tf.int64, shape=cifar10.get_shape_label(), name="label")

# Build the model
n_output = vgg.build(n_input)

# Define the loss function
loss = tf.reduce_sum(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=n_output, labels=n_label, name="softmax"))
accuracy = tf.reduce_mean(
    tf.cast(tf.equal(tf.argmax(n_output, axis=1), n_label), tf.float32))

# Tracking the loss over time
losses = []
batches = []

# Add summaries to track the state of training:
tf.summary.scalar('summary/loss', loss)
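
# Sketch of the training loop that would typically follow; this is an
# illustration, not part of the original example. It assumes the scalar
# summary above, the placeholders defined earlier, and the `losses`/`batches`
# lists used for tracking.
train_op = OPTIMIZER.minimize(loss)
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logdir, sess.graph)
    for batch in range(NUM_BATCHES):
        batch_input, batch_label = next(data_generator)
        _, batch_loss, summary = sess.run(
            [train_op, loss, summary_op],
            feed_dict={n_input: batch_input, n_label: batch_label})
        if batch % STEP == 0:
            writer.add_summary(summary, global_step=batch)
            losses.append(batch_loss)
            batches.append(batch)
    writer.close()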