with graph.as_default():
    # Run-time mode flags fed via feed_dict.
    # NOTE(review): `training` uses tf.bool but `is_testing` uses Python bool —
    # TF coerces both, but the inconsistency looks unintentional; confirm.
    training = tf.placeholder(dtype=tf.bool, name="is_training")
    is_testing = tf.placeholder(dtype=bool, shape=(), name="is_testing")

    # create global step for decaying learning rate
    global_step = tf.Variable(0, trainable=False)
    # Exponential decay: rate drops by `decay_factor` every
    # `steps_per_epoch * epochs_per_decay` steps (step-wise when staircase=True).
    learning_rate = tf.train.exponential_decay(starting_rate, global_step, steps_per_epoch * epochs_per_decay, decay_factor, staircase=staircase)

    with tf.name_scope('inputs') as scope:
        # Read one (image, label) example from the training TFRecords;
        # raw pixel values are kept (normalize=False).
        image, label = read_and_decode_single_example(train_files, label_type="label_normal", normalize=False)

        # Shuffled mini-batches from a queue; min_after_dequeue=1000 controls
        # the shuffling buffer, capacity=2000 bounds queue memory.
        X_def, y_def = tf.train.shuffle_batch([image, label], batch_size=batch_size, capacity=2000, min_after_dequeue=1000)

        # Placeholders
        # Default to the queue output, but allow feeding explicit data;
        # images are 299x299 single-channel, labels a scalar per example.
        X = tf.placeholder_with_default(X_def, shape=[None, 299, 299, 1])
        y = tf.placeholder_with_default(y_def, shape=[None])

        # Ensure float input for the conv layers below.
        X = tf.cast(X, dtype=tf.float32)

    # Convolutional layer 1
    with tf.name_scope('conv1') as scope:
        conv1 = tf.layers.conv2d(
            X,  # Input data
            filters=32,  # 32 filters
# Run-time mode flags fed via feed_dict.
# NOTE(review): `training` uses tf.bool but `is_testing` uses Python bool —
# TF coerces both, but the inconsistency looks unintentional; confirm.
training = tf.placeholder(dtype=tf.bool, name="is_training")
is_testing = tf.placeholder(dtype=bool, shape=(), name="is_testing")

# create global step for decaying learning rate
global_step = tf.Variable(0, trainable=False)
# Exponential decay: rate drops by `decay_factor` every
# `steps_per_epoch * epochs_per_decay` steps (step-wise when staircase=True).
learning_rate = tf.train.exponential_decay(starting_rate, global_step, steps_per_epoch * epochs_per_decay, decay_factor, staircase=staircase)

with tf.name_scope('inputs') as scope:
    # Read one (image, label) example; label type comes from the `how`
    # variable (defined off-view), no normalization or distortion applied.
    image, label = read_and_decode_single_example(train_files, label_type=how, normalize=False, distort=False)

    # Shuffled mini-batches from a queue; min_after_dequeue=1000 controls
    # the shuffling buffer, capacity=2000 bounds queue memory.
    X_def, y_def = tf.train.shuffle_batch([image, label], batch_size=batch_size, capacity=2000, seed=None, min_after_dequeue=1000)

    # Placeholders
    # Default to the queue output, but allow feeding explicit data.
    # NOTE(review): `y` is shaped like an image ([None, 288, 288, 1]) —
    # presumably per-pixel mask labels (segmentation); verify against
    # read_and_decode_single_example's output for this `how` value.
    X = tf.placeholder_with_default(X_def, shape=[None, 288, 288, 1])
    y = tf.placeholder_with_default(y_def, shape=[None, 288, 288, 1])

    # cast to float and scale input data
    X_adj = tf.cast(X, dtype=tf.float32)
    X_adj = _scale_input_data(X_adj,