with tf.device('/cpu:0'):
    # read and decode the training examples on the CPU
    image, label = read_and_decode_single_example(
        train_files, label_type=how, normalize=False, distort=False)

    # batch the examples with a shuffle queue
    X_def, y_def = tf.train.shuffle_batch(
        [image, label], batch_size=batch_size, capacity=2000,
        seed=None, min_after_dequeue=1000)

# Placeholders - default to the training batch, so a feed_dict is only needed for evaluation
X = tf.placeholder_with_default(X_def, shape=[None, 288, 288, 1])
y = tf.placeholder_with_default(y_def, shape=[None, 288, 288, 1])

# cast the input to float
X_fl = tf.cast(X, tf.float32)

# optional online data augmentation
if distort:
    X_dis, y_adj = augment(X_fl, y, horizontal_flip=True, augment_labels=True,
                           vertical_flip=True, mixup=0)
else:
    y_adj = y
    X_dis = X_fl

# scale the input data
X_adj = _scale_input_data(X_dis, contrast=contrast, mu=127.0, scale=255.0)
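# `_scale_input_data` is a helper defined elsewhere in the project; its body is not
# shown here. A minimal, commented-out sketch of the assumed behavior (optionally
# adjust the contrast, then center on `mu` and divide by `scale`); the actual
# implementation may differ:
#
# def _scale_input_data(X, contrast=None, mu=127.0, scale=255.0):
#     if contrast is not None and contrast != 1.0:
#         # stretch the contrast of the images
#         X = tf.image.adjust_contrast(X, contrast)
#     # center the pixel values on mu and scale them down
#     X = tf.subtract(X, mu)
#     X = tf.divide(X, scale)
#     return X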
# Convolutional layer 1
with tf.name_scope('conv1') as scope:
    conv1 = tf.layers.conv2d(
        X_adj,                          # input data
        filters=32,                     # 32 filters
        kernel_size=(3, 3),             # kernel size: 3x3
        strides=(2, 2),                 # stride of 2 halves the spatial dimensions
        padding='SAME',                 # "same" padding
        activation=None,                # no activation here
        kernel_initializer=tf.truncated_normal_initializer(stddev=5e-2, seed=100),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lamC),