training=training, name='bn5') # apply relu conv5_bn_relu = tf.nn.relu(conv5, name='relu5') if stop: conv5_bn_relu = tf.stop_gradient(conv5_bn_relu, name="pool5_freeze") # print("conv5_bn_relu", conv5_bn_relu.shape) fc1 = _conv2d_batch_norm(conv5_bn_relu, 2048, kernel_size=(1, 1), stride=(1, 1), training=training, epsilon=1e-8, padding="SAME", seed=1013, lambd=lamC, name="fc_1") # print("fc1", fc1.shape) with tf.name_scope('unpool1') as scope: unpool1 = tf.layers.conv2d_transpose( fc1, filters=256, kernel_size=(4, 4), strides=(2, 2), padding='SAME', activation=tf.nn.elu,
X = tf.cast(X, dtype=tf.float32) # center the pixel data mu = tf.constant(mu, name="pixel_mean") X = tf.subtract(X, mu, name="centered_input") # scale the input # X = tf.divide(X, 255.0) # Convolutional layer 1 conv0 = _conv2d_batch_norm(X, 64, kernel_size=(3, 3), stride=(2, 2), training=training, epsilon=1e-8, padding="VALID", seed=100, lambd=lamC, name="1.1") ################################## ## Branch 1 conv1_res = _conv2d_batch_norm(conv0, 32, kernel_size=(1, 1), stride=(1, 1), training=training, epsilon=1e-8, padding="SAME", seed=101,
with tf.name_scope('inputs') as scope: image, label = read_and_decode_single_example(train_files, label_type=how, normalize=False) X_def, y_def = tf.train.shuffle_batch([image, label], batch_size=batch_size, capacity=2000, min_after_dequeue=1000) # Placeholders X = tf.placeholder_with_default(X_def, shape=[None, 299, 299, 1]) y = tf.placeholder_with_default(y_def, shape=[None]) # increase the contrast and cast to float X_adj = _scale_input_data(X, contrast=contrast, mu=mu) # Convolutional layer 1 conv1 = _conv2d_batch_norm(X_adj, 64, kernel_size=(3,3), stride=(1,1), training=training, epsilon=1e-8, padding="VALID", seed=100, lambd=lamC, name="1.1") conv1 = _conv2d_batch_norm(conv1, 64, kernel_size=(3, 3), stride=(1, 1), training=training, epsilon=1e-8, padding="SAME", seed=100, lambd=lamC, name="1.2") # Max pooling layer 1 with tf.name_scope('pool1') as scope: pool1 = tf.layers.max_pooling2d( conv1, # Input pool_size=(2, 2), # Pool size: 2x2 strides=(2, 2), # Stride: 2 padding='SAME', # "same" padding name='pool1' ) # optional dropout if dropout: pool1 = tf.layers.dropout(pool1, rate=pooldropout_rate, seed=103, training=training)
with tf.name_scope('pool5') as scope: pool5 = tf.layers.max_pooling2d( conv5_bn_relu, pool_size=(2, 2), # Pool size: 2x2 strides=(2, 2), # Stride: 2 padding='SAME', name='pool5' ) if dropout: pool5 = tf.layers.dropout(pool5, rate=pooldropout_rate, seed=115, training=training) if stop: pool5 = tf.stop_gradient(pool5, name="pool5_freeze") fc1 = _conv2d_batch_norm(pool5, 2048, kernel_size=(5, 5), stride=(5, 5), training=training, epsilon=1e-8, padding="VALID", seed=1013, lambd=lamC, name="fc_1") fc2 = _conv2d_batch_norm(fc1, 2048, kernel_size=(1, 1), stride=(1, 1), training=training, epsilon=1e-8, padding="VALID", seed=1014, lambd=lamC, name="fc_2") with tf.name_scope('up_conv1') as scope: unpool1 = tf.layers.conv2d_transpose( fc2, filters=1024, kernel_size=(3, 3), strides=(3, 3), padding='SAME', activation=tf.nn.elu, kernel_initializer=tf.truncated_normal_initializer(stddev=5e-2, seed=11435), kernel_regularizer=None, name='up_conv1'
def _stem(input, lamC=0.0, training=tf.placeholder(dtype=tf.bool, name="is_training")):
    """Inception-style input stem.

    Three 3x3 convs (first downsamples), then two pool/strided-conv
    reductions wrapped around a dual-branch stage (plain 3x3 vs a
    factorized 7x7 path), each reduction merged by channel concat.

    NOTE(review): the `training` default builds a placeholder at function
    definition time; callers normally pass their own `training` tensor.
    """
    # Initial 3x3 conv stack — first conv strides 2 to downsample
    x = _conv2d_batch_norm(input, 32, kernel_size=(3, 3), stride=(2, 2),
                           training=training, epsilon=1e-8, padding="VALID",
                           seed=100, lambd=lamC, name="stem_1.1")
    x = _conv2d_batch_norm(x, 32, kernel_size=(3, 3), stride=(1, 1),
                           training=training, epsilon=1e-8, padding="VALID",
                           lambd=lamC, name="stem_1.2")
    x = _conv2d_batch_norm(x, 64, kernel_size=(3, 3), stride=(1, 1),
                           training=training, epsilon=1e-8, padding="SAME",
                           lambd=lamC, name="stem_1.3")

    # Reduction 1: parallel max-pool and strided conv, merged on channels
    with tf.name_scope('stem_pool1'):
        pooled = tf.layers.max_pooling2d(x, pool_size=(3, 3), strides=(2, 2),
                                         padding='VALID', name='stem_pool1')
    strided = _conv2d_batch_norm(x, 32, kernel_size=(3, 3), stride=(2, 2),
                                 training=training, epsilon=1e-8, padding="VALID",
                                 lambd=lamC, name="stem_pool1.1")
    with tf.name_scope("stem_concat1"):
        merged = tf.concat([pooled, strided], axis=3, name='stem_concat1')

    # Branch A: 1x1 bottleneck then 3x3
    branch_a = _conv2d_batch_norm(merged, 48, kernel_size=(1, 1), stride=(1, 1),
                                  training=training, epsilon=1e-8, padding="SAME",
                                  lambd=lamC, name="stem_1.1.1")
    branch_a = _conv2d_batch_norm(branch_a, 64, kernel_size=(3, 3), stride=(1, 1),
                                  training=training, epsilon=1e-8, padding="VALID",
                                  lambd=lamC, name="stem_1.1.2")

    # Branch B: 1x1 bottleneck, factorized 7x7 (7x1 then 1x7), then 3x3
    branch_b = _conv2d_batch_norm(merged, 48, kernel_size=(1, 1), stride=(1, 1),
                                  training=training, epsilon=1e-8, padding="SAME",
                                  lambd=lamC, name="stem_1.2.1")
    branch_b = _conv2d_batch_norm(branch_b, 64, kernel_size=(7, 1), stride=(1, 1),
                                  training=training, epsilon=1e-8, padding="SAME",
                                  lambd=lamC, name="stem_1.2.2")
    branch_b = _conv2d_batch_norm(branch_b, 64, kernel_size=(1, 7), stride=(1, 1),
                                  training=training, epsilon=1e-8, padding="SAME",
                                  lambd=lamC, name="stem_1.2.3")
    branch_b = _conv2d_batch_norm(branch_b, 64, kernel_size=(3, 3), stride=(1, 1),
                                  training=training, epsilon=1e-8, padding="VALID",
                                  lambd=lamC, name="stem_1.2.4")

    with tf.name_scope("stem_concat2"):
        merged2 = tf.concat([branch_a, branch_b], axis=3, name='stem_concat2')

    # Reduction 2: same pool / strided-conv pattern as reduction 1
    with tf.name_scope('stem_pool2'):
        pooled2 = tf.layers.max_pooling2d(merged2, pool_size=(3, 3), strides=(2, 2),
                                          padding='VALID', name='stem_pool2')
    strided2 = _conv2d_batch_norm(merged2, 128, kernel_size=(3, 3), stride=(2, 2),
                                  training=training, epsilon=1e-8, padding="VALID",
                                  lambd=lamC, name="stem_pool2.1")
    with tf.name_scope("stem_concat3"):
        return tf.concat([pooled2, strided2], axis=3, name='stem_concat3')
def _reduce_b(input, name, training=tf.placeholder(dtype=tf.bool, name="is_training"), lamC=0.0):
    """Inception reduction-B block: downsamples spatially by stride/pool 2.

    Three parallel paths — max-pool, 1x1->3x3 strided conv, and a
    factorized-7x7 path ending in a strided 3x3 — concatenated on the
    channel axis. `name` prefixes every layer/scope name so the block
    can be instantiated multiple times.

    NOTE(review): the `training` default builds a placeholder at function
    definition time; callers normally pass their own `training` tensor.
    """
    # Path 1: straight max-pool downsample
    with tf.name_scope(name + "reduce_b_branch_1"):
        pool_path = tf.layers.max_pooling2d(input, pool_size=(3, 3), strides=(2, 2),
                                            padding='VALID',
                                            name=name + 'b_reduce_branch_1')

    # Path 2: 1x1 bottleneck then strided 3x3
    conv_path = _conv2d_batch_norm(input, 96, kernel_size=(1, 1), stride=(1, 1),
                                   training=training, epsilon=1e-8, padding="SAME",
                                   lambd=lamC, name=name + "b_reduce_branch_2")
    conv_path = _conv2d_batch_norm(conv_path, 96, kernel_size=(3, 3), stride=(2, 2),
                                   training=training, epsilon=1e-8, padding="VALID",
                                   lambd=lamC, name=name + "b_reduce_branch_2.1")

    # Path 3: 1x1 bottleneck, factorized 7x7 (1x7 then 7x1), strided 3x3
    fact_path = _conv2d_batch_norm(input, 128, kernel_size=(1, 1), stride=(1, 1),
                                   training=training, epsilon=1e-8, padding="SAME",
                                   lambd=lamC, name=name + "b_reduce_branch_3.1")
    fact_path = _conv2d_batch_norm(fact_path, 128, kernel_size=(1, 7), stride=(1, 1),
                                   training=training, epsilon=1e-8, padding="SAME",
                                   lambd=lamC, name=name + "b_reduce_branch_3.2")
    fact_path = _conv2d_batch_norm(fact_path, 192, kernel_size=(7, 1), stride=(1, 1),
                                   training=training, epsilon=1e-8, padding="SAME",
                                   lambd=lamC, name=name + "b_reduce_branch_3.3")
    fact_path = _conv2d_batch_norm(fact_path, 192, kernel_size=(3, 3), stride=(2, 2),
                                   training=training, epsilon=1e-8, padding="VALID",
                                   lambd=lamC, name=name + "b_reduce_branch_3.4")

    # Merge the three downsampled paths on the channel axis
    with tf.name_scope(name + "b_reduce_concat_1"):
        return tf.concat([pool_path, conv_path, fact_path], axis=3,
                         name=name + 'b_reduce_concat')
gamma_initializer=tf.ones_initializer(), moving_mean_initializer=tf.zeros_initializer(), moving_variance_initializer=tf.ones_initializer(), training=training, fused=True, name='bn_up_conv4' ) if dropout: unpool4 = tf.layers.dropout(unpool4, rate=convdropout_rate, seed=14537, training=training) # activation unpool4 = tf.nn.relu(unpool4, name='relu10') # conv layer - 160x160x64 uconv5 = _conv2d_batch_norm(unpool4, 64, kernel_size=(3, 3), stride=(1, 1), training=training, lambd=lamC, name="up_conv5", activation="relu") # resize to 320x320x64 with tf.name_scope('resize_6') as scope: unpool6 = tf.image.resize_images(uconv5, size=[size // 2, size // 2], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) # 320x320x32 uconv5 = _conv2d_batch_norm(unpool6, 32, kernel_size=(3, 3), stride=(1, 1), training=training, lambd=lamC, name="up_conv6", activation="relu") # 320x320x32 uconv6 = _conv2d_batch_norm(uconv5, 32, kernel_size=(3, 3), stride=(1, 1), training=training, lambd=lamC, name="up_conv7", activation="relu") # 640x640x16 with tf.name_scope('upsample_4') as scope:
def _block_c(input, name, lamC=0.0, training=tf.placeholder(dtype=tf.bool, name="is_training")):
    """Inception block C: four parallel branches, spatial size preserved.

    Branch 1: 2x2 average pool (stride 1, SAME) + 1x1 conv.
    Branch 2: plain 1x1 conv.
    Branch 3: 1x1 conv fanned out into 1x3 and 3x1 convs (two outputs).
    Branch 4: 1x1 -> 1x3 -> 3x1 stack, fanned out into 1x3 and 3x1 convs.
    All six outputs are concatenated on the channel axis.

    NOTE(review): the `training` default builds a placeholder at function
    definition time; callers normally pass their own `training` tensor.
    """
    # Branch 1: average pool then 1x1 conv
    with tf.name_scope(name + "b_branch_1_pool"):
        pool_out = tf.layers.average_pooling2d(input, pool_size=(2, 2),
                                               strides=(1, 1), padding='SAME',
                                               name=name + "c_branch_1_pool")
    pool_out = _conv2d_batch_norm(pool_out, 128, kernel_size=(1, 1), stride=(1, 1),
                                  training=training, epsilon=1e-8, padding="SAME",
                                  lambd=lamC, name=name + "c_branch_1_conv_1.0")

    # Branch 2: 1x1 conv only
    point_out = _conv2d_batch_norm(input, 128, kernel_size=(1, 1), stride=(1, 1),
                                   training=training, epsilon=1e-8, padding="SAME",
                                   lambd=lamC, name=name + "c_branch_2_conv_1.0")

    # Branch 3: 1x1 stem, then split into 1x3 and 3x1 heads
    b3_stem = _conv2d_batch_norm(input, 192, kernel_size=(1, 1), stride=(1, 1),
                                 training=training, epsilon=1e-8, padding="SAME",
                                 lambd=lamC, name=name + "c_branch_3_conv_1.0")
    b3_wide = _conv2d_batch_norm(b3_stem, 128, kernel_size=(1, 3), stride=(1, 1),
                                 training=training, epsilon=1e-8, padding="SAME",
                                 lambd=lamC, name=name + "c_branch_3_conv_1.1")
    b3_tall = _conv2d_batch_norm(b3_stem, 128, kernel_size=(3, 1), stride=(1, 1),
                                 training=training, epsilon=1e-8, padding="SAME",
                                 lambd=lamC, name=name + "c_branch_3_conv_1.2")

    # Branch 4: deeper 1x1 -> 1x3 -> 3x1 stem, then the same two-way split
    b4_stem = _conv2d_batch_norm(input, 192, kernel_size=(1, 1), stride=(1, 1),
                                 training=training, epsilon=1e-8, padding="SAME",
                                 lambd=lamC, name=name + "c_branch_4_conv_1.0")
    b4_stem = _conv2d_batch_norm(b4_stem, 256, kernel_size=(1, 3), stride=(1, 1),
                                 training=training, epsilon=1e-8, padding="SAME",
                                 lambd=lamC, name=name + "c_branch_4_conv_1.1")
    b4_stem = _conv2d_batch_norm(b4_stem, 256, kernel_size=(3, 1), stride=(1, 1),
                                 training=training, epsilon=1e-8, padding="SAME",
                                 lambd=lamC, name=name + "c_branch_4_conv_1.2")
    b4_wide = _conv2d_batch_norm(b4_stem, 128, kernel_size=(1, 3), stride=(1, 1),
                                 training=training, epsilon=1e-8, padding="SAME",
                                 lambd=lamC, name=name + "c_branch_4_conv_1.3")
    b4_tall = _conv2d_batch_norm(b4_stem, 128, kernel_size=(3, 1), stride=(1, 1),
                                 training=training, epsilon=1e-8, padding="SAME",
                                 lambd=lamC, name=name + "c_branch_4_conv_1.4")

    # Merge all six branch outputs on the channel axis
    with tf.name_scope(name + "b_concat_1"):
        return tf.concat([pool_out, point_out, b3_wide, b3_tall, b4_wide, b4_tall],
                         axis=3, name=name + 'b_concat_1')
name='pool5') if dropout: pool5 = tf.layers.dropout(pool5, rate=pooldropout_rate, seed=115, training=training) if stop: pool5 = tf.stop_gradient(pool5, name="pool5_freeze") fc1 = _conv2d_batch_norm(pool5, 2048, kernel_size=(5, 5), stride=(5, 5), training=training, epsilon=1e-8, padding="VALID", seed=1013, lambd=lamC, name="fc_1") fc1 = tf.layers.dropout(fc1, rate=fcdropout_rate, seed=11537, training=training) fc2 = _conv2d_batch_norm(fc1, 2048, kernel_size=(1, 1), stride=(1, 1), training=training,
batch_size=batch_size, capacity=2000, min_after_dequeue=1000) # Placeholders X = tf.placeholder_with_default(X_def, shape=[None, 299, 299, 1]) y = tf.placeholder_with_default(y_def, shape=[None]) X = tf.cast(X, dtype=tf.float32) # Convolutional layer 1 conv1 = _conv2d_batch_norm(X, 32, kernel_size=(3, 3), stride=(2, 2), training=training, epsilon=1e-8, padding="VALID", seed=100, lambd=lamC, name="1.1") conv1 = _conv2d_batch_norm(conv1, 32, kernel_size=(3, 3), stride=(1, 1), training=training, epsilon=1e-8, padding="SAME", seed=None, lambd=lamC, name="1.2")
X_def, y_def = tf.train.shuffle_batch([image, label], batch_size=batch_size, capacity=2000, seed=None, min_after_dequeue=1000) # Placeholders X = tf.placeholder_with_default(X_def, shape=[None, 299, 299, 1]) y = tf.placeholder_with_default(y_def, shape=[None]) #X = tf.cast(X, dtype=tf.float32) X_adj = _scale_input_data(X, contrast=contrast, mu=0, scale=255.0) # data augmentation if distort: X_adj, y = augment(X_adj, y, horizontal_flip=True, vertical_flip=True, mixup=0) conv1 = _conv2d_batch_norm(X_adj, 64, kernel_size=(3, 3), stride=(2, 2), training=training, epsilon=1e-8, padding="SAME", seed=1000, lambd=lamC, name="1.0") ############################################################ ## Branch 1 conv11 = _conv2d_batch_norm(conv1, 32, kernel_size=(1, 1), stride=(1, 1), training=training, epsilon=1e-8, padding="SAME", seed=1001, lambd=lamC, name="1.1.1") conv11 = _conv2d_batch_norm(conv11, 32, kernel_size=(3, 3), stride=(1, 1), training=training, epsilon=1e-8, padding="SAME", seed=1002, lambd=lamC, name="1.1.2") conv11 = _conv2d_batch_norm(conv11, 32, kernel_size=(3, 3), stride=(1, 1), training=training, epsilon=1e-8, padding="SAME", seed=1003, lambd=lamC, name="1.1.3") ########################################################## ## Branch 2 conv12 = _conv2d_batch_norm(conv1, 32, kernel_size=(1, 1), stride=(1, 1), training=training, epsilon=1e-8,
# increase the contrast and cast to float X_adj = tf.image.adjust_contrast(X, 2.0) X_adj = tf.cast(X_adj, dtype=tf.float32) # center the pixel data mu = tf.constant(mu, name="pixel_mean", dtype=tf.float32) X_adj = tf.subtract(X_adj, mu, name="centered_input") # scale the data X_adj = tf.divide(X_adj, 255.0) conv1 = _conv2d_batch_norm(X_adj, 32, kernel_size=(3, 3), stride=(2, 2), training=training, epsilon=1e-8, padding="VALID", seed=100, lambd=lamC, name="1.1") conv1 = _conv2d_batch_norm(conv1, 32, kernel_size=(3, 3), stride=(1, 1), training=training, epsilon=1e-8, padding="VALID", seed=101, lambd=lamC, name="1.2") conv1 = _conv2d_batch_norm(conv1,
y_adj = y X_dis = X_fl # cast to float and scale input data X_adj = _scale_input_data(X_dis, contrast=contrast, mu=127.0, scale=255.0) # Convolutional layer 1 - output 320x320 conv1 = _conv2d_batch_norm(X_adj, 32, kernel_size=(3, 3), stride=(2, 2), training=training, epsilon=1e-8, padding="SAME", seed=None, lambd=0.0, name="1.0", activation="relu") conv1 = _conv2d_batch_norm(conv1, 32, kernel_size=(3, 3), stride=(1, 1), training=training, epsilon=1e-8, padding="SAME", seed=None, lambd=0.0,
X_def, y_def = tf.train.shuffle_batch([image, label], batch_size=batch_size, capacity=2000, min_after_dequeue=1000) # Placeholders X = tf.placeholder_with_default(X_def, shape=[None, 299, 299, 1]) y = tf.placeholder_with_default(y_def, shape=[None]) X = tf.cast(X, dtype=tf.float32) # downsize input conv1 = _conv2d_batch_norm(X, filters=32, stride=(2, 2), training=training, padding="SAME", name="1.1") # layer 1 branch 1 conv12 = _conv2d_batch_norm(conv1, filters=32, padding="SAME", training=training, name="1.2") conv12 = _conv2d_batch_norm(conv12, filters=32, padding="SAME", training=training, name="1.3")
staircase=staircase) with tf.name_scope('inputs') as scope: image, label = read_and_decode_single_example(train_files, label_type="label_normal", normalize=False) X_def, y_def = tf.train.shuffle_batch([image, label], batch_size=batch_size, capacity=2000, min_after_dequeue=1000) # Placeholders X = tf.placeholder_with_default(X_def, shape=[None, 299, 299, 1]) y = tf.placeholder_with_default(y_def, shape=[None]) X = tf.cast(X, dtype=tf.float32) # Input stem conv1 = _conv2d_batch_norm(X, filters=32, stride=(1, 1), training=training, padding="VALID", name="1.1") # Max pooling layer 1 with tf.name_scope('pool1') as scope: pool1 = tf.layers.max_pooling2d( conv1, # Input pool_size=(2, 2), strides=(2, 2), padding='VALID', name='pool1' ) # Layer 2 branch 1 conv2 = _conv2d_batch_norm(pool1, filters=48, stride=(1, 1), training=training, padding="SAME", name="2.1") conv2 = _conv2d_batch_norm(conv2, filters=48, stride=(1, 1), training=training, padding="SAME", name="2.2")