def dilated_icpr_rate6(x, is_training, weight_decay, crop_size, num_input_bands, num_classes,
                       extract_features, batch_norm=True):
    # Reshape input data into a batch of crop_size x crop_size patches.
    x = tf.reshape(x, shape=[-1, crop_size, crop_size, num_input_bands])

    conv1 = _conv_layer(x, [5, 5, num_input_bands, 64], 'conv1', weight_decay, is_training,
                        rate=1, batch_norm=batch_norm, is_normal_conv=False)
    conv2 = _conv_layer(conv1, [5, 5, 64, 64], 'conv2', weight_decay, is_training,
                        rate=2, batch_norm=batch_norm, is_normal_conv=False)
    conv3 = _conv_layer(conv2, [4, 4, 64, 128], 'conv3', weight_decay, is_training,
                        rate=3, batch_norm=batch_norm, is_normal_conv=False)
    conv4 = _conv_layer(conv3, [4, 4, 128, 128], 'conv4', weight_decay, is_training,
                        rate=4, batch_norm=batch_norm, is_normal_conv=False)
    conv5 = _conv_layer(conv4, [3, 3, 128, 256], 'conv5', weight_decay, is_training,
                        rate=5, batch_norm=batch_norm, is_normal_conv=False)
    conv6 = _conv_layer(conv5, [3, 3, 256, 256], 'conv6', weight_decay, is_training,
                        rate=6, batch_norm=batch_norm, is_normal_conv=False)

    with tf.compat.v1.variable_scope('conv_classifier') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[1, 1, 256, num_classes],
                                             ini=tf.contrib.layers.xavier_initializer_conv2d(dtype=tf.float32),
                                             weight_decay=weight_decay)
        biases = _variable_on_cpu('biases', [num_classes], tf.constant_initializer(0.0))
        conv = tf.nn.conv2d(conv6, kernel, [1, 1, 1, 1], padding='SAME')
        conv_classifier = tf.nn.bias_add(conv, biases, name=scope.name)

    return conv_classifier
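
# --- Usage sketch (not part of the original file) ---------------------------
# Minimal example of wiring dilated_icpr_rate6 into a TF1 graph and running one
# random batch. Patch size, band count, class count, and weight decay are
# illustrative assumptions, not values taken from this repository.
def _demo_dilated_icpr_rate6():
    import numpy as np
    crop_size, num_bands, num_classes = 25, 3, 5
    data = tf.compat.v1.placeholder(tf.float32, [None, crop_size, crop_size, num_bands])
    is_training = tf.compat.v1.placeholder(tf.bool, name='is_training')
    logits = dilated_icpr_rate6(data, is_training, weight_decay=5e-4, crop_size=crop_size,
                                num_input_bands=num_bands, num_classes=num_classes,
                                extract_features=False)
    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        batch = np.random.rand(2, crop_size, crop_size, num_bands)
        out = sess.run(logits, feed_dict={data: batch, is_training: False})
        # All convolutions are dilated with stride 1, so the logits keep the
        # input resolution: (2, crop_size, crop_size, num_classes).
        print(out.shape)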
def dilated_icpr_rate6_densely(x, is_training, weight_decay, crop_size, num_input_bands,
                               num_classes, extract_features, batch_norm=True):
    # Reshape input data into a batch of crop_size x crop_size patches.
    x = tf.reshape(x, shape=[-1, crop_size, crop_size, num_input_bands])

    conv1 = _conv_layer(x, [5, 5, num_input_bands, 32], 'conv1', weight_decay, is_training,
                        rate=1, batch_norm=batch_norm)
    conv2 = _conv_layer(conv1, [5, 5, 32, 32], 'conv2', weight_decay, is_training,
                        rate=2, batch_norm=batch_norm, is_normal_conv=False)
    try:
        c1 = tf.concat([conv1, conv2], 3)  # c1 = 32 + 32 = 64 channels
    except (TypeError, ValueError):  # fall back to the pre-1.0 tf.concat signature
        c1 = tf.concat(concat_dim=3, values=[conv1, conv2])

    conv3 = _conv_layer(c1, [4, 4, 64, 64], 'conv3', weight_decay, is_training,
                        rate=3, batch_norm=batch_norm, is_normal_conv=False)
    try:
        c2 = tf.concat([c1, conv3], 3)  # c2 = 64 + 64 = 128 channels
    except (TypeError, ValueError):
        c2 = tf.concat(concat_dim=3, values=[c1, conv3])

    conv4 = _conv_layer(c2, [4, 4, 128, 64], 'conv4', weight_decay, is_training,
                        rate=4, batch_norm=batch_norm, is_normal_conv=False)
    try:
        c3 = tf.concat([c2, conv4], 3)  # c3 = 128 + 64 = 192 channels
    except (TypeError, ValueError):
        c3 = tf.concat(concat_dim=3, values=[c2, conv4])

    conv5 = _conv_layer(c3, [3, 3, 192, 128], 'conv5', weight_decay, is_training,
                        rate=5, batch_norm=batch_norm, is_normal_conv=False)
    try:
        c4 = tf.concat([c3, conv5], 3)  # c4 = 192 + 128 = 320 channels
    except (TypeError, ValueError):
        c4 = tf.concat(concat_dim=3, values=[c3, conv5])

    conv6 = _conv_layer(c4, [3, 3, 320, 128], 'conv6', weight_decay, is_training,
                        rate=6, batch_norm=batch_norm, is_normal_conv=False)
    try:
        c5 = tf.concat([c4, conv6], 3)  # c5 = 320 + 128 = 448 channels
    except (TypeError, ValueError):
        c5 = tf.concat(concat_dim=3, values=[c4, conv6])

    with tf.compat.v1.variable_scope('conv_classifier') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[1, 1, 448, num_classes],
                                             ini=tf.contrib.layers.xavier_initializer_conv2d(dtype=tf.float32),
                                             weight_decay=weight_decay)
        biases = _variable_on_cpu('biases', [num_classes], tf.constant_initializer(0.0))
        conv = tf.nn.conv2d(c5, kernel, [1, 1, 1, 1], padding='SAME')
        conv_classifier = tf.nn.bias_add(conv, biases, name=scope.name)

    return conv_classifier
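
# --- Channel bookkeeping for the densely connected variant above (sketch, not
# part of the original file). Each stage concatenates everything computed so
# far with the new block's output, which is why the classifier expects 448
# input channels.
def _dense_concat_channels():
    total = 32                                   # conv1 output
    counts = []
    for block_width in [32, 64, 64, 128, 128]:   # conv2..conv6 outputs
        total += block_width                     # concat previous stack + new block
        counts.append(total)
    return counts                                # [64, 128, 192, 320, 448]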
def dilated_grsl_rate8(x, is_training, weight_decay, crop_size, num_input_bands, num_classes,
                       extract_features):
    # Reshape input data into a batch of crop_size x crop_size patches.
    x = tf.reshape(x, shape=[-1, crop_size, crop_size, num_input_bands])

    # All max pools below use stride 1, so spatial resolution is preserved.
    conv1 = _conv_layer(x, [5, 5, num_input_bands, 64], 'conv1', weight_decay, is_training,
                        rate=1, activation='lrelu')
    pool1 = _max_pool(conv1, kernel=[1, 3, 3, 1], strides=[1, 1, 1, 1], name='pool1')

    conv2 = _conv_layer(pool1, [5, 5, 64, 64], 'conv2', weight_decay, is_training,
                        rate=2, activation='lrelu', is_normal_conv=False)
    pool2 = _max_pool(conv2, kernel=[1, 3, 3, 1], strides=[1, 1, 1, 1], name='pool2')

    conv3 = _conv_layer(pool2, [4, 4, 64, 128], 'conv3', weight_decay, is_training,
                        rate=3, activation='lrelu', is_normal_conv=False)
    pool3 = _max_pool(conv3, kernel=[1, 3, 3, 1], strides=[1, 1, 1, 1], name='pool3')

    conv4 = _conv_layer(pool3, [4, 4, 128, 128], 'conv4', weight_decay, is_training,
                        rate=4, activation='lrelu', is_normal_conv=False)
    pool4 = _max_pool(conv4, kernel=[1, 3, 3, 1], strides=[1, 1, 1, 1], name='pool4')

    conv5 = _conv_layer(pool4, [3, 3, 128, 192], 'conv5', weight_decay, is_training,
                        rate=5, activation='lrelu', is_normal_conv=False)
    pool5 = _max_pool(conv5, kernel=[1, 3, 3, 1], strides=[1, 1, 1, 1], name='pool5')

    conv6 = _conv_layer(pool5, [3, 3, 192, 192], 'conv6', weight_decay, is_training,
                        rate=6, activation='lrelu', is_normal_conv=False)
    pool6 = _max_pool(conv6, kernel=[1, 3, 3, 1], strides=[1, 1, 1, 1], name='pool6')

    conv7 = _conv_layer(pool6, [3, 3, 192, 256], 'conv7', weight_decay, is_training,
                        rate=7, activation='lrelu', is_normal_conv=False)
    pool7 = _max_pool(conv7, kernel=[1, 3, 3, 1], strides=[1, 1, 1, 1], name='pool7')

    conv8 = _conv_layer(pool7, [3, 3, 256, 256], 'conv8', weight_decay, is_training,
                        rate=8, activation='lrelu', is_normal_conv=False)
    pool8 = _max_pool(conv8, kernel=[1, 3, 3, 1], strides=[1, 1, 1, 1], name='pool8')

    with tf.compat.v1.variable_scope('conv_classifier') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[1, 1, 256, num_classes],
                                             ini=tf.contrib.layers.xavier_initializer_conv2d(dtype=tf.float32),
                                             weight_decay=weight_decay)
        biases = _variable_on_cpu('biases', [num_classes], tf.constant_initializer(0.0))
        conv = tf.nn.conv2d(pool8, kernel, [1, 1, 1, 1], padding='SAME')
        conv_classifier = tf.nn.bias_add(conv, biases, name=scope.name)

    if extract_features is True:
        return [tf.image.resize_bilinear(conv1, [32, 32]), 64], \
               [tf.image.resize_bilinear(pool5, [32, 32]), 192], \
               [tf.image.resize_bilinear(pool8, [32, 32]), 256], conv_classifier
    else:
        return conv_classifier
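
# --- Receptive-field check for the rate-1..8 stack above (sketch, not part of
# the original file). With stride 1 everywhere, each conv or pool layer adds
# (kernel - 1) * dilation pixels to the receptive field.
def _grsl_rate8_receptive_field():
    rf = 1
    layers = [(5, 1), (3, 1),   # conv1 (rate 1) + pool1 (3x3, stride 1)
              (5, 2), (3, 1),   # conv2 (rate 2) + pool2
              (4, 3), (3, 1),   # conv3 (rate 3) + pool3
              (4, 4), (3, 1),   # conv4 (rate 4) + pool4
              (3, 5), (3, 1),   # conv5 (rate 5) + pool5
              (3, 6), (3, 1),   # conv6 (rate 6) + pool6
              (3, 7), (3, 1),   # conv7 (rate 7) + pool7
              (3, 8), (3, 1)]   # conv8 (rate 8) + pool8
    for kernel, rate in layers:
        rf += (kernel - 1) * rate
    return rf                   # 102 pixels, with no loss of resolution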
def unet(x, dropout, is_training, weight_decay, crop, num_input_bands, num_classes, crop_size,
         extract_features):
    x = tf.reshape(x, shape=[-1, crop, crop, num_input_bands])

    conv1_1 = _conv_layer(x, [3, 3, num_input_bands, 64], 'conv1_1', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME')
    conv1_2 = _conv_layer(conv1_1, [3, 3, 64, 64], 'conv1_2', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME')
    s_pool1 = crop_size
    pool1 = _max_pool(conv1_2, [1, 2, 2, 1], [1, 2, 2, 1], 'pool1', pad='SAME')

    conv2_1 = _conv_layer(pool1, [3, 3, 64, 128], 'conv2_1', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME')
    conv2_2 = _conv_layer(conv2_1, [3, 3, 128, 128], 'conv2_2', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME')
    s_pool2 = math.ceil(s_pool1 / float(2))
    # s_pool2 = math.ceil(float(s_pool1 - 2 + 1) / float(2))
    pool2 = _max_pool(conv2_2, [1, 2, 2, 1], [1, 2, 2, 1], 'pool2', pad='SAME')

    conv3_1 = _conv_layer(pool2, [3, 3, 128, 256], 'conv3_1', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME')
    conv3_2 = _conv_layer(conv3_1, [3, 3, 256, 256], 'conv3_2', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME')
    s_pool3 = math.ceil(s_pool2 / float(2))
    # s_pool3 = math.ceil(float(s_pool2 - 2 + 1) / float(2))
    print(s_pool1, s_pool2, s_pool3)  # debug: patch size at each encoder level

    # pool3 = _max_pool(conv3_2, [1, 2, 2, 1], [1, 2, 2, 1], 'pool3', pad='SAME')
    #
    # conv4_1 = _conv_layer(pool2, [3, 3, 128, 256], "conv3_1", weight_decay, is_training,
    #                       strides=[1, 1, 1, 1], pad='SAME')
    # conv4_2 = _conv_layer(conv3_1, [3, 3, 256, 256], "conv3_2", weight_decay, is_training,
    #                       strides=[1, 1, 1, 1], pad='SAME')

    # ------------------------End of encoder-----------------------------

    new_shape = [tf.shape(conv3_2)[0],
                 tf.shape(conv3_2)[1] * 2,
                 tf.shape(conv3_2)[2] * 2,
                 tf.shape(conv3_2)[3] // 2]
    try:
        output_shape = tf.pack(new_shape)  # pre-1.0 TensorFlow
    except AttributeError:
        output_shape = tf.stack(new_shape)
    deconv2_1 = _deconv_layer(conv3_2, [2, 2, 128, 256], output_shape, 'deconv2_1', weight_decay,
                              [1, 2, 2, 1], pad='SAME', has_bias=True)
    # deconv2_2 = _crop_and_concat(conv2_2, deconv2_1, (s_pool2, s_pool2), (s_pool3*2, s_pool3*2))
    deconv2_2 = tf.concat(values=[conv2_2, deconv2_1], axis=-1)
    deconv2_3 = _conv_layer(deconv2_2, [3, 3, 256, 128], 'deconv2_3', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')
    deconv2_4 = _conv_layer(deconv2_3, [3, 3, 128, 128], 'deconv2_4', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')

    new_shape = [tf.shape(deconv2_4)[0],
                 tf.shape(deconv2_4)[1] * 2,
                 tf.shape(deconv2_4)[2] * 2,
                 tf.shape(deconv2_4)[3] // 2]
    try:
        output_shape = tf.pack(new_shape)  # pre-1.0 TensorFlow
    except AttributeError:
        output_shape = tf.stack(new_shape)
    deconv1_1 = _deconv_layer(deconv2_4, [2, 2, 64, 128], output_shape, 'deconv1_1', weight_decay,
                              [1, 2, 2, 1], pad='SAME', has_bias=True)
    # deconv1_2 = _crop_and_concat(conv1_2, deconv1_1, (s_pool2, s_pool2), (s_pool2, s_pool2))
    deconv1_2 = tf.concat(values=[conv1_2, deconv1_1], axis=-1)
    deconv1_3 = _conv_layer(deconv1_2, [3, 3, 128, 64], 'deconv1_3', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')
    deconv1_4 = _conv_layer(deconv1_3, [3, 3, 64, 64], 'deconv1_4', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')

    with tf.compat.v1.variable_scope('conv_classifier') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[1, 1, 64, num_classes],
                                             ini=tf.contrib.layers.xavier_initializer_conv2d(dtype=tf.float32),
                                             weight_decay=weight_decay)
        biases = _variable_on_cpu('biases', [num_classes], tf.constant_initializer(0.0))
        conv = tf.nn.conv2d(deconv1_4, kernel, [1, 1, 1, 1], padding='SAME')
        conv_classifier = tf.nn.bias_add(conv, biases, name=scope.name)

    # resize to 32x32 or 64x64; save only the first 64 maps
    if extract_features is True:
        return [tf.image.resize_bilinear(conv1_1, [32, 32]), 64], \
               [tf.image.resize_bilinear(conv3_2, [32, 32]), 256], \
               [tf.image.resize_bilinear(deconv1_4, [32, 32]), 64], conv_classifier
    else:
        return conv_classifier
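
# --- The commented-out skip connections above reference a _crop_and_concat
# helper defined elsewhere in this repository. A minimal sketch of that idea
# (hypothetical, with a simplified signature; assumes NHWC tensors with static
# spatial shapes): center-crop the encoder feature map to the decoder's size,
# then concatenate along channels.
def _crop_and_concat_sketch(encoder_feat, decoder_feat):
    enc_h, enc_w = int(encoder_feat.get_shape()[1]), int(encoder_feat.get_shape()[2])
    dec_h, dec_w = int(decoder_feat.get_shape()[1]), int(decoder_feat.get_shape()[2])
    off_h, off_w = (enc_h - dec_h) // 2, (enc_w - dec_w) // 2
    cropped = encoder_feat[:, off_h:off_h + dec_h, off_w:off_w + dec_w, :]
    return tf.concat(values=[cropped, decoder_feat], axis=-1)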
def unet_road_detection(x, dropout, is_training, weight_decay, crop, num_input_bands, num_classes,
                        crop_size, extract_features):
    x = tf.reshape(x, shape=[-1, crop, crop, num_input_bands])

    conv1_1 = _conv_layer(x, [3, 3, num_input_bands, 64], 'conv1_1', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME', activation='elu')
    conv1_2 = _conv_layer(conv1_1, [3, 3, 64, 64], 'conv1_2', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME', activation='elu')
    s_pool1 = crop_size
    pool1 = _max_pool(conv1_2, [1, 2, 2, 1], [1, 2, 2, 1], 'pool1', pad='SAME')

    conv2_1 = _conv_layer(pool1, [3, 3, 64, 128], 'conv2_1', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME', activation='elu')
    conv2_2 = _conv_layer(conv2_1, [3, 3, 128, 128], 'conv2_2', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME', activation='elu')
    s_pool2 = math.ceil(s_pool1 / float(2))
    # s_pool2 = math.ceil(float(s_pool1 - 2 + 1) / float(2))
    pool2 = _max_pool(conv2_2, [1, 2, 2, 1], [1, 2, 2, 1], 'pool2', pad='SAME')

    conv3_1 = _conv_layer(pool2, [3, 3, 128, 256], 'conv3_1', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME', activation='elu')
    conv3_2 = _conv_layer(conv3_1, [3, 3, 256, 256], 'conv3_2', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME', activation='elu')
    s_pool3 = math.ceil(s_pool2 / float(2))
    # s_pool3 = math.ceil(float(s_pool2 - 2 + 1) / float(2))
    pool3 = _max_pool(conv3_2, [1, 2, 2, 1], [1, 2, 2, 1], 'pool3', pad='SAME')

    conv4_1 = _conv_layer(pool3, [3, 3, 256, 512], 'conv4_1', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME', activation='elu')
    conv4_2 = _conv_layer(conv4_1, [3, 3, 512, 512], 'conv4_2', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME', activation='elu')

    # s_pool4 = math.ceil(s_pool3 / float(2))
    # pool4 = _max_pool(conv4_2, [1, 2, 2, 1], [1, 2, 2, 1], 'pool4', pad='SAME')
    #
    # conv5_1 = _conv_layer(pool4, [3, 3, 512, 1024], "conv5_1", weight_decay, is_training,
    #                       strides=[1, 1, 1, 1], pad='SAME', activation='elu')
    # conv5_2 = _conv_layer(conv5_1, [3, 3, 1024, 1024], "conv5_2", weight_decay, is_training,
    #                       strides=[1, 1, 1, 1], pad='SAME', activation='elu')

    # ---------------------------------End of encoder----------------------------------

    aspp = atrous_spatial_pyramid_pooling(conv4_2, [6, 12, 18], weight_decay, is_training)

    # deconv4_1 = _deconv_layer(aspp, [2, 2, 512, 1024], _get_shape(conv5_2), 'deconv4_1', weight_decay,
    #                           [1, 2, 2, 1], pad='SAME', has_bias=True)
    # # deconv4_2 = _crop_and_concat(conv2_2, deconv2_1, (s_pool2, s_pool2), (s_pool3*2, s_pool3*2))
    # deconv4_2 = tf.concat(values=[conv4_2, deconv4_1], axis=-1)
    # deconv4_3 = _conv_layer(deconv4_2, [3, 3, 1024, 512], "deconv4_3", weight_decay, is_training,
    #                         strides=[1, 1, 1, 1], pad='SAME')
    # deconv4_4 = _conv_layer(deconv4_3, [3, 3, 512, 512], "deconv4_4", weight_decay, is_training,
    #                         strides=[1, 1, 1, 1], pad='SAME')

    deconv3_1 = _deconv_layer(aspp, [2, 2, 256, 512], _get_shape(conv4_2), 'deconv3_1',
                              weight_decay, [1, 2, 2, 1], pad='SAME', has_bias=True)
    # deconv3_2 = _crop_and_concat(conv2_2, deconv2_1, (s_pool2, s_pool2), (s_pool3*2, s_pool3*2))
    deconv3_2 = tf.concat(values=[conv3_2, deconv3_1], axis=-1)
    deconv3_3 = _conv_layer(deconv3_2, [3, 3, 512, 256], 'deconv3_3', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')
    deconv3_4 = _conv_layer(deconv3_3, [3, 3, 256, 256], 'deconv3_4', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')

    deconv2_1 = _deconv_layer(deconv3_4, [2, 2, 128, 256], _get_shape(conv3_2), 'deconv2_1',
                              weight_decay, [1, 2, 2, 1], pad='SAME', has_bias=True)
    # deconv2_2 = _crop_and_concat(conv2_2, deconv2_1, (s_pool2, s_pool2), (s_pool3*2, s_pool3*2))
    deconv2_2 = tf.concat(values=[conv2_2, deconv2_1], axis=-1)
    deconv2_3 = _conv_layer(deconv2_2, [3, 3, 256, 128], 'deconv2_3', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')
    deconv2_4 = _conv_layer(deconv2_3, [3, 3, 128, 128], 'deconv2_4', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')

    deconv1_1 = _deconv_layer(deconv2_4, [2, 2, 64, 128], _get_shape(deconv2_4), 'deconv1_1',
                              weight_decay, [1, 2, 2, 1], pad='SAME', has_bias=True)
    # deconv1_2 = _crop_and_concat(conv1_2, deconv1_1, (s_pool2, s_pool2), (s_pool2, s_pool2))
    deconv1_2 = tf.concat(values=[conv1_2, deconv1_1], axis=-1)
    deconv1_3 = _conv_layer(deconv1_2, [3, 3, 128, 64], 'deconv1_3', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')
    deconv1_4 = _conv_layer(deconv1_3, [3, 3, 64, 64], 'deconv1_4', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')

    with tf.compat.v1.variable_scope('conv_classifier') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[1, 1, 64, num_classes],
                                             ini=tf.contrib.layers.xavier_initializer_conv2d(dtype=tf.float32),
                                             weight_decay=weight_decay)
        biases = _variable_on_cpu('biases', [num_classes], tf.constant_initializer(0.0))
        conv = tf.nn.conv2d(deconv1_4, kernel, [1, 1, 1, 1], padding='SAME')
        conv_classifier = tf.nn.bias_add(conv, biases, name=scope.name)

    return conv_classifier
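
# --- atrous_spatial_pyramid_pooling is defined elsewhere in this repository.
# For reference, a minimal ASPP sketch (hypothetical, not the repo's helper):
# parallel dilated 3x3 branches at the given rates plus a 1x1 branch,
# concatenated and projected back to 512 channels, since the deconv that
# consumes the ASPP output above expects 512 input channels.
def _aspp_sketch(net, rates, depth=256):
    branches = [tf.compat.v1.layers.conv2d(net, depth, 1, padding='same')]
    for rate in rates:  # e.g. [6, 12, 18] as in unet_road_detection
        branches.append(tf.compat.v1.layers.conv2d(net, depth, 3, padding='same',
                                                   dilation_rate=rate))
    concat = tf.concat(branches, axis=-1)
    return tf.compat.v1.layers.conv2d(concat, 512, 1, padding='same')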
def segnet(x, dropout, is_training, weight_decay, crop, num_input_bands, num_classes, crop_size,
           extract_features):
    x = tf.reshape(x, shape=[-1, crop, crop, num_input_bands])
    batch_size = tf.shape(x)[0]

    norm1 = tf.nn.local_response_normalization(x, depth_radius=5, bias=1.0, alpha=0.0001,
                                               beta=0.75, name='norm1')

    conv1_1 = _conv_layer(norm1, [3, 3, num_input_bands, 64], 'conv1_1', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME')
    conv1_2 = _conv_layer(conv1_1, [3, 3, 64, 64], 'conv1_2', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME')
    h_pool1, w_pool1 = crop_size, crop_size
    pool1, pool1_index, shape_1 = _max_pool_with_argmax(conv1_2, [1, 2, 2, 1], [1, 2, 2, 1],
                                                        'pool1', pad='SAME')

    conv2_1 = _conv_layer(pool1, [3, 3, 64, 128], 'conv2_1', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME')
    conv2_2 = _conv_layer(conv2_1, [3, 3, 128, 128], 'conv2_2', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME')
    h_pool2, w_pool2 = math.ceil(h_pool1 / float(2)), math.ceil(w_pool1 / float(2))
    pool2, pool2_index, shape_2 = _max_pool_with_argmax(conv2_2, [1, 2, 2, 1], [1, 2, 2, 1],
                                                        'pool2', pad='SAME')

    conv3_1 = _conv_layer(pool2, [3, 3, 128, 256], 'conv3_1', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME')
    conv3_2 = _conv_layer(conv3_1, [3, 3, 256, 256], 'conv3_2', weight_decay, is_training,
                          strides=[1, 1, 1, 1], pad='SAME')
    h_pool3, w_pool3 = math.ceil(h_pool2 / float(2)), math.ceil(w_pool2 / float(2))
    pool3, pool3_index, shape_3 = _max_pool_with_argmax(conv3_2, [1, 2, 2, 1], [1, 2, 2, 1],
                                                        'pool3', pad='SAME')

    # ------------------------End of encoder-----------------------------

    # `dropout` holds the keep probability, so the drop rate is 1 - dropout.
    decoder_dropout3 = tf.compat.v1.layers.dropout(pool3, rate=(1 - dropout),
                                                   name='decoder_dropout3')
    deconv3_1 = _up_pooling(decoder_dropout3, pool3_index, shape_3, h_pool3, w_pool3,
                            batch_size, name='unpool_3')
    deconv3_2 = _conv_layer(deconv3_1, [3, 3, 256, 256], 'deconv3_2', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')
    deconv3_3 = _conv_layer(deconv3_2, [3, 3, 256, 256], 'deconv3_3', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')
    deconv3_4 = _conv_layer(deconv3_3, [3, 3, 256, 128], 'deconv3_4', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')

    decoder_dropout2 = tf.compat.v1.layers.dropout(deconv3_4, rate=(1 - dropout),
                                                   name='decoder_dropout2')
    deconv2_1 = _up_pooling(decoder_dropout2, pool2_index, shape_2, h_pool2, w_pool2,
                            batch_size, name='unpool_2')
    deconv2_2 = _conv_layer(deconv2_1, [3, 3, 128, 128], 'deconv2_2', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')
    deconv2_3 = _conv_layer(deconv2_2, [3, 3, 128, 64], 'deconv2_3', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')

    deconv1_1 = _up_pooling(deconv2_3, pool1_index, shape_1, h_pool1, w_pool1,
                            batch_size, name='unpool_1')
    deconv1_2 = _conv_layer(deconv1_1, [3, 3, 64, 64], 'deconv1_2', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')
    deconv1_3 = _conv_layer(deconv1_2, [3, 3, 64, 64], 'deconv1_3', weight_decay, is_training,
                            strides=[1, 1, 1, 1], pad='SAME')

    with tf.compat.v1.variable_scope('conv_classifier') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[1, 1, 64, num_classes],
                                             ini=tf.contrib.layers.xavier_initializer_conv2d(dtype=tf.float32),
                                             weight_decay=weight_decay)
        biases = _variable_on_cpu('biases', [num_classes], tf.constant_initializer(0.0))
        conv = tf.nn.conv2d(deconv1_3, kernel, [1, 1, 1, 1], padding='SAME')
        conv_classifier = tf.nn.bias_add(conv, biases, name=scope.name)

    if extract_features is True:
        return [tf.image.resize_bilinear(conv1_1, [32, 32]), 64], \
               [tf.image.resize_bilinear(conv3_2, [32, 32]), 256], \
               [tf.image.resize_bilinear(deconv1_3, [32, 32]), 64], conv_classifier
    else:
        return conv_classifier
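
# --- _up_pooling is defined elsewhere in this repository. For reference, a
# minimal max-unpooling sketch (hypothetical, not the repo's helper): scatter
# the pooled values back to the argmax positions recorded by
# tf.nn.max_pool_with_argmax, assuming the indices were produced with
# include_batch_in_index=True and output_shape is a list of Python ints.
def _unpool_with_argmax_sketch(pool, argmax, output_shape):
    flat_size = output_shape[0] * output_shape[1] * output_shape[2] * output_shape[3]
    values = tf.reshape(pool, [-1])
    indices = tf.reshape(argmax, [-1, 1])    # positions in the flattened output
    flat = tf.scatter_nd(indices, values, [flat_size])
    return tf.reshape(flat, output_shape)    # zeros at all non-argmax positions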
def pixelwise(x, dropout, is_training, weight_decay, crop, num_input_bands, num_classes,
              extract_features):
    x = tf.reshape(x, shape=[-1, crop, crop, num_input_bands])

    conv1 = _conv_layer(x, [5, 5, num_input_bands, 64], 'conv1', weight_decay, is_training,
                        batch_norm=True)
    pool1 = _max_pool(conv1, kernel=[1, 2, 2, 1], strides=[1, 2, 2, 1], name='pool1')

    conv2 = _conv_layer(pool1, [4, 4, 64, 128], 'conv2', weight_decay, is_training,
                        batch_norm=True)
    pool2 = _max_pool(conv2, kernel=[1, 2, 2, 1], strides=[1, 2, 2, 1], name='pool2')

    conv3 = _conv_layer(pool2, [3, 3, 128, 256], 'conv3', weight_decay, is_training,
                        batch_norm=True)
    pool3 = _max_pool(conv3, kernel=[1, 2, 2, 1], strides=[1, 2, 2, 1], name='pool3')

    # Flatten; the hard-coded 4 * 4 * 256 assumes crop == 32 (32 -> 16 -> 8 -> 4).
    reshape = tf.reshape(pool3, [-1, 4 * 4 * 256])
    # `dropout` holds the keep probability (TF1 tf.nn.dropout signature).
    drop_fc1 = tf.nn.dropout(reshape, dropout)
    fc1 = _fc_layer(drop_fc1, [4 * 4 * 256, 1024], weight_decay, 'fc1', batch_norm=True,
                    is_training=is_training, activation='relu')
    drop_fc2 = tf.nn.dropout(fc1, dropout)
    fc2 = _fc_layer(drop_fc2, [1024, 1024], weight_decay, 'fc2', batch_norm=True,
                    is_training=is_training, activation='relu')

    # Output: class prediction.
    with tf.compat.v1.variable_scope('fc3_logits') as scope:
        weights = _variable_with_weight_decay('weights', [1024, num_classes],
                                              ini=tf.contrib.layers.xavier_initializer(dtype=tf.float32),
                                              weight_decay=weight_decay)
        biases = _variable_on_cpu('biases', [num_classes], tf.constant_initializer(0.1))
        logits = tf.add(tf.matmul(fc2, weights), biases, name=scope.name)

    return logits
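
# --- Sanity check for the hard-coded flatten in pixelwise (sketch, not part
# of the original file): three 2x2/stride-2 SAME pools shrink the patch by 8x,
# so 4 * 4 * 256 only matches when crop == 32.
def _pixelwise_flat_size(crop):
    size = crop
    for _ in range(3):                  # pool1..pool3
        size = int(math.ceil(size / 2.0))
    return size * size * 256

assert _pixelwise_flat_size(32) == 4 * 4 * 256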