def up_conv(self, x):
    # Upsample with a 2x2 transposed convolution (stride 2) and halve the channel count.
    num_out_channels = get_num_channels(x) // 2
    x = deconv_2d(inputs=x,
                  filter_size=2,
                  num_filters=num_out_channels,
                  layer_name='conv_up',
                  stride=2,
                  add_batch_norm=self.conf.use_BN,
                  is_train=self.is_training_pl)
    return x
def down_conv(self, x):
    # Downsample with a strided 2x2 convolution and double the channel count.
    num_out_channels = get_num_channels(x) * 2
    x = conv_2d(inputs=x,
                filter_size=2,
                num_filters=num_out_channels,
                layer_name='conv_down',
                stride=2,
                add_batch_norm=self.conf.use_BN,
                is_train=self.is_training_pl,
                keep_prob=self.keep_prob_pl,
                activation=self.act_fcn)
    return x
def down_conv(self, x):
    # Alternative downsampling variant: a 1x1 BN-ReLU convolution (channel count
    # unchanged), dropout, then 2x2 max pooling with stride 2.
    num_out_channels = get_num_channels(x)
    x = BN_Relu_conv_2d(inputs=x,
                        filter_size=1,
                        num_filters=num_out_channels,
                        layer_name='conv_down',
                        stride=1,
                        add_batch_norm=self.conf.use_BN,
                        is_train=self.is_training_pl,
                        use_relu=True)
    x = tf.nn.dropout(x, self.keep_prob_pl)
    x = max_pool(x, self.conf.pool_filter_size, stride=2, name='maxpool')
    return x
def conv_block_up(self, layer_input, fine_grained_features, num_convolutions):
    # Decoder block: concatenate the upsampled features with the skip connection,
    # then run a residual stack of convolutions with PReLU activations.
    x = tf.concat((layer_input, fine_grained_features), axis=-1)
    n_channels = get_num_channels(layer_input)
    for i in range(num_convolutions):
        x = conv_2d(inputs=x,
                    filter_size=self.k_size,
                    num_filters=n_channels,
                    layer_name='conv_' + str(i + 1),
                    add_batch_norm=self.conf.use_BN,
                    is_train=self.is_training_pl,
                    keep_prob=self.keep_prob_pl,
                    dropconnect=True)
        if i == num_convolutions - 1:
            # Residual connection from the block input before the final activation.
            x = x + layer_input
        x = self.act_fcn(x, name='prelu_' + str(i + 1))
    return x
def conv_block_down(self, layer_input, num_convolutions):
    # Encoder block: residual stack of convolutions; the first level maps the
    # single input channel up to conf.start_channel_num filters.
    x = layer_input
    n_channels = get_num_channels(x)
    if n_channels == 1:
        n_channels = self.conf.start_channel_num
    for i in range(num_convolutions):
        x = conv_2d(inputs=x,
                    filter_size=self.k_size,
                    num_filters=n_channels,
                    layer_name='conv_' + str(i + 1),
                    add_batch_norm=self.conf.use_BN,
                    is_train=self.is_training_pl,
                    keep_prob=self.keep_prob_pl,
                    dropconnect=True)
        if i == num_convolutions - 1:
            # Residual connection from the block input before the final activation.
            x = x + layer_input
        x = self.act_fcn(x, name='prelu_' + str(i + 1))
    return x
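# --- Sketch only, not part of the original file: one possible way these blocks
# --- compose into a V-Net-style encoder/decoder. The method name `build_network`,
# --- the attribute `self.conf.num_levels`, and the per-level convolution counts
# --- are assumptions for illustration. Because the blocks use fixed layer names
# --- ('conv_down', 'conv_up', 'conv_1', ...), each level is wrapped in its own
# --- tf.variable_scope so variable names stay unique across levels.
def build_network(self, x):
    skip_connections = []
    # Encoder path: convolution block, store the skip connection, then downsample.
    for level in range(self.conf.num_levels):  # assumed config attribute
        with tf.variable_scope('encoder_level_' + str(level + 1)):
            x = self.conv_block_down(x, num_convolutions=min(level + 1, 3))
            skip_connections.append(x)
            x = self.down_conv(x)
    # Decoder path: upsample, then fuse with the matching skip connection.
    for level in reversed(range(self.conf.num_levels)):
        with tf.variable_scope('decoder_level_' + str(level + 1)):
            x = self.up_conv(x)
            x = self.conv_block_up(x, skip_connections[level],
                                   num_convolutions=min(level + 1, 3))
    return x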