def _expanding_block(self, block_num, _input, layer, option='concat'):
    """Decoder (expanding-path) block: 1x1 conv -> deconv upsample -> skip
    merge -> two 3x3 refinement convs.

    Args:
        block_num: key into ``self.output_channels`` selecting the channel
            widths for this stage.
        _input: feature map from the previous (deeper) decoder stage.
        layer: skip-connection feature map from the contracting path.
        option: ``'sum'`` merges with element-wise addition; anything else
            (default ``'concat'``) concatenates along the channel axis.

    Returns:
        The refined, upsampled feature map.
    """
    channels = self.output_channels[str(block_num)]
    # 1x1 channel reduction before upsampling keeps the deconv cheap.
    x = _conv2d(_input, kernel_size=1, stride=1,
                output_feature=channels[0], use_bias=True, name='conv_1')
    x = deconv_bn_prelu(x, output_channels=channels[1],
                        is_training=self.is_training, name='deconv')
    # Merge with the skip connection from the contracting path.
    if option == 'sum':
        x = tf.add(x, layer, name='elemwise_sum')
    else:
        # e.g. 26*50*40*64 / 52*100*80*32 / 104*200*160*16
        x = tf.concat(values=(x, layer), axis=-1, name='concat')
    # Two 3x3 refinement convs preserving the merged channel count.
    for conv_name in ('conv_2', 'conv_3'):
        x = conv_bn_prelu(x, kernel_size=3, stride=1,
                          output_channels=int(x.get_shape()[-1]),
                          name=conv_name, is_training=self.is_training)
    return x
def _expanding_block(self, block_num, _input, layer, option='concat'):
    """Decoder (expanding-path) block for the 3D network: 1x1x1 conv ->
    deconv upsample -> skip merge -> two 3x3x3 refinement convs.

    Args:
        block_num: key into ``self.output_channels`` selecting the channel
            widths for this stage.
        _input: feature volume from the previous (deeper) decoder stage,
            e.g. 13*25*20*32 / 26*50*40*32 / 52*100*80*16.
        layer: skip-connection feature volume from the contracting path.
        option: ``'sum'`` merges with element-wise addition; anything else
            (default ``'concat'``) concatenates along the channel axis.

    Returns:
        The refined, upsampled feature volume.
    """
    output = _conv3d(_input, kernel_size=1, stride=1,
                     output_feature=self.output_channels[str(block_num)][0],
                     use_bias=True, name='conv_1')
    # 26*50*40*32 / 52*100*80*16 / 104*200*160*8 after upsampling.
    output = deconv_bn_prelu(output,
                             output_channels=self.output_channels[str(block_num)][1],
                             is_training=self.is_training, name='deconv')
    # Fix: honor the `option` argument — it was accepted but ignored (the
    # method always concatenated). This matches the 2D variant's behavior;
    # the default 'concat' path is unchanged for existing callers.
    if option == 'sum':
        output = tf.add(output, layer, name='elemwise_sum')
    else:
        # axis=4 — presumably the channel axis of a 5-D NDHWC tensor
        # (batch + 26*50*40*64 / 52*100*80*32 / 104*200*160*16).
        output = tf.concat(values=(output, layer), axis=4, name='concat')
    output = conv_bn_prelu(output, kernel_size=3, stride=1,
                           output_channels=int(output.get_shape()[-1]),
                           name='conv_2', is_training=self.is_training)
    output = conv_bn_prelu(output, kernel_size=3, stride=1,
                           output_channels=int(output.get_shape()[-1]),
                           name='conv_3', is_training=self.is_training)
    # Fix: removed stray debug `print(output)` — an unwanted stdout side
    # effect left over from development.
    return output