def _expanding_block(self, block_num, _input, layer, option='concat'):
    """Decoder block (2-D): 1x1 bottleneck conv, learned upsampling, merge
    with the encoder skip `layer`, then two 3x3 refinement convs.

    The merge is an element-wise sum when `option == 'sum'` (shapes must
    already match), otherwise a channel-axis concat.
    """
    # 1x1 projection down to this block's bottleneck width.
    x = _conv2d(
        _input, kernel_size=1, stride=1,
        output_feature=self.output_channels[str(block_num)][0],
        use_bias=True, name='conv_1')
    # Learned upsampling back toward the skip connection's resolution.
    x = deconv_bn_prelu(
        x, output_channels=self.output_channels[str(block_num)][1],
        is_training=self.is_training, name='deconv')
    if option == 'sum':
        # assumes `layer` already matches x's shape/channels — TODO confirm
        x = tf.add(x, layer, name='elemwise_sum')
    else:
        x = tf.concat(values=(x, layer), axis=-1, name='concat')
    # Two refinement convs that keep the merged channel count.
    merged_channels = int(x.get_shape()[-1])
    x = conv_bn_prelu(x, kernel_size=3, stride=1,
                      output_channels=merged_channels,
                      name='conv_2', is_training=self.is_training)
    x = conv_bn_prelu(x, kernel_size=3, stride=1,
                      output_channels=int(x.get_shape()[-1]),
                      name='conv_3', is_training=self.is_training)
    return x
def _expanding_block(self, block_num, _input, layer, option='concat'):
    """Decoder block (3-D): 1x1x1 bottleneck conv, learned upsampling,
    channel-axis concat with the encoder skip `layer`, then two 3x3x3
    refinement convs.

    `option` is accepted for interface parity with the 2-D variant but is
    not consulted here — the merge is always a concat.
    """
    x = _conv3d(
        _input, kernel_size=1, stride=1,
        output_feature=self.output_channels[str(block_num)][0],
        use_bias=True, name='conv_1')
    x = deconv_bn_prelu(
        x, output_channels=self.output_channels[str(block_num)][1],
        is_training=self.is_training, name='deconv')
    # axis 4 is the last axis of the rank-5 tensor (channels, assuming NDHWC).
    x = tf.concat(values=(x, layer), axis=4, name='concat')
    x = conv_bn_prelu(x, kernel_size=3, stride=1,
                      output_channels=int(x.get_shape()[-1]),
                      name='conv_2', is_training=self.is_training)
    x = conv_bn_prelu(x, kernel_size=3, stride=1,
                      output_channels=int(x.get_shape()[-1]),
                      name='conv_3', is_training=self.is_training)
    # Debug trace kept from the original implementation.
    print(x)
    return x
def _contracting_block(self, block_num, _input, input_layer):
    """Encoder block: two stride-1 3x3 convs, one stride-2 downsampling conv,
    a residual add with a strided projection of `input_layer` (long skip
    from the stem), then feature-pyramid enhancement.
    """
    x = conv_bn_prelu(
        _input, kernel_size=3, stride=1,
        output_channels=self.output_channels[str(block_num)][0],
        name='conv_1', is_training=self.is_training)
    x = conv_bn_prelu(
        x, kernel_size=3, stride=1,
        output_channels=self.output_channels[str(block_num)][1],
        name='conv_2', is_training=self.is_training)
    # Downsample while widening to the block's output channel count.
    x = conv_bn_prelu(
        x, kernel_size=3, stride=2,
        output_channels=self.output_channels[str(block_num)][2],
        name='conv_3', is_training=self.is_training)
    # Project the long-skip tensor to the same resolution and width; the
    # per-block stride comes from self.stride so the shapes line up.
    skip = conv_bn_prelu(
        input_layer, kernel_size=3,
        stride=self.stride[str(block_num)][0],
        output_channels=self.output_channels[str(block_num)][2],
        name='conv_s2', is_training=self.is_training)
    x = tf.add(x, skip, name='elemwise_sum')
    x = self.feature_pyramid(x, block_num)
    # Debug trace kept from the original implementation.
    print('output', x)
    return x
def _contracting_block(self, block_num, _input, input_layer):
    """Encoder block variant: three stride-1 3x3 convs, a residual add with
    the channel-doubled block input, then a stride-2 conv that downsamples
    after the merge.

    `input_layer` is accepted for interface parity with the other variants
    but is not used by this implementation.
    """
    x = conv_bn_prelu(
        _input, kernel_size=3, stride=1,
        output_channels=self.output_channels[str(block_num)][0],
        name='conv_1', is_training=self.is_training)
    x = conv_bn_prelu(
        x, kernel_size=3, stride=1,
        output_channels=self.output_channels[str(block_num)][1],
        name='conv_2', is_training=self.is_training)
    x = conv_bn_prelu(
        x, kernel_size=3, stride=1,
        output_channels=self.output_channels[str(block_num)][2],
        name='conv_3', is_training=self.is_training)
    # Duplicate the input along channels so the shortcut width matches conv_3.
    # assumes output_channels[...][2] == 2 * input channels — TODO confirm
    _input = tf.concat([_input, _input], axis=-1, name='concat')
    x = tf.add(x, _input, name='elemwise_sum')
    # Downsample after the residual merge, keeping the channel count.
    x = conv_bn_prelu(x, kernel_size=3, stride=2,
                      output_channels=int(x.get_shape()[-1]),
                      use_bias=True, name='conv_s2',
                      is_training=self.is_training)
    # Debug trace kept from the original implementation.
    print('output', x)
    return x
def _contracting_block(self, block_num, _input, input_layer):
    """Encoder block variant: two stride-1 3x3 convs, a stride-2 downsampling
    conv, feature-pyramid enhancement, and finally a residual add with a
    strided projection of `input_layer` (long skip connection).

    Unlike the sibling variant, the feature pyramid runs BEFORE the
    residual merge here.
    """
    x = conv_bn_prelu(
        _input, kernel_size=3, stride=1,
        output_channels=self.output_channels[str(block_num)][0],
        name='conv_1', is_training=self.is_training)
    x = conv_bn_prelu(
        x, kernel_size=3, stride=1,
        output_channels=self.output_channels[str(block_num)][1],
        name='conv_2', is_training=self.is_training)
    x = conv_bn_prelu(
        x, kernel_size=3, stride=2,
        output_channels=self.output_channels[str(block_num)][2],
        name='conv_3', is_training=self.is_training)
    x = self.feature_pyramid(x, block_num)
    # Long skip: project input_layer to matching resolution and width.
    skip = conv_bn_prelu(
        input_layer, kernel_size=3,
        stride=self.stride[str(block_num)][0],
        output_channels=self.output_channels[str(block_num)][2],
        name='conv_s2', is_training=self.is_training)
    return tf.add(x, skip, name='elemwise_sum')
def _contracting_block(self, block_num, _input):
    """Encoder block without a long skip: three 3x3 convs (the last one
    stride-2), a 1x1x1 stride-2 projection shortcut of the block input,
    an element-wise residual add, then feature-pyramid enhancement.
    """
    x = conv_bn_prelu(
        _input, kernel_size=3, stride=1,
        output_channels=self.output_channels[str(block_num)][0],
        name='conv_1', is_training=self.is_training)
    x = conv_bn_prelu(
        x, kernel_size=3, stride=1,
        output_channels=self.output_channels[str(block_num)][1],
        name='conv_2', is_training=self.is_training)
    x = conv_bn_prelu(
        x, kernel_size=3, stride=2,
        output_channels=self.output_channels[str(block_num)][2],
        name='conv_3', is_training=self.is_training)
    # 1x1x1 stride-2 projection so the shortcut matches conv_3's shape.
    shortcut = _conv3d(_input, kernel_size=1, stride=2,
                       output_feature=self.output_channels[str(block_num)][2],
                       use_bias=True, name='conv_s2')
    x = tf.add(x, shortcut, name='elemwise_sum')
    return self.feature_pyramid(x, block_num)
def _contracting_block(self, block_num, _input, input_layer):
    """Encoder block variant: cascaded feature extraction, a stride-2
    downsampling conv, and a residual add with a strided projection of
    `input_layer` (long skip connection).
    """
    # Leftover from an earlier atrous-conv variant; currently unused
    # (only the removed atrous_bn_prelu calls consumed it).
    dilate_rate = int(2 * (4 - block_num))
    x = self.cascade_feature(_input, block_num)
    x = conv_bn_prelu(
        x, kernel_size=3, stride=2,
        output_channels=self.output_channels[str(block_num)][2],
        name='conv_3', is_training=self.is_training)
    # Long skip: project input_layer to matching resolution and width.
    skip = conv_bn_prelu(
        input_layer, kernel_size=3,
        stride=self.stride[str(block_num)][0],
        output_channels=self.output_channels[str(block_num)][2],
        name='conv_s2', is_training=self.is_training)
    return tf.add(x, skip, name='elemwise_sum')
def inference_op(self, _input):
    """Build the 2-D segmentation graph.

    Runs the encoder/decoder over `_input`, emits three deep-supervision
    auxiliary logits, fuses the decoder levels into final logits, and (when
    cfg.joint_train) adds a 2-way classification head.

    Returns:
        (output, predicted_label, auxiliary1_prob_1x, auxiliary2_prob_1x,
         auxiliary3_prob_1x[, cls_outputs, cls_predict])
    """
    conv_layer = []
    dconv_layer = []
    with tf.device(device_name_or_function=self.device[0]):
        # Stem conv: 8 feature maps at full resolution.
        output = conv_bn_prelu(_input, output_channels=8, kernel_size=3,
                               stride=1, is_training=self.is_training,
                               name='conv_1')
        input_layer = output
        conv_layer.append(output)
        # Encoder path; each block also receives the stem output as a
        # long skip connection.
        for block_num in range(1, self.cont_block_num + 1):
            with tf.variable_scope('contract_block_%d' % block_num):
                output = self._contracting_block(block_num, output, input_layer)
                conv_layer.append(output)
        # Deepest encoder features feed the classification branch.
        self.class_layer.append(conv_layer[-1])
        # Decoder path; conv_layer[2 - block_num] indexes the matching
        # encoder skip from the end of the list.
        for block_num in range(4, self.expand_block_num + 4):
            with tf.variable_scope('expand_block_%d' % block_num):
                output = self._expanding_block(block_num, output,
                                               conv_layer[2 - block_num])
                dconv_layer.append(output)
    with tf.device(device_name_or_function=self.device[1]):
        # Auxiliary (deep-supervision) predictions, upsampled to full size.
        # Fourth level: 4x -> 2x -> 1x.
        auxiliary3_prob_4x = _conv2d(inputs=dconv_layer[0],
                                     output_feature=self.output_classes,
                                     kernel_size=1, stride=1, use_bias=True,
                                     name='auxiliary3_prob_4x')
        auxiliary3_prob_2x = deconv2d(inputs=auxiliary3_prob_4x,
                                      output_channels=self.output_classes,
                                      name='auxiliary3_prob_2x')
        auxiliary3_prob_1x = deconv2d(inputs=auxiliary3_prob_2x,
                                      output_channels=self.output_classes,
                                      name='auxiliary3_prob_1x')
        # Third level: 2x -> 1x.
        auxiliary2_prob_2x = _conv2d(inputs=dconv_layer[1],
                                     output_feature=self.output_classes,
                                     kernel_size=1, stride=1, use_bias=True,
                                     name='auxiliary2_prob_2x')
        # BUG FIX: this deconv was named 'auxiliary2_prob_2x', colliding with
        # the conv scope above (duplicate variable scope under TF1).
        auxiliary2_prob_1x = deconv2d(inputs=auxiliary2_prob_2x,
                                      output_channels=self.output_classes,
                                      name='auxiliary2_prob_1x')
        # Second level is already at full resolution.
        auxiliary1_prob_1x = _conv2d(inputs=dconv_layer[2],
                                     output_feature=self.output_classes,
                                     kernel_size=1, stride=1, use_bias=True,
                                     name='auxiliary1_prob_1x')
        with tf.variable_scope('last_stage'):
            # Top-down fusion of the three decoder levels (5 channels each),
            # upsampling between levels, then a 1x1 conv to class logits.
            output1 = _conv2d(dconv_layer[0], kernel_size=1, stride=1,
                              output_feature=5, use_bias=True,
                              name='block1_conv1x1')
            output1 = BilinearUpsample2d(output1, up_factor=2)
            output2 = _conv2d(dconv_layer[1], kernel_size=1, stride=1,
                              output_feature=5, use_bias=True,
                              name='block2_conv1x1')
            output2 = tf.add(output1, output2)
            output2 = BilinearUpsample2d(output2, up_factor=2)
            output3 = _conv2d(dconv_layer[2], kernel_size=1, stride=1,
                              output_feature=5, use_bias=True,
                              name='block3_conv1x1')
            output3 = tf.add(output2, output3)
            output = _conv2d(output3, kernel_size=1, stride=1,
                             output_feature=self.output_classes,
                             use_bias=True, name='fc_layer')
    with tf.device(device_name_or_function=self.device[2]):
        with tf.variable_scope('prediction'):
            softmax_prob = tf.nn.softmax(logits=output, name='softmax_prob')
            predicted_label = tf.argmax(input=softmax_prob, axis=-1,
                                        name='predicted_label')
        if cfg.joint_train:
            # Optional 2-way classification head on top of the logits.
            with tf.variable_scope('class_layer'):
                cls_input = output
                cls_outputs = _conv2d(cls_input, kernel_size=3, stride=1,
                                      output_feature=int(cls_input.get_shape()[-1]),
                                      name='cls_conv1')
                # Global average pooling: pool the full spatial extent.
                # assumes square spatial dims (H == W) — TODO confirm
                k = int(cls_outputs.get_shape()[-2])
                cls_outputs = tf.nn.avg_pool(cls_outputs,
                                             ksize=[1, k, k, 1],
                                             strides=[1, k, k, 1],
                                             padding='VALID')
                # Fully-connected layer to 2 classes.
                features_total = int(cls_outputs.get_shape()[-1])
                cls_outputs = tf.reshape(cls_outputs, [-1, features_total])
                W = tf.get_variable(
                    shape=[features_total, 2],
                    initializer=tf.contrib.layers.xavier_initializer(),
                    name='W')
                bias = tf.get_variable(
                    initializer=tf.constant(0.0, shape=[2]), name='bias')
                cls_outputs = tf.matmul(cls_outputs, W) + bias
            with tf.variable_scope('class_prediction'):
                cls_softmax = tf.nn.softmax(logits=cls_outputs,
                                            name='softmax_prob')
                cls_predict = tf.argmax(input=cls_softmax, axis=-1,
                                        name='predicted_label')
            return (output, predicted_label, auxiliary1_prob_1x,
                    auxiliary2_prob_1x, auxiliary3_prob_1x,
                    cls_outputs, cls_predict)
        else:
            return (output, predicted_label, auxiliary1_prob_1x,
                    auxiliary2_prob_1x, auxiliary3_prob_1x)
def inference_op(self, _input):
    """Build the 3-D segmentation graph (padded variant).

    Pads `_input` so the spatial dims are divisible by the downsampling
    factor, runs the encoder/decoder, emits three deep-supervision
    auxiliary logits, projects to class logits, and crops the padding off
    before the softmax prediction.

    Returns:
        (output, predicted_label, auxiliary1_prob_1x, auxiliary2_prob_1x,
         auxiliary3_prob_1x)
    """
    conv_layer = []
    dconv_layer = []
    # Pad depth by 1 (front) and height by 1+1 so pooling divides evenly;
    # the crop below removes exactly this padding again.
    output = tf.pad(_input,
                    np.array([[0, 0], [1, 0], [1, 1], [0, 0], [0, 0]]),
                    name='pad_1')
    with tf.device(device_name_or_function=self.device[0]):
        # Stem conv: 8 feature maps at full (padded) resolution.
        output = conv_bn_prelu(output, output_channels=8, kernel_size=3,
                               stride=1, is_training=self.is_training,
                               name='conv_1')
        input_layer = output
        conv_layer.append(output)
        # Encoder path with the stem output as a long skip connection.
        for block_num in range(1, self.cont_block_num + 1):
            with tf.variable_scope('contract_block_%d' % block_num):
                output = self._contracting_block(block_num, output, input_layer)
                conv_layer.append(output)
        # Decoder path; conv_layer[2 - block_num] is the matching encoder
        # skip, indexed from the end of the list.
        for block_num in range(4, self.expand_block_num + 4):
            with tf.variable_scope('expand_block_%d' % block_num):
                output = self._expanding_block(block_num, output,
                                               conv_layer[2 - block_num])
                dconv_layer.append(output)
    with tf.device(device_name_or_function=self.device[1]):
        # Auxiliary (deep-supervision) predictions, upsampled to full size.
        # Fourth level: 4x -> 2x -> 1x.
        auxiliary3_prob_4x = _conv3d(inputs=dconv_layer[0],
                                     output_feature=self.output_classes,
                                     kernel_size=1, stride=1, use_bias=True,
                                     name='auxiliary3_prob_4x')
        auxiliary3_prob_2x = deconv3d(inputs=auxiliary3_prob_4x,
                                      output_channels=self.output_classes,
                                      name='auxiliary3_prob_2x')
        auxiliary3_prob_1x = deconv3d(inputs=auxiliary3_prob_2x,
                                      output_channels=self.output_classes,
                                      name='auxiliary3_prob_1x')
        # Third level: 2x -> 1x.
        auxiliary2_prob_2x = _conv3d(inputs=dconv_layer[1],
                                     output_feature=self.output_classes,
                                     kernel_size=1, stride=1, use_bias=True,
                                     name='auxiliary2_prob_2x')
        # BUG FIX: this deconv was named 'auxiliary2_prob_2x', colliding with
        # the conv scope above (duplicate variable scope under TF1).
        auxiliary2_prob_1x = deconv3d(inputs=auxiliary2_prob_2x,
                                      output_channels=self.output_classes,
                                      name='auxiliary2_prob_1x')
        # Second level is already at full resolution.
        auxiliary1_prob_1x = _conv3d(inputs=dconv_layer[2],
                                     output_feature=self.output_classes,
                                     kernel_size=1, stride=1, use_bias=True,
                                     name='auxiliary1_prob_1x')
        # 1x1x1 conv to class logits.
        output = _conv3d(output, kernel_size=1, stride=1,
                         output_feature=self.output_classes, use_bias=True,
                         name='fc_layer')
        # Crop the padding added above (103 = 104 - 1, 198 = 200 - 2).
        # NOTE(review): hard-coded to the 103x198x... input size — confirm
        # against the data pipeline if input dimensions change.
        output = output[:, :103, :198, :]
    with tf.device(device_name_or_function=self.device[2]):
        with tf.variable_scope('prediction'):
            softmax_prob = tf.nn.softmax(logits=output, name='softmax_prob')
            # axis 4 = channel/class axis of the rank-5 tensor.
            predicted_label = tf.argmax(input=softmax_prob, axis=4,
                                        name='predicted_label')
    return (output, predicted_label, auxiliary1_prob_1x,
            auxiliary2_prob_1x, auxiliary3_prob_1x)
def inference_op(self, _input):
    """Build the 3-D graph with single-channel (sigmoid-style) outputs.

    Pads `_input`, runs the encoder/decoder, emits three single-channel
    deep-supervision auxiliary logits, and fuses the three decoder levels
    with bilinear upsampling into a single-channel final logit map.

    Returns:
        (output, auxiliary1_prob_1x, auxiliary2_prob_1x, auxiliary3_prob_1x)
    """
    conv_layer = []
    dconv_layer = []
    # Pad depth by 1 (front) and height by 1+1 so pooling divides evenly.
    output = tf.pad(_input,
                    np.array([[0, 0], [1, 0], [1, 1], [0, 0], [0, 0]]),
                    name='pad_1')
    # Stem conv: 8 feature maps at full (padded) resolution.
    output = conv_bn_prelu(output, output_channels=8, kernel_size=3,
                           stride=1, is_training=self.is_training,
                           name='conv_1')
    conv_layer.append(output)
    # Encoder path.  NOTE(review): this variant calls _contracting_block
    # with (block_num, output) only — it pairs with the two-argument
    # contracting-block variant, not the ones taking input_layer.
    for block_num in range(1, self.cont_block_num + 1):
        with tf.variable_scope('contract_block_%d' % block_num):
            output = self._contracting_block(block_num, output)
            conv_layer.append(output)
    # Decoder path; conv_layer[2 - block_num] is the matching encoder skip.
    for block_num in range(4, self.expand_block_num + 4):
        with tf.variable_scope('expand_block_%d' % block_num):
            output = self._expanding_block(block_num, output,
                                           conv_layer[2 - block_num])
            dconv_layer.append(output)
    # Auxiliary (deep-supervision) predictions, one channel each.
    # Fourth level: 4x -> 2x -> 1x.
    auxiliary3_prob_4x = _conv3d(inputs=dconv_layer[0], output_feature=1,
                                 kernel_size=1, stride=1, use_bias=True,
                                 name='auxiliary3_prob_4x')
    auxiliary3_prob_2x = deconv3d(inputs=auxiliary3_prob_4x,
                                  output_channels=1,
                                  name='auxiliary3_prob_2x')
    auxiliary3_prob_1x = deconv3d(inputs=auxiliary3_prob_2x,
                                  output_channels=1,
                                  name='auxiliary3_prob_1x')
    # Third level: 2x -> 1x.
    auxiliary2_prob_2x = _conv3d(inputs=dconv_layer[1], output_feature=1,
                                 kernel_size=1, stride=1, use_bias=True,
                                 name='auxiliary2_prob_2x')
    # BUG FIX: this deconv was named 'auxiliary2_prob_2x', colliding with
    # the conv scope above (duplicate variable scope under TF1).
    auxiliary2_prob_1x = deconv3d(inputs=auxiliary2_prob_2x,
                                  output_channels=1,
                                  name='auxiliary2_prob_1x')
    # Second level is already at full resolution.
    auxiliary1_prob_1x = _conv3d(inputs=dconv_layer[2], output_feature=1,
                                 kernel_size=1, stride=1, use_bias=True,
                                 name='auxiliary1_prob_1x')
    with tf.variable_scope('last_stage'):
        # Top-down fusion of the three decoder levels (5 channels each),
        # bilinear-upsampling between levels, then a 1x1x1 conv to a
        # single-channel logit map.
        output1 = _conv3d(dconv_layer[0], kernel_size=1, stride=1,
                          output_feature=5, use_bias=True,
                          name='block1_conv1x1')
        output1 = BilinearUpsample3d(output1, up_factor=2)
        output2 = _conv3d(dconv_layer[1], kernel_size=1, stride=1,
                          output_feature=5, use_bias=True,
                          name='block2_conv1x1')
        output2 = tf.add(output1, output2)
        output2 = BilinearUpsample3d(output2, up_factor=2)
        output3 = _conv3d(dconv_layer[2], kernel_size=1, stride=1,
                          output_feature=5, use_bias=True,
                          name='block3_conv1x1')
        output3 = tf.add(output2, output3)
        output = _conv3d(output3, kernel_size=1, stride=1, output_feature=1,
                         use_bias=True, name='fc_layer')
    return output, auxiliary1_prob_1x, auxiliary2_prob_1x, auxiliary3_prob_1x
def inference_op(self, _input):
    """Build the 3-D graph with deconv-based level fusion.

    Pads `_input`, runs the encoder/decoder, emits three single-channel
    deep-supervision auxiliary logits, and fuses the three decoder levels
    with learned deconvolutions into a single-channel final logit map.

    Returns:
        (output, auxiliary1_prob_1x, auxiliary2_prob_1x, auxiliary3_prob_1x)
    """
    conv_layer = []
    dconv_layer = []
    # Pad depth by 1 (front) and height by 1+1 so pooling divides evenly.
    output = tf.pad(_input,
                    np.array([[0, 0], [1, 0], [1, 1], [0, 0], [0, 0]]),
                    name='pad_1')
    # Stem conv: 8 feature maps at full (padded) resolution.
    output = conv_bn_prelu(output, output_channels=8, kernel_size=3,
                           stride=1, is_training=self.is_training,
                           name='conv_1')
    conv_layer.append(output)
    # Encoder path.  NOTE(review): this variant calls the block helpers
    # WITHOUT a block_num argument — it pairs with single-argument
    # _contracting_block/_expanding_block variants defined elsewhere;
    # the variants visible in this file take block_num first. Confirm
    # which definitions are live before reusing this method.
    for i in range(self.cont_block_num):
        with tf.variable_scope('contract_block_%d' % i):
            output = self._contracting_block(output)
            conv_layer.append(output)
    # Decoder path; conv_layer[2 - i] is the matching encoder skip.
    for i in range(self.expand_block_num):
        with tf.variable_scope('expand_block_%d' % i):
            output = self._expanding_block(output, conv_layer[2 - i])
            dconv_layer.append(output)
    # Auxiliary (deep-supervision) predictions, one channel each.
    # Fourth level: 4x -> 2x -> 1x.
    auxiliary3_prob_4x = _conv3d(inputs=dconv_layer[0], output_feature=1,
                                 kernel_size=1, stride=1, use_bias=True,
                                 name='auxiliary3_prob_4x')
    auxiliary3_prob_2x = deconv3d(inputs=auxiliary3_prob_4x,
                                  output_channels=1,
                                  name='auxiliary3_prob_2x')
    auxiliary3_prob_1x = deconv3d(inputs=auxiliary3_prob_2x,
                                  output_channels=1,
                                  name='auxiliary3_prob_1x')
    # Third level: 2x -> 1x.
    auxiliary2_prob_2x = _conv3d(inputs=dconv_layer[1], output_feature=1,
                                 kernel_size=1, stride=1, use_bias=True,
                                 name='auxiliary2_prob_2x')
    # BUG FIX: this deconv was named 'auxiliary2_prob_2x', colliding with
    # the conv scope above (duplicate variable scope under TF1).
    auxiliary2_prob_1x = deconv3d(inputs=auxiliary2_prob_2x,
                                  output_channels=1,
                                  name='auxiliary2_prob_1x')
    # Second level is already at full resolution.
    auxiliary1_prob_1x = _conv3d(inputs=dconv_layer[2], output_feature=1,
                                 kernel_size=1, stride=1, use_bias=True,
                                 name='auxiliary1_prob_1x')
    with tf.variable_scope('last_stage'):
        # Top-down fusion of the three decoder levels (32 channels each),
        # deconv-upsampling between levels, then a 1x1x1 conv to a
        # single-channel logit map.
        _output = _conv3d(dconv_layer[0], kernel_size=1, stride=1,
                          output_feature=32, use_bias=True,
                          name='block1_conv1x1')
        _output = deconv3d(_output,
                           output_channels=int(_output.get_shape()[-1]),
                           name='block1_deconv')
        _output2 = _conv3d(dconv_layer[1], kernel_size=1, stride=1,
                           output_feature=32, use_bias=True,
                           name='block2_conv1x1')
        _output = tf.add(_output, _output2)
        _output = deconv3d(_output,
                           output_channels=int(_output.get_shape()[-1]),
                           name='block2_deconv')
        _output3 = _conv3d(dconv_layer[2], kernel_size=1, stride=1,
                           output_feature=32, use_bias=True,
                           name='block3_conv1x1')
        output = tf.add(_output, _output3)
        output = _conv3d(output, kernel_size=1, stride=1, output_feature=1,
                         use_bias=True, name='fc_layer')
    return output, auxiliary1_prob_1x, auxiliary2_prob_1x, auxiliary3_prob_1x