def layer_op(self, input_tensor, is_training):
    """
    Apply the block's convolution stack, then an optional resampling step.

    :param input_tensor: tensor, input to the UNet block
    :param is_training: boolean, True if network is in training mode
    :return: (output tensor of the block, branch tensor taken before
        resampling when ``self.with_downsample_branch`` is set, else None)
    """
    output_tensor = input_tensor
    # one VALID-padded convolution per (kernel_size, n_features) pair
    for (kernel_size, n_features) in zip(self.kernels, self.n_chns):
        conv_op = ConvolutionalLayer(n_output_chns=n_features,
                                     kernel_size=kernel_size,
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     acti_func=self.acti_func,
                                     name='{}'.format(n_features),
                                     padding='VALID',
                                     with_bn=False,
                                     with_bias=True)
        output_tensor = conv_op(output_tensor, is_training)
    # keep the pre-resampling features for skip connections if requested
    if self.with_downsample_branch:
        branch_output = output_tensor
    else:
        branch_output = None
    # BUGFIX: the DOWNSAMPLE_ANISOTROPIC branch used a bare `if`, which
    # silently split the dispatch into two independent chains; make the
    # whole dispatch one mutually-exclusive if/elif chain.
    if self.func == 'DOWNSAMPLE':
        downsample_op = DownSampleLayer('MAX',
                                        kernel_size=2,
                                        stride=2,
                                        name='down_2x_isotropic')
        output_tensor = downsample_op(output_tensor)
    elif self.func == 'DOWNSAMPLE_ANISOTROPIC':
        # no pooling along the last spatial axis (e.g. thick-slice volumes)
        downsample_op = DownSampleLayer('MAX',
                                        kernel_size=[2, 2, 1],
                                        stride=[2, 2, 1],
                                        name='down_2x2x1')
        output_tensor = downsample_op(output_tensor)
    elif self.func == 'UPSAMPLE':
        upsample_op = DeconvolutionalLayer(n_output_chns=self.n_chns[-1],
                                           kernel_size=2,
                                           stride=2,
                                           name='up_2x_isotropic',
                                           with_bn=False,
                                           with_bias=True)
        output_tensor = upsample_op(output_tensor, is_training)
    elif self.func == 'UPSAMPLE_ANISOTROPIC':
        upsample_op = DeconvolutionalLayer(n_output_chns=self.n_chns[-1],
                                           kernel_size=[2, 2, 1],
                                           stride=[2, 2, 1],
                                           name='up_2x2x1',
                                           with_bn=False,
                                           with_bias=True)
        output_tensor = upsample_op(output_tensor, is_training)
    elif self.func == 'NONE':
        pass  # do nothing
    return output_tensor, branch_output
def up(ch, x):
    """Upsample `x` with a kernel-3, stride-2 deconvolution, then ReLU."""
    with tf.name_scope('up'):
        upsampled = DeconvolutionalLayer(
            ch, 3, stride=2, w_initializer=w_init)(x, is_training=is_training)
        return tf.nn.relu(upsampled)
def up(ch, x):
    """Double spatial resolution of `x`: kernel-3 stride-2 deconvolution
    with batch norm (no bias) and a ReLU activation."""
    deconv_params = dict(
        n_output_chns=ch,
        kernel_size=3,
        stride=2,
        with_bn=True,
        with_bias=False,
        acti_func='relu',
        w_initializer=self.initializers['w'])
    with tf.name_scope('up'):
        return DeconvolutionalLayer(**deconv_params)(
            x, is_training=is_training)
def up(ch, x, hack=False):
    """Upsample `x` with a kernel-3, stride-2 deconvolution (no internal
    batch norm), optionally drop the first row of the third axis, then
    apply external batch norm and ReLU.

    :param hack: when True, crop one slice off axis 2 of the output
    """
    with tf.name_scope('up'):
        layer = DeconvolutionalLayer(
            ch, 3, with_bn=False, stride=2, w_initializer=w_init)
        out = layer(x, is_training=is_training)
        if hack:
            # hack to match Yipeng's image size
            out = out[:, :, 1:, :]
        normalised = tf.contrib.layers.batch_norm(out)
        return tf.nn.relu(normalised)
def up(ch, x):
    """
    Performs deconvolution operation with kernel size 3, stride 2,
    batch norm, and relu

    :param ch: int, number of output channels for deconvolutional layer
    :param x: tensor, input to deconvolutional layer
    :return: tensor, output of deconvolutional layer
    """
    with tf.name_scope('up'):
        deconv_layer = DeconvolutionalLayer(
            n_output_chns=ch,
            kernel_size=3,
            stride=2,
            feature_normalization='batch',
            with_bias=False,
            acti_func='relu',
            w_initializer=self.initializers['w'])
        return deconv_layer(x, is_training=is_training)
def _test_deconv_layer_output_shape(self, rank, param_dict, output_shape,
                                    is_training=None, dropout_prob=None):
    """
    Build a DeconvolutionalLayer from ``param_dict``, run it on a 2-D or
    3-D test input, and assert the output shape.

    :param rank: 2 or 3, selects the spatial rank of the test input
    :param param_dict: kwargs forwarded to DeconvolutionalLayer
    :param output_shape: expected shape of the evaluated output
    :param is_training: forwarded to the layer call
    :param dropout_prob: forwarded as ``keep_prob``
    :raises ValueError: if ``rank`` is not 2 or 3 (previously this fell
        through to an UnboundLocalError on ``input_data``)
    """
    if rank == 2:
        input_data = self.get_2d_input()
    elif rank == 3:
        input_data = self.get_3d_input()
    else:
        raise ValueError('rank must be 2 or 3, got {}'.format(rank))
    deconv_layer = DeconvolutionalLayer(**param_dict)
    output_data = deconv_layer(input_data,
                               is_training=is_training,
                               keep_prob=dropout_prob)
    print(deconv_layer)
    with self.cached_session() as sess:
        sess.run(tf.global_variables_initializer())
        output_value = sess.run(output_data)
        self.assertAllClose(output_shape, output_value.shape)
def layer_op(self, input_tensor, is_training):
    """
    :param input_tensor: tensor, input to the UNet block
    :param is_training: boolean, True if network is in training mode
    :return: output tensor of the UNet block and branch before
        downsampling (if required)
    """
    flow = input_tensor
    # apply the convolution stack, one layer per (kernel, channels) pair
    for kernel_size, n_features in zip(self.kernels, self.n_chns):
        conv_op = ConvolutionalLayer(n_output_chns=n_features,
                                     kernel_size=kernel_size,
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     acti_func=self.acti_func,
                                     name='{}'.format(n_features))
        flow = conv_op(flow, is_training)
    # optionally expose the pre-resampling features (skip connection)
    branch_output = flow if self.with_downsample_branch else None
    if self.func == 'DOWNSAMPLE':
        flow = DownSampleLayer('MAX',
                               kernel_size=2,
                               stride=2,
                               name='down_2x2')(flow)
    elif self.func == 'UPSAMPLE':
        flow = DeconvolutionalLayer(n_output_chns=self.n_chns[-1],
                                    kernel_size=2,
                                    stride=2,
                                    name='up_2x2')(flow, is_training)
    elif self.func == 'NONE':
        pass  # keep resolution unchanged
    return flow, branch_output
def layer_op(self, images, is_training):
    """
    Build a three-scale dilated residual network and return class scores.

    :param images: input tensor; the ``tf.concat(..., axis=4)`` below
        implies a 5-D batch of 3-D volumes, channels last
    :param is_training: boolean, passed to every sub-layer (controls
        batch-norm behaviour where ``with_bn`` is enabled)
    :return: tensor of per-class predictions from ``final_pred``
    """
    # ---- layer definitions ------------------------------------------
    # Scale 1: two residual blocks with in-plane (1, 3, 3) kernels only.
    block1_1 = ResBlock(self.base_chns[0], kernels=[[1, 3, 3], [1, 3, 3]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block1_1')
    block1_2 = ResBlock(self.base_chns[0], kernels=[[1, 3, 3], [1, 3, 3]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block1_2')
    # Scale 2 residual blocks.
    block2_1 = ResBlock(self.base_chns[1], kernels=[[1, 3, 3], [1, 3, 3]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block2_1')
    block2_2 = ResBlock(self.base_chns[1], kernels=[[1, 3, 3], [1, 3, 3]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block2_2')
    # Scale 3: increasing in-plane dilation rates 1 -> 2 -> 3.
    block3_1 = ResBlock(self.base_chns[2], kernels=[[1, 3, 3], [1, 3, 3]],
                        dilation_rates=[[1, 1, 1], [1, 1, 1]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block3_1')
    block3_2 = ResBlock(self.base_chns[2], kernels=[[1, 3, 3], [1, 3, 3]],
                        dilation_rates=[[1, 2, 2], [1, 2, 2]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block3_2')
    block3_3 = ResBlock(self.base_chns[2], kernels=[[1, 3, 3], [1, 3, 3]],
                        dilation_rates=[[1, 3, 3], [1, 3, 3]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block3_3')
    # Stage 4 (same scale as stage 3): decreasing dilation 3 -> 2 -> 1.
    block4_1 = ResBlock(self.base_chns[3], kernels=[[1, 3, 3], [1, 3, 3]],
                        dilation_rates=[[1, 3, 3], [1, 3, 3]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block4_1')
    block4_2 = ResBlock(self.base_chns[3], kernels=[[1, 3, 3], [1, 3, 3]],
                        dilation_rates=[[1, 2, 2], [1, 2, 2]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block4_2')
    block4_3 = ResBlock(self.base_chns[3], kernels=[[1, 3, 3], [1, 3, 3]],
                        dilation_rates=[[1, 1, 1], [1, 1, 1]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block4_3')
    # "fuse" layers convolve along the first spatial axis with VALID
    # padding (kernel [3, 1, 1]) — presumably fusing across slices and
    # shrinking that axis by 2 per application; TODO confirm against
    # ConvolutionalLayer's padding semantics.
    # ``with_bn`` is disabled for self-normalising 'selu' activations.
    fuse1 = ConvolutionalLayer(self.base_chns[0],
                               kernel_size=[3, 1, 1],
                               padding='VALID',
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               b_initializer=self.initializers['b'],
                               b_regularizer=self.regularizers['b'],
                               acti_func=self.acti_func,
                               with_bn=self.acti_func != 'selu',
                               name='fuse1')
    # strided in-plane convolution halves the two in-plane axes
    downsample1 = ConvolutionalLayer(self.base_chns[0],
                                     kernel_size=[1, 3, 3],
                                     stride=[1, 2, 2],
                                     padding='SAME',
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     b_initializer=self.initializers['b'],
                                     b_regularizer=self.regularizers['b'],
                                     acti_func=self.acti_func,
                                     with_bn=self.acti_func != 'selu',
                                     name='downsample1')
    fuse2 = ConvolutionalLayer(self.base_chns[1],
                               kernel_size=[3, 1, 1],
                               padding='VALID',
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               b_initializer=self.initializers['b'],
                               b_regularizer=self.regularizers['b'],
                               acti_func=self.acti_func,
                               with_bn=self.acti_func != 'selu',
                               name='fuse2')
    downsample2 = ConvolutionalLayer(self.base_chns[1],
                                     kernel_size=[1, 3, 3],
                                     stride=[1, 2, 2],
                                     padding='SAME',
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     b_initializer=self.initializers['b'],
                                     b_regularizer=self.regularizers['b'],
                                     acti_func=self.acti_func,
                                     with_bn=self.acti_func != 'selu',
                                     name='downsample2')
    fuse3 = ConvolutionalLayer(self.base_chns[2],
                               kernel_size=[3, 1, 1],
                               padding='VALID',
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               b_initializer=self.initializers['b'],
                               b_regularizer=self.regularizers['b'],
                               acti_func=self.acti_func,
                               with_bn=self.acti_func != 'selu',
                               name='fuse3')
    fuse4 = ConvolutionalLayer(self.base_chns[3],
                               kernel_size=[3, 1, 1],
                               padding='VALID',
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               b_initializer=self.initializers['b'],
                               b_regularizer=self.regularizers['b'],
                               acti_func=self.acti_func,
                               with_bn=self.acti_func != 'selu',
                               name='fuse4')
    # 1x1x1 projections used only when consecutive stages differ in
    # channel count (see the conditional calls below).
    feature_expand1 = ConvolutionalLayer(
        self.base_chns[1],
        kernel_size=[1, 1, 1],
        stride=[1, 1, 1],
        padding='SAME',
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        b_initializer=self.initializers['b'],
        b_regularizer=self.regularizers['b'],
        acti_func=self.acti_func,
        with_bn=self.acti_func != 'selu',
        name='feature_expand1')
    feature_expand2 = ConvolutionalLayer(
        self.base_chns[2],
        kernel_size=[1, 1, 1],
        stride=[1, 1, 1],
        padding='SAME',
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        b_initializer=self.initializers['b'],
        b_regularizer=self.regularizers['b'],
        acti_func=self.acti_func,
        with_bn=self.acti_func != 'selu',
        name='feature_expand2')
    feature_expand3 = ConvolutionalLayer(
        self.base_chns[3],
        kernel_size=[1, 1, 1],
        stride=[1, 1, 1],
        padding='SAME',
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        b_initializer=self.initializers['b'],
        b_regularizer=self.regularizers['b'],
        acti_func=self.acti_func,
        with_bn=self.acti_func != 'selu',
        name='feature_expand3')
    # NOTE(review): presumably crops a margin so the three prediction
    # branches align spatially after the fuse layers' VALID convolutions
    # — TODO confirm TensorSliceLayer semantics.
    centra_slice1 = TensorSliceLayer(margin=2)
    centra_slice2 = TensorSliceLayer(margin=1)
    # Prediction heads: deconvolutions restore in-plane resolution
    # (one 2x step for scale 1, two 2x steps for scales 2 and 3).
    pred_up1 = DeconvolutionalLayer(self.num_classes,
                                    kernel_size=[1, 3, 3],
                                    stride=[1, 2, 2],
                                    padding='SAME',
                                    w_initializer=self.initializers['w'],
                                    w_regularizer=self.regularizers['w'],
                                    b_initializer=self.initializers['b'],
                                    b_regularizer=self.regularizers['b'],
                                    acti_func=self.acti_func,
                                    with_bn=self.acti_func != 'selu',
                                    name='pred_up1')
    pred_up2_1 = DeconvolutionalLayer(self.num_classes * 2,
                                      kernel_size=[1, 3, 3],
                                      stride=[1, 2, 2],
                                      padding='SAME',
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      b_initializer=self.initializers['b'],
                                      b_regularizer=self.regularizers['b'],
                                      acti_func=self.acti_func,
                                      with_bn=self.acti_func != 'selu',
                                      name='pred_up2_1')
    pred_up2_2 = DeconvolutionalLayer(self.num_classes * 2,
                                      kernel_size=[1, 3, 3],
                                      stride=[1, 2, 2],
                                      padding='SAME',
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      b_initializer=self.initializers['b'],
                                      b_regularizer=self.regularizers['b'],
                                      acti_func=self.acti_func,
                                      with_bn=self.acti_func != 'selu',
                                      name='pred_up2_2')
    pred_up3_1 = DeconvolutionalLayer(self.num_classes * 4,
                                      kernel_size=[1, 3, 3],
                                      stride=[1, 2, 2],
                                      padding='SAME',
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      b_initializer=self.initializers['b'],
                                      b_regularizer=self.regularizers['b'],
                                      acti_func=self.acti_func,
                                      with_bn=self.acti_func != 'selu',
                                      name='pred_up3_1')
    pred_up3_2 = DeconvolutionalLayer(self.num_classes * 4,
                                      kernel_size=[1, 3, 3],
                                      stride=[1, 2, 2],
                                      padding='SAME',
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      b_initializer=self.initializers['b'],
                                      b_regularizer=self.regularizers['b'],
                                      acti_func=self.acti_func,
                                      with_bn=self.acti_func != 'selu',
                                      name='pred_up3_2')
    # final classifier over the concatenated multi-scale predictions
    final_pred = ConvLayer(self.num_classes,
                           kernel_size=[1, 3, 3],
                           padding='SAME',
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           b_initializer=self.initializers['b'],
                           b_regularizer=self.regularizers['b'],
                           name='final_pred')
    # ---- forward pass -----------------------------------------------
    # Stage 1 + 2 features (f1), downsampled once in-plane.
    f1 = images
    f1 = block1_1(f1, is_training=is_training)
    f1 = block1_2(f1, is_training=is_training)
    f1 = fuse1(f1, is_training=is_training)
    f1 = downsample1(f1, is_training=is_training)
    if self.base_chns[0] != self.base_chns[1]:
        # match channel count before the next stage
        f1 = feature_expand1(f1, is_training=is_training)
    f1 = block2_1(f1, is_training=is_training)
    f1 = block2_2(f1, is_training=is_training)
    f1 = fuse2(f1, is_training=is_training)
    # Stage 3 features (f2), downsampled a second time.
    f2 = downsample2(f1, is_training=is_training)
    if self.base_chns[1] != self.base_chns[2]:
        f2 = feature_expand2(f2, is_training=is_training)
    f2 = block3_1(f2, is_training=is_training)
    f2 = block3_2(f2, is_training=is_training)
    f2 = block3_3(f2, is_training=is_training)
    f2 = fuse3(f2, is_training=is_training)
    # Stage 4 features (f3), same scale as stage 3.
    f3 = f2
    if self.base_chns[2] != self.base_chns[3]:
        # NOTE(review): is_training passed positionally here, by keyword
        # everywhere else — same value, but inconsistent style.
        f3 = feature_expand3(f3, is_training)
    f3 = block4_1(f3, is_training=is_training)
    f3 = block4_2(f3, is_training=is_training)
    f3 = block4_3(f3, is_training=is_training)
    f3 = fuse4(f3, is_training=is_training)
    # Per-scale prediction branches, cropped/upsampled to align.
    p1 = centra_slice1(f1)
    p1 = pred_up1(p1, is_training=is_training)
    p2 = centra_slice2(f2)
    p2 = pred_up2_1(p2, is_training=is_training)
    p2 = pred_up2_2(p2, is_training=is_training)
    p3 = pred_up3_1(f3, is_training=is_training)
    p3 = pred_up3_2(p3, is_training=is_training)
    # fuse the three branches along the channel axis and classify
    cat = tf.concat([p1, p2, p3], axis=4, name='concate')
    pred = final_pred(cat)
    return pred
def layer_op(self, codes, is_training):
    """
    Decode latent ``codes`` back to feature maps: fully-connected layers,
    a reshape to ``self.downsampled_shape``, then upsampling + transposed
    convolutions per ``self.upsampling_mode``.

    :param codes: tensor of latent codes (flat, fed to FC layers)
    :param is_training: boolean, passed to layers with batch norm
    :return: decoded tensor after the last transposed convolution
    """
    # Define the decoding fully-connected layers
    decoders_fc = []
    for i in range(0, len(self.layer_sizes_decoder)):
        decoders_fc.append(FullyConnectedLayer(
            n_output_chns=self.layer_sizes_decoder[i],
            with_bias=True,
            with_bn=True,
            acti_func=self.acti_func_decoder[i],
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name='decoder_fc_{}'.format(self.layer_sizes_decoder[i])))
        print(decoders_fc[-1])
    # Define the decoding convolutional layers
    decoders_cnn = []
    decoders_upsamplers = []
    for i in range(0, len(self.trans_conv_output_channels)):
        if self.upsampling_mode == 'DECONV':
            # learned upsampler: kernel == stride == unpooling factor,
            # no activation (the following conv provides non-linearity)
            decoders_upsamplers.append(DeconvolutionalLayer(
                n_output_chns=self.trans_conv_output_channels[i],
                kernel_size=self.trans_conv_unpooling_factors[i],
                stride=self.trans_conv_unpooling_factors[i],
                padding='SAME',
                with_bias=True,
                with_bn=True,
                w_initializer=self.initializers['w'],
                w_regularizer=None,
                acti_func=None,
                name='decoder_upsampler_{}_{}'.format(
                    self.trans_conv_unpooling_factors[i],
                    self.trans_conv_unpooling_factors[i])))
            print(decoders_upsamplers[-1])
        # stride-1 transposed convolution at the upsampled resolution
        decoders_cnn.append(DeconvolutionalLayer(
            n_output_chns=self.trans_conv_output_channels[i],
            kernel_size=self.trans_conv_kernel_sizes[i],
            stride=1,
            padding='SAME',
            with_bias=True,
            with_bn=True,
            #with_bn=not (i == len(self.trans_conv_output_channels) - 1),
            # No BN on output
            w_initializer=self.initializers['w'],
            w_regularizer=None,
            acti_func=self.acti_func_trans_conv[i],
            name='decoder_trans_conv_{}_{}'.format(
                self.trans_conv_kernel_sizes[i],
                self.trans_conv_output_channels[i])))
        print(decoders_cnn[-1])
    # Fully-connected decoder layers
    flow = codes
    for i in range(0, len(self.layer_sizes_decoder)):
        flow = decoders_fc[i](flow, is_training)
    # Reconstitute the feature maps
    flow = tf.reshape(flow, [-1] + self.downsampled_shape)
    # Convolutional decoder layers: upsample then convolve, per step
    for i in range(0, len(self.trans_conv_output_channels)):
        if self.upsampling_mode == 'DECONV':
            flow = decoders_upsamplers[i](flow, is_training)
        elif self.upsampling_mode == 'CHANNELWISE_DECONV':
            flow = UpSampleLayer(
                'CHANNELWISE_DECONV',
                kernel_size=self.trans_conv_unpooling_factors[i],
                stride=self.trans_conv_unpooling_factors[i])(flow)
        elif self.upsampling_mode == 'REPLICATE':
            flow = UpSampleLayer(
                'REPLICATE',
                kernel_size=self.trans_conv_unpooling_factors[i],
                stride=self.trans_conv_unpooling_factors[i])(flow)
        flow = decoders_cnn[i](flow, is_training)
    return flow
def layer_op(self, images, is_training, layer_id=-1):
    """
    Assemble the network from ``self.layers`` and run ``images`` through
    it: a stride-2 convolution, three groups of dilated residual blocks
    (dilation 1, 2, 4), a 1x1x1 convolution, a stride-2 deconvolution,
    and a final 1x1x1 convolution.

    :param images: input tensor; spatial dims must be divisible by 8
    :param is_training: boolean; when True the layer summary is printed
        and the final output is returned
    :param layer_id: index into the recorded (layer, output) pairs; the
        output of that layer is returned when not training
    :return: output tensor of the selected layer
    """
    assert (layer_util.check_spatial_dims(
        images, lambda x: x % 8 == 0))
    # go through self.layers, create an instance of each layer
    # and plugin data
    layer_instances = []

    ### first convolution layer (stride 2: halves spatial resolution)
    params = self.layers[0]
    first_conv_layer = ConvolutionalLayer(
        n_output_chns=params['n_features'],
        kernel_size=params['kernel_size'],
        stride=2,
        acti_func=self.acti_func,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        name=params['name'])
    flow = first_conv_layer(images, is_training)
    layer_instances.append((first_conv_layer, flow))

    ### resblocks, all kernels dilated by 1 (normal convolution)
    params = self.layers[1]
    with DilatedTensor(flow, dilation_factor=1) as dilated:
        for j in range(params['repeat']):
            res_block = HighResBlock(
                params['n_features'],
                params['kernels'],
                acti_func=self.acti_func,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='%s_%d' % (params['name'], j))
            dilated.tensor = res_block(dilated.tensor, is_training)
            layer_instances.append((res_block, dilated.tensor))
    flow = dilated.tensor

    ### resblocks, all kernels dilated by 2
    params = self.layers[2]
    with DilatedTensor(flow, dilation_factor=2) as dilated:
        for j in range(params['repeat']):
            res_block = HighResBlock(
                params['n_features'],
                params['kernels'],
                acti_func=self.acti_func,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='%s_%d' % (params['name'], j))
            dilated.tensor = res_block(dilated.tensor, is_training)
            layer_instances.append((res_block, dilated.tensor))
    flow = dilated.tensor

    ### resblocks, all kernels dilated by 4
    params = self.layers[3]
    with DilatedTensor(flow, dilation_factor=4) as dilated:
        for j in range(params['repeat']):
            res_block = HighResBlock(
                params['n_features'],
                params['kernels'],
                acti_func=self.acti_func,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='%s_%d' % (params['name'], j))
            dilated.tensor = res_block(dilated.tensor, is_training)
            layer_instances.append((res_block, dilated.tensor))
    flow = dilated.tensor

    ### 1x1x1 convolution layer
    params = self.layers[4]
    fc_layer = ConvolutionalLayer(
        n_output_chns=params['n_features'],
        kernel_size=params['kernel_size'],
        acti_func=self.acti_func,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        name=params['name'])
    flow = fc_layer(flow, is_training)
    layer_instances.append((fc_layer, flow))

    ### 3x3x3 deconvolution layer
    # NOTE(review): reuses self.layers[4] 'n_features' for the deconv —
    # appears intentional (same channel count as the preceding 1x1x1
    # layer), but confirm there is no dedicated deconv entry in
    # self.layers.
    params = self.layers[4]
    fc_layer = DeconvolutionalLayer(
        n_output_chns=params['n_features'],
        kernel_size=3,
        stride=2,
        acti_func=self.acti_func,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        name='deconv')
    flow = fc_layer(flow, is_training)
    layer_instances.append((fc_layer, flow))

    ### 1x1x1 convolution layer (no activation: raw scores)
    params = self.layers[5]
    fc_layer = ConvolutionalLayer(
        n_output_chns=params['n_features'],
        kernel_size=params['kernel_size'],
        acti_func=None,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        name=params['name'])
    flow = fc_layer(flow, is_training)
    layer_instances.append((fc_layer, flow))

    # set training properties
    if is_training:
        self._print(layer_instances)
        return layer_instances[-1][1]
    return layer_instances[layer_id][1]
def layer_op(self, images, is_training, bn_momentum=0.9, layer_id=-1):
    """
    2.5-D U-Net forward pass: four in-plane (1, 3, 3) encoder blocks
    with max-pool downsampling, a bottleneck, and four decoder blocks
    with deconvolution upsampling and skip concatenations, followed by
    a 1x1x1 classifier.

    :param images: input tensor; in-plane dims must be divisible by 16
        for the four 2x pooling steps to invert cleanly (the original
        asserts are commented out below)
    :param is_training: boolean, forwarded to every block
    :param bn_momentum: batch-norm momentum forwarded to every block
    :param layer_id: unused in this implementation
    :return: per-class score tensor from the final 1x1x1 convolution
    """
    # image_size should be divisible by 8
    # spatial_dims = images.get_shape()[1:-1].as_list()
    # assert (spatial_dims[-2] % 16 == 0 )
    # assert (spatial_dims[-1] % 16 == 0 )
    # Encoder blocks (B1-B4), bottleneck (B5), decoder blocks (B6-B9);
    # all use two (1, 3, 3) in-plane convolutions.
    block1 = UNetBlock((self.n_features[0], self.n_features[0]),
                       ((1, 3, 3), (1, 3, 3)),
                       w_initializer=self.initializers['w'],
                       w_regularizer=self.regularizers['w'],
                       acti_func=self.acti_func,
                       name='B1')
    block2 = UNetBlock((self.n_features[1], self.n_features[1]),
                       ((1, 3, 3), (1, 3, 3)),
                       w_initializer=self.initializers['w'],
                       w_regularizer=self.regularizers['w'],
                       acti_func=self.acti_func,
                       name='B2')
    block3 = UNetBlock((self.n_features[2], self.n_features[2]),
                       ((1, 3, 3), (1, 3, 3)),
                       w_initializer=self.initializers['w'],
                       w_regularizer=self.regularizers['w'],
                       acti_func=self.acti_func,
                       name='B3')
    block4 = UNetBlock((self.n_features[3], self.n_features[3]),
                       ((1, 3, 3), (1, 3, 3)),
                       w_initializer=self.initializers['w'],
                       w_regularizer=self.regularizers['w'],
                       acti_func=self.acti_func,
                       name='B4')
    block5 = UNetBlock((self.n_features[4], self.n_features[4]),
                       ((1, 3, 3), (1, 3, 3)),
                       w_initializer=self.initializers['w'],
                       w_regularizer=self.regularizers['w'],
                       acti_func=self.acti_func,
                       name='B5')
    block6 = UNetBlock((self.n_features[3], self.n_features[3]),
                       ((1, 3, 3), (1, 3, 3)),
                       w_initializer=self.initializers['w'],
                       w_regularizer=self.regularizers['w'],
                       acti_func=self.acti_func,
                       name='B6')
    block7 = UNetBlock((self.n_features[2], self.n_features[2]),
                       ((1, 3, 3), (1, 3, 3)),
                       w_initializer=self.initializers['w'],
                       w_regularizer=self.regularizers['w'],
                       acti_func=self.acti_func,
                       name='B7')
    block8 = UNetBlock((self.n_features[1], self.n_features[1]),
                       ((1, 3, 3), (1, 3, 3)),
                       w_initializer=self.initializers['w'],
                       w_regularizer=self.regularizers['w'],
                       acti_func=self.acti_func,
                       name='B8')
    block9 = UNetBlock((self.n_features[0], self.n_features[0]),
                       ((1, 3, 3), (1, 3, 3)),
                       w_initializer=self.initializers['w'],
                       w_regularizer=self.regularizers['w'],
                       acti_func=self.acti_func,
                       name='B9')
    # final 1x1x1 classifier (bias, no activation argument: raw scores)
    conv = ConvLayer(n_output_chns=self.num_classes,
                     kernel_size=(1, 1, 1),
                     w_initializer=self.initializers['w'],
                     w_regularizer=self.regularizers['w'],
                     with_bias=True,
                     name='conv')
    # in-plane 2x max pooling for the encoder path
    down1 = DownSampleLayer('MAX', kernel_size=(1, 2, 2),
                            stride=(1, 2, 2), name='down1')
    down2 = DownSampleLayer('MAX', kernel_size=(1, 2, 2),
                            stride=(1, 2, 2), name='down2')
    down3 = DownSampleLayer('MAX', kernel_size=(1, 2, 2),
                            stride=(1, 2, 2), name='down3')
    down4 = DownSampleLayer('MAX', kernel_size=(1, 2, 2),
                            stride=(1, 2, 2), name='down4')
    # in-plane 2x deconvolution for the decoder path
    up1 = DeconvolutionalLayer(n_output_chns=self.n_features[3],
                               kernel_size=(1, 2, 2),
                               stride=(1, 2, 2),
                               name='up1')
    up2 = DeconvolutionalLayer(n_output_chns=self.n_features[2],
                               kernel_size=(1, 2, 2),
                               stride=(1, 2, 2),
                               name='up2')
    up3 = DeconvolutionalLayer(n_output_chns=self.n_features[1],
                               kernel_size=(1, 2, 2),
                               stride=(1, 2, 2),
                               name='up3')
    up4 = DeconvolutionalLayer(n_output_chns=self.n_features[0],
                               kernel_size=(1, 2, 2),
                               stride=(1, 2, 2),
                               name='up4')
    # ---- encoder ----
    f1 = block1(images, is_training, bn_momentum)
    d1 = down1(f1)
    f2 = block2(d1, is_training, bn_momentum)
    d2 = down2(f2)
    f3 = block3(d2, is_training, bn_momentum)
    d3 = down3(f3)
    f4 = block4(d3, is_training, bn_momentum)
    d4 = down4(f4)
    # ---- bottleneck ----
    f5 = block5(d4, is_training, bn_momentum)
    # add dropout to the original version
    # NOTE(review): TF1 tf.nn.dropout takes keep_prob, so self.dropout
    # is presumably a keep probability, not a drop rate — confirm.
    f5 = tf.nn.dropout(f5, self.dropout)
    # ---- decoder with skip concatenations along the channel axis ----
    f5up = up1(f5, is_training, bn_momentum)
    f4cat = tf.concat((f4, f5up), axis=-1)
    f6 = block6(f4cat, is_training, bn_momentum)
    # add dropout to the original version
    f6 = tf.nn.dropout(f6, self.dropout)
    f6up = up2(f6, is_training, bn_momentum)
    f3cat = tf.concat((f3, f6up), axis=-1)
    f7 = block7(f3cat, is_training, bn_momentum)
    # add dropout to the original version
    f7 = tf.nn.dropout(f7, self.dropout)
    f7up = up3(f7, is_training, bn_momentum)
    f2cat = tf.concat((f2, f7up), axis=-1)
    f8 = block8(f2cat, is_training, bn_momentum)
    # add dropout to the original version
    f8 = tf.nn.dropout(f8, self.dropout)
    f8up = up4(f8, is_training, bn_momentum)
    f1cat = tf.concat((f1, f8up), axis=-1)
    f9 = block9(f1cat, is_training, bn_momentum)
    # add dropout to the original version
    f9 = tf.nn.dropout(f9, self.dropout)
    output = conv(f9)
    return output
def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
    """
    :param images: tensor to input to the network. Size has to be
        divisible by 8
    :param is_training: boolean, True if network is in training mode;
        when True the layer summary is printed and the final output is
        returned regardless of ``layer_id``
    :param layer_id: int, index of the layer to return as output
    :param unused_kwargs: ignored
    :return: output of layer indicated by layer_id
    """
    assert (layer_util.check_spatial_dims(images, lambda x: x % 8 == 0))
    # go through self.layers, create an instance of each layer
    # and plugin data
    layer_instances = []

    ### first convolution layer (stride 2: halves spatial resolution)
    params = self.layers[0]
    first_conv_layer = ConvolutionalLayer(
        n_output_chns=params['n_features'],
        kernel_size=params['kernel_size'],
        stride=2,
        acti_func=self.acti_func,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        name=params['name'])
    flow = first_conv_layer(images, is_training)
    layer_instances.append((first_conv_layer, flow))

    ### resblocks, all kernels dilated by 1 (normal convolution)
    params = self.layers[1]
    with DilatedTensor(flow, dilation_factor=1) as dilated:
        for j in range(params['repeat']):
            res_block = HighResBlock(params['n_features'],
                                     params['kernels'],
                                     acti_func=self.acti_func,
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     name='%s_%d' % (params['name'], j))
            dilated.tensor = res_block(dilated.tensor, is_training)
            layer_instances.append((res_block, dilated.tensor))
    flow = dilated.tensor

    ### resblocks, all kernels dilated by 2
    params = self.layers[2]
    with DilatedTensor(flow, dilation_factor=2) as dilated:
        for j in range(params['repeat']):
            res_block = HighResBlock(params['n_features'],
                                     params['kernels'],
                                     acti_func=self.acti_func,
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     name='%s_%d' % (params['name'], j))
            dilated.tensor = res_block(dilated.tensor, is_training)
            layer_instances.append((res_block, dilated.tensor))
    flow = dilated.tensor

    ### resblocks, all kernels dilated by 4
    params = self.layers[3]
    with DilatedTensor(flow, dilation_factor=4) as dilated:
        for j in range(params['repeat']):
            res_block = HighResBlock(params['n_features'],
                                     params['kernels'],
                                     acti_func=self.acti_func,
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     name='%s_%d' % (params['name'], j))
            dilated.tensor = res_block(dilated.tensor, is_training)
            layer_instances.append((res_block, dilated.tensor))
    flow = dilated.tensor

    ### 1x1x1 convolution layer
    params = self.layers[4]
    fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                  kernel_size=params['kernel_size'],
                                  acti_func=self.acti_func,
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  name=params['name'])
    flow = fc_layer(flow, is_training)
    layer_instances.append((fc_layer, flow))

    ### 3x3x3 deconvolution layer
    # NOTE(review): intentionally reuses self.layers[4] 'n_features' so
    # the deconv keeps the same channel count as the 1x1x1 layer above.
    params = self.layers[4]
    fc_layer = DeconvolutionalLayer(n_output_chns=params['n_features'],
                                    kernel_size=3,
                                    stride=2,
                                    acti_func=self.acti_func,
                                    w_initializer=self.initializers['w'],
                                    w_regularizer=self.regularizers['w'],
                                    name='deconv')
    flow = fc_layer(flow, is_training)
    layer_instances.append((fc_layer, flow))

    ### 1x1x1 convolution layer (no activation: raw scores)
    params = self.layers[5]
    fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                  kernel_size=params['kernel_size'],
                                  acti_func=None,
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  name=params['name'])
    flow = fc_layer(flow, is_training)
    layer_instances.append((fc_layer, flow))

    # set training properties
    if is_training:
        self._print(layer_instances)
        return layer_instances[-1][1]
    return layer_instances[layer_id][1]
def layer_op(self, images, is_training, bn_momentum=0.9, layer_id=-1):
    """
    FCN-style forward pass: five encoder blocks, two wide convolutions,
    per-scale score maps from the block-3/4 features and the top of the
    network, then stepwise deconvolution with score summation (FCN skip
    fusion) back to input resolution.

    :param images: input tensor fed to the first FCN block
    :param is_training: boolean, forwarded to every block
    :param bn_momentum: batch-norm momentum forwarded to every block
    :param layer_id: unused in this implementation
    :return: fused per-class score tensor at (approximately) input scale
    """
    # ---- encoder blocks ----
    block1 = FCNBlock((self.n_features[0], self.n_features[0]),
                      w_initializer=self.initializers['w'],
                      w_regularizer=self.regularizers['w'],
                      acti_func=self.acti_func,
                      name='B1')
    block2 = FCNBlock((self.n_features[1], self.n_features[1]),
                      w_initializer=self.initializers['w'],
                      w_regularizer=self.regularizers['w'],
                      acti_func=self.acti_func,
                      name='B2')
    block3 = FCNBlock(
        (self.n_features[2], self.n_features[2], self.n_features[2]),
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        acti_func=self.acti_func,
        name='B3')
    block4 = FCNBlock(
        (self.n_features[3], self.n_features[3], self.n_features[3]),
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        acti_func=self.acti_func,
        name='B4')
    block5 = FCNBlock(
        (self.n_features[4], self.n_features[4], self.n_features[4]),
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        acti_func=self.acti_func,
        name='B5')
    # wide "fully-connected-as-convolution" layers on top of the encoder
    conv6 = ConvolutionalLayer(n_output_chns=self.n_features[-1] * 2,
                               kernel_size=[1, 3, 3],
                               with_bias=True,
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               acti_func=self.acti_func,
                               name='conv6')
    conv7 = ConvolutionalLayer(n_output_chns=self.n_features[-1] * 2,
                               kernel_size=[1, 1, 1],
                               with_bias=True,
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               acti_func=self.acti_func,
                               name='conv7')
    # per-scale score heads (no activation: raw class scores)
    conv_score3 = ConvolutionalLayer(n_output_chns=self.num_classes,
                                     kernel_size=[1, 3, 3],
                                     with_bias=True,
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     acti_func=None,
                                     name='score3')
    conv_score4 = ConvolutionalLayer(n_output_chns=self.num_classes,
                                     kernel_size=[1, 3, 3],
                                     with_bias=True,
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     acti_func=None,
                                     name='score4')
    conv_score5 = ConvolutionalLayer(n_output_chns=self.num_classes,
                                     kernel_size=[1, 3, 3],
                                     with_bias=True,
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     acti_func=None,
                                     name='score5')
    # in-plane 2x deconvolutions for the two skip-fusion steps, then a
    # final 8x deconvolution back towards input resolution
    up1 = DeconvolutionalLayer(n_output_chns=self.num_classes,
                               kernel_size=(1, 4, 4),
                               stride=(1, 2, 2),
                               with_bias=True,
                               acti_func=None,
                               name='up1')
    up2 = DeconvolutionalLayer(n_output_chns=self.num_classes,
                               kernel_size=(1, 4, 4),
                               stride=(1, 2, 2),
                               with_bias=True,
                               acti_func=None,
                               name='up2')
    up3 = DeconvolutionalLayer(n_output_chns=self.num_classes,
                               kernel_size=(1, 16, 16),
                               stride=(1, 8, 8),
                               with_bias=True,
                               acti_func=None,
                               name='up3')
    # ---- forward pass ----
    f1 = block1(images, is_training, bn_momentum)
    f2 = block2(f1, is_training, bn_momentum)
    f3 = block3(f2, is_training, bn_momentum)
    # NOTE(review): TF1 tf.nn.dropout takes keep_prob, so self.dropout
    # is presumably a keep probability, not a drop rate — confirm.
    f3 = tf.nn.dropout(f3, self.dropout)
    f4 = block4(f3, is_training, bn_momentum)
    f4 = tf.nn.dropout(f4, self.dropout)
    f5 = block5(f4, is_training, bn_momentum)
    f5 = tf.nn.dropout(f5, self.dropout)
    f6 = conv6(f5, is_training, bn_momentum)
    f6 = tf.nn.dropout(f6, self.dropout)
    f7 = conv7(f6, is_training, bn_momentum)
    f7 = tf.nn.dropout(f7, self.dropout)
    # score maps at the three scales
    score3 = conv_score3(f3, is_training, bn_momentum)
    score4 = conv_score4(f4, is_training, bn_momentum)
    score5 = conv_score5(f7, is_training, bn_momentum)
    # FCN skip fusion: upsample coarse scores and add finer-scale scores
    pred = up1(score5, is_training, bn_momentum)
    pred = pred + score4
    pred = up2(pred, is_training, bn_momentum)
    pred = pred + score3
    pred = up3(pred, is_training, bn_momentum)
    return pred
def layer_op(self, images, is_training):
    """Build the cascaded dilated-residual network and return class scores.

    Three cascaded stages of residual blocks with in-plane (1x3x3)
    kernels; stages 3 and 4 use dilated convolutions.  After each of
    the first two stages a [3, 1, 1] VALID 'fuse' convolution mixes
    adjacent slices, the feature map is downsampled in-plane (2x),
    and a resized copy of the raw input is concatenated back onto the
    features.  Predictions from all three depths are upsampled and
    concatenated before a final linear convolution.

    :param images: 5-D input tensor (assumed NDHWC, channel axis 4 —
        see the ``tf.concat(..., axis=4)`` calls; confirm with callers)
    :param is_training: training/inference flag forwarded to sub-layers
    :return: voxel-wise class score tensor (``self.num_classes`` channels)
    """
    # --- Stage 1: two residual blocks at base_chns[0] channels. -------
    block1_1 = ResBlock(self.base_chns[0],
                        kernels=[[1, 3, 3], [1, 3, 3]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block1_1')
    block1_2 = ResBlock(self.base_chns[0],
                        kernels=[[1, 3, 3], [1, 3, 3]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block1_2')
    # --- Stage 2: two residual blocks at base_chns[1] channels. -------
    block2_1 = ResBlock(self.base_chns[1],
                        kernels=[[1, 3, 3], [1, 3, 3]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block2_1')
    block2_2 = ResBlock(self.base_chns[1],
                        kernels=[[1, 3, 3], [1, 3, 3]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block2_2')
    # --- Stage 3: residual blocks with growing in-plane dilation. -----
    block3_1 = ResBlock(self.base_chns[2],
                        kernels=[[1, 3, 3], [1, 3, 3]],
                        dilation_rates=[[1, 1, 1], [1, 1, 1]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block3_1')
    block3_2 = ResBlock(self.base_chns[2],
                        kernels=[[1, 3, 3], [1, 3, 3]],
                        dilation_rates=[[1, 2, 2], [1, 2, 2]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block3_2')
    block4_1 = ResBlock(self.base_chns[3],
                        kernels=[[1, 3, 3], [1, 3, 3]],
                        dilation_rates=[[1, 3, 3], [1, 3, 3]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block4_1')
    block4_2 = ResBlock(self.base_chns[3],
                        kernels=[[1, 3, 3], [1, 3, 3]],
                        dilation_rates=[[1, 2, 2], [1, 2, 2]],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='block4_2')
    # 'fuse' convolutions mix information across adjacent slices:
    # [3, 1, 1] kernel with VALID padding shrinks the through-plane
    # dimension by 2 at each fuse.
    fuse1 = ConvolutionalLayer(
        self.base_chns[0],
        kernel_size=[3, 1, 1],  # inter-slice (through-plane) convolution
        padding='VALID',
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        b_initializer=self.initializers['b'],
        b_regularizer=self.regularizers['b'],
        acti_func=self.acti_func,
        name='fuse1')
    # In-plane 2x downsampling by strided convolution.
    downsample1 = ConvolutionalLayer(self.base_chns[0],
                                     kernel_size=[1, 3, 3],
                                     stride=[1, 2, 2],
                                     padding='SAME',
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     b_initializer=self.initializers['b'],
                                     b_regularizer=self.regularizers['b'],
                                     acti_func=self.acti_func,
                                     name='downsample1')
    fuse2 = ConvolutionalLayer(self.base_chns[1],
                               kernel_size=[3, 1, 1],
                               padding='VALID',
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               b_initializer=self.initializers['b'],
                               b_regularizer=self.regularizers['b'],
                               acti_func=self.acti_func,
                               name='fuse2')
    downsample2 = ConvolutionalLayer(self.base_chns[1],
                                     kernel_size=[1, 3, 3],
                                     stride=[1, 2, 2],
                                     padding='SAME',
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     b_initializer=self.initializers['b'],
                                     b_regularizer=self.regularizers['b'],
                                     acti_func=self.acti_func,
                                     name='downsample2')
    fuse3 = ConvolutionalLayer(self.base_chns[2],
                               kernel_size=[3, 1, 1],
                               padding='VALID',
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               b_initializer=self.initializers['b'],
                               b_regularizer=self.regularizers['b'],
                               acti_func=self.acti_func,
                               name='fuse3')
    fuse4 = ConvolutionalLayer(self.base_chns[3],
                               kernel_size=[3, 1, 1],
                               padding='VALID',
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               b_initializer=self.initializers['b'],
                               b_regularizer=self.regularizers['b'],
                               acti_func=self.acti_func,
                               name='fuse4')
    # 1x1x1 convolutions that widen the channel count so consecutive
    # stages agree when base_chns values differ (used conditionally
    # below).
    feature_expand1 = ConvolutionalLayer(
        self.base_chns[1],  # target channel count of the next stage
        kernel_size=[1, 1, 1],
        stride=[1, 1, 1],
        padding='SAME',
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        b_initializer=self.initializers['b'],
        b_regularizer=self.regularizers['b'],
        acti_func=self.acti_func,
        name='feature_expand1')
    feature_expand2 = ConvolutionalLayer(
        self.base_chns[2],
        kernel_size=[1, 1, 1],
        stride=[1, 1, 1],
        padding='SAME',
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        b_initializer=self.initializers['b'],
        b_regularizer=self.regularizers['b'],
        acti_func=self.acti_func,
        name='feature_expand2')
    feature_expand3 = ConvolutionalLayer(
        self.base_chns[3],
        kernel_size=[1, 1, 1],
        stride=[1, 1, 1],
        padding='SAME',
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        b_initializer=self.initializers['b'],
        b_regularizer=self.regularizers['b'],
        acti_func=self.acti_func,
        name='feature_expand3')
    # Slice layers — presumably crop `margin` slices from each end of
    # the through-plane axis to match the size lost by the VALID
    # 'fuse' convolutions; confirm against TensorSliceLayer.
    centra_slice1 = TensorSliceLayer(margin=2)
    centra_slice2 = TensorSliceLayer(margin=1)
    # Resize the raw input to the spatial size of a reference tensor
    # (presumably — verify against ImageResize) for re-injection.
    image_resize1 = ImageResize()
    image_resize2 = ImageResize()
    # Per-depth prediction heads: deeper features get more deconv
    # steps (1x, 2x, 2x applications of 2x in-plane upsampling).
    pred_up1 = DeconvolutionalLayer(self.num_classes,
                                    kernel_size=[1, 3, 3],
                                    stride=[1, 2, 2],
                                    padding='SAME',
                                    w_initializer=self.initializers['w'],
                                    w_regularizer=self.regularizers['w'],
                                    b_initializer=self.initializers['b'],
                                    b_regularizer=self.regularizers['b'],
                                    acti_func=self.acti_func,
                                    name='pred_up1')
    pred_up2_1 = DeconvolutionalLayer(self.num_classes * 2,
                                      kernel_size=[1, 3, 3],
                                      stride=[1, 2, 2],
                                      padding='SAME',
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      b_initializer=self.initializers['b'],
                                      b_regularizer=self.regularizers['b'],
                                      acti_func=self.acti_func,
                                      name='pred_up2_1')
    pred_up2_2 = DeconvolutionalLayer(self.num_classes * 2,
                                      kernel_size=[1, 3, 3],
                                      stride=[1, 2, 2],
                                      padding='SAME',
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      b_initializer=self.initializers['b'],
                                      b_regularizer=self.regularizers['b'],
                                      acti_func=self.acti_func,
                                      name='pred_up2_2')
    pred_up3_1 = DeconvolutionalLayer(self.num_classes * 4,
                                      kernel_size=[1, 3, 3],
                                      stride=[1, 2, 2],
                                      padding='SAME',
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      b_initializer=self.initializers['b'],
                                      b_regularizer=self.regularizers['b'],
                                      acti_func=self.acti_func,
                                      name='pred_up3_1')
    pred_up3_2 = DeconvolutionalLayer(self.num_classes * 4,
                                      kernel_size=[1, 3, 3],
                                      stride=[1, 2, 2],
                                      padding='SAME',
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      b_initializer=self.initializers['b'],
                                      b_regularizer=self.regularizers['b'],
                                      acti_func=self.acti_func,
                                      name='pred_up3_2')
    # Final linear convolution (ConvLayer: no activation / batch-norm)
    # mapping the concatenated multi-depth predictions to num_classes
    # channels.
    final_pred = ConvLayer(
        self.num_classes,
        kernel_size=[1, 3, 3],
        padding='SAME',  # SAME keeps the spatial shape
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        b_initializer=self.initializers['b'],
        b_regularizer=self.regularizers['b'],
        name='final_pred')
    # ---------------- forward pass ----------------
    # Stage 1: residual blocks, fuse, in-plane downsample.
    f1 = images
    f1 = block1_1(f1, is_training)
    f1 = block1_2(f1, is_training)
    f1 = fuse1(f1, is_training)
    f1 = downsample1(f1, is_training)
    # Re-inject a resized, centre-cropped copy of the raw input.
    img_resize1 = image_resize1(images, f1)
    img_resize1 = centra_slice2(img_resize1)
    f1 = tf.concat([img_resize1, f1], axis=4, name='concate')
    if (self.base_chns[0] != self.base_chns[1]):
        # Match the channel count expected by the next stage's blocks.
        f1 = feature_expand1(
            f1, is_training
        )
    # Stage 2.
    f1 = block2_1(f1, is_training)
    f1 = block2_2(f1, is_training)
    f1 = fuse2(f1, is_training)
    f2 = downsample2(f1, is_training)
    img_resize2 = image_resize2(images, f2)
    img_resize2 = centra_slice1(img_resize2)
    f2 = tf.concat([img_resize2, f2], axis=4, name='concate')
    if (self.base_chns[1] != self.base_chns[2]):
        f2 = feature_expand2(f2, is_training)
    # Stage 3: dilated blocks, no further downsampling.
    f2 = block3_1(f2, is_training)
    f2 = block3_2(f2, is_training)
    f2 = fuse3(f2, is_training)
    f3 = f2
    if (self.base_chns[2] != self.base_chns[3]):
        f3 = feature_expand3(f3, is_training)
    f3 = block4_1(f3, is_training)
    f3 = block4_2(f3, is_training)
    f3 = fuse4(f3, is_training)
    # Per-depth predictions; shallower features are centre-cropped so
    # through-plane sizes line up before the final concatenation.
    p1 = centra_slice1(f1)
    p1 = pred_up1(p1, is_training)
    p2 = centra_slice2(f2)
    p2 = pred_up2_1(p2, is_training)
    p2 = pred_up2_2(p2, is_training)
    p3 = pred_up3_1(f3, is_training)
    p3 = pred_up3_2(p3, is_training)
    # Fuse the three prediction streams along the channel axis.
    cat = tf.concat([p1, p2, p3], axis=4, name='concate')
    pred = final_pred(cat)
    return pred
def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
    """Build the resize-then-convolve network and return its output.

    The input is resized to a (16, 16, 16) target (see
    CubicResizeLayer), passed through the convolution stages described
    by ``self.layers[0..3]`` (stage 2 is repeated ``'repeat'`` times)
    and finally through a 2x transposed convolution with linear output.
    Every (layer, output) pair is recorded so intermediate outputs can
    be retrieved at inference time.

    :param images: input tensor; every spatial dimension must be a
        multiple of 8.
    :param is_training: training/inference flag forwarded to sub-layers;
        when True the layer summary is printed and the final output is
        returned.
    :param layer_id: when not training, index of the recorded
        (layer, output) pair whose output is returned (-1 = final).
    :param unused_kwargs: ignored, accepted for interface compatibility.
    :return: the selected layer's output tensor.
    :raises ValueError: if a spatial dimension is not a multiple of 8.
    """
    # Explicit check instead of `assert` so validation survives `python -O`.
    if not layer_util.check_spatial_dims(images, lambda x: x % 8 == 0):
        raise ValueError(
            'image spatial dimensions must be multiples of 8')

    def _new_conv(params, name):
        # Conv layer with bias, no batch-norm, shared init/regularisers.
        return ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            with_bias=True,
            with_bn=False,
            acti_func=self.acti_func,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=name)

    layer_instances = []

    def _apply(layer, tensor):
        # Run `layer`, record the (layer, output) pair, return the output.
        out = layer(tensor, is_training)
        layer_instances.append((layer, out))
        return out

    # Resize the input to the fixed working grid before any convolution.
    flow = CubicResizeLayer((16, 16, 16))(images)

    # self.layers[0] and [1]: two single convolution stages.
    flow = _apply(_new_conv(self.layers[0], self.layers[0]['name']), flow)
    flow = _apply(_new_conv(self.layers[1], self.layers[1]['name']), flow)

    # self.layers[2]: one stage instantiated 'repeat' times with
    # suffixed names so each copy gets its own variables.
    params = self.layers[2]
    for j in range(params['repeat']):
        flow = _apply(
            _new_conv(params, '%s_%d' % (params['name'], j)), flow)

    # self.layers[3]: one more single convolution stage.
    flow = _apply(_new_conv(self.layers[3], self.layers[3]['name']), flow)

    # self.layers[4]: 2x upsampling by transposed convolution,
    # linear output (acti_func=None).
    params = self.layers[4]
    deconv_layer = DeconvolutionalLayer(
        n_output_chns=params['n_features'],
        kernel_size=params['kernel_size'],
        stride=2,
        padding='SAME',
        with_bias=True,
        with_bn=False,
        acti_func=None,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        name=params['name'])
    flow = _apply(deconv_layer, flow)

    if is_training:
        self._print(layer_instances)
        return layer_instances[-1][1]
    # Inference: allow callers to tap any recorded intermediate output.
    return layer_instances[layer_id][1]