def layer_op(self, input_tensor, is_training):
    """
    Alternately mixes information across the modality axis and the
    feature-channel axis with HighResBlocks, then fuses the final stack.

    :param input_tensor: tensor, input to the network
    :param is_training: boolean, True if network is in training mode
    :return: merged tensor after backend layers
    """
    n_modality = input_tensor.shape.as_list()[-1]
    n_chns = input_tensor.shape.as_list()[-2]
    rank = input_tensor.shape.ndims
    # permutation swapping the last two axes (feature channels <-> modalities)
    perm = list(range(rank))
    perm[-2], perm[-1] = perm[-1], perm[-2]

    def _cross_residual_blocks(tensor, n_output_chns, name_format, layer):
        # Swap the last two axes, run one HighResBlock per slice of the
        # (new) last axis, then stack the results back together.
        # Block names must match the originals so variable scopes are stable.
        tensor = tf.transpose(tensor, perm=perm)
        slices = tf.unstack(tensor, axis=-1)
        for idx, tensor_slice in enumerate(slices):
            block_name = name_format.format(layer, idx)
            highresblock_op = HighResBlock(
                n_output_chns=n_output_chns,
                kernels=(3, 1),
                with_res=True,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name=block_name)
            slices[idx] = highresblock_op(tensor_slice, is_training)
            print(highresblock_op)
        return tf.stack(slices, axis=-1)

    output_tensor = input_tensor
    for layer in range(self.n_layers):
        # modalities => feature channels
        output_tensor = _cross_residual_blocks(
            output_tensor, n_modality, 'M_F_{}_{}', layer)
        # feature channels => modalities
        output_tensor = _cross_residual_blocks(
            output_tensor, n_chns, 'F_M_{}_{}', layer)

    # fuse along the last (modality) axis
    if self.func == 'MAX':
        output_tensor = tf.reduce_max(output_tensor, axis=-1)
    elif self.func == 'AVERAGE':
        output_tensor = tf.reduce_mean(output_tensor, axis=-1)
    return output_tensor
def test_3d_increase_shape(self):
    """HighResBlock should grow channels 8 -> 16; spatial dims unchanged."""
    input_shape = (2, 16, 16, 16, 8)
    x = tf.ones(input_shape)
    highres_layer = HighResBlock(n_output_chns=16,
                                 kernels=(3, 3),
                                 with_res=True)
    out = highres_layer(x, is_training=True)
    print(highres_layer)
    # cached_session: test_session is deprecated; also matches the
    # sibling test test_3d_reg_reduce_shape.
    with self.cached_session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(out)
        self.assertAllClose((2, 16, 16, 16, 16), out.shape)
def test_3d_reg_reduce_shape(self):
    """A regularized HighResBlock should shrink channels 8 -> 4."""
    volume = tf.ones((2, 16, 16, 16, 8))
    block = HighResBlock(
        n_output_chns=4,
        kernels=(3, 3),
        with_res=True,
        w_regularizer=regularizers.l2_regularizer(0.3))
    output = block(volume, is_training=True)
    print(block)
    with self.cached_session() as sess:
        sess.run(tf.global_variables_initializer())
        result = sess.run(output)
        self.assertAllClose((2, 16, 16, 16, 4), result.shape)
def layer_op(self, images, is_training, layer_id=-1):
    """
    Build the network graph and return one layer's output.

    :param images: input tensor; each spatial dim must be divisible by 8
    :param is_training: boolean, True if network is in training mode
    :param layer_id: int, index into the built (layer, output) list;
        defaults to -1 (the final output)
    :return: output tensor of the layer indicated by layer_id
    """
    assert layer_util.check_spatial_dims(images, lambda x: x % 8 == 0)
    # go through self.layers, create an instance of each layer
    # and plugin data
    layer_instances = []

    ### first convolution layer
    params = self.layers[0]
    first_conv_layer = ConvolutionalLayer(
        n_output_chns=params['n_features'],
        kernel_size=params['kernel_size'],
        acti_func=self.acti_func,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        name=params['name'])
    flow = first_conv_layer(images, is_training)
    layer_instances.append((first_conv_layer, flow))

    ### three groups of residual blocks, kernels dilated by 1, 2 and 4
    # (factor 1 is a normal convolution); previously three copy-pasted
    # sections, now a single loop over (layer index, dilation factor)
    for layer_idx, dilation_factor in ((1, 1), (2, 2), (3, 4)):
        params = self.layers[layer_idx]
        with DilatedTensor(flow, dilation_factor=dilation_factor) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(
                    params['n_features'],
                    params['kernels'],
                    acti_func=self.acti_func,
                    w_initializer=self.initializers['w'],
                    w_regularizer=self.regularizers['w'],
                    name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

    ### 3x3x3 convolution layer
    params = self.layers[4]
    fc_layer = ConvolutionalLayer(
        n_output_chns=params['n_features'],
        kernel_size=params['kernel_size'],
        acti_func=self.acti_func,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        name=params['name'])
    flow = fc_layer(flow, is_training)
    layer_instances.append((fc_layer, flow))

    ### 1x1x1 convolution layer
    params = self.layers[5]
    fc_layer = ConvolutionalLayer(
        n_output_chns=params['n_features'],
        kernel_size=params['kernel_size'],
        acti_func=self.acti_func,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        name=params['name'])
    flow = fc_layer(flow, is_training)
    layer_instances.append((fc_layer, flow))

    ### final 1x1x1 convolution layer, no activation (raw scores)
    params = self.layers[6]
    fc_layer = ConvolutionalLayer(
        n_output_chns=params['n_features'],
        kernel_size=params['kernel_size'],
        acti_func=None,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        name=params['name'])
    flow = fc_layer(flow, is_training)
    layer_instances.append((fc_layer, flow))

    # set training properties
    if is_training:
        self._print(layer_instances)
        return layer_instances[-1][1]
    return layer_instances[layer_id][1]
def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
    """
    :param images: tensor to input to the network. Size has to be divisible by 8
    :param is_training: boolean, True if network is in training mode
    :param layer_id: int, index of the layer to return as output
    :param unused_kwargs: ignored; kept for interface compatibility
    :return: output of layer indicated by layer_id
    """
    assert layer_util.check_spatial_dims(images, lambda x: x % 8 == 0)
    # go through self.layers, create an instance of each layer
    # and plugin data
    layer_instances = []

    ### first convolution layer (stride 2: downsamples by a factor of 2)
    params = self.layers[0]
    first_conv_layer = ConvolutionalLayer(
        n_output_chns=params['n_features'],
        kernel_size=params['kernel_size'],
        stride=2,
        acti_func=self.acti_func,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        name=params['name'])
    flow = first_conv_layer(images, is_training)
    layer_instances.append((first_conv_layer, flow))

    ### three groups of residual blocks, kernels dilated by 1, 2 and 4
    # (factor 1 is a normal convolution); previously three copy-pasted
    # sections, now a single loop over (layer index, dilation factor)
    for layer_idx, dilation_factor in ((1, 1), (2, 2), (3, 4)):
        params = self.layers[layer_idx]
        with DilatedTensor(flow, dilation_factor=dilation_factor) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(
                    params['n_features'],
                    params['kernels'],
                    acti_func=self.acti_func,
                    w_initializer=self.initializers['w'],
                    w_regularizer=self.regularizers['w'],
                    name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

    ### 1x1x1 convolution layer
    params = self.layers[4]
    fc_layer = ConvolutionalLayer(
        n_output_chns=params['n_features'],
        kernel_size=params['kernel_size'],
        acti_func=self.acti_func,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        name=params['name'])
    flow = fc_layer(flow, is_training)
    layer_instances.append((fc_layer, flow))

    ### 3x3x3 deconvolution layer, undoes the stride-2 downsampling
    # NOTE(review): n_features deliberately reuses self.layers[4] here,
    # as in the original code — confirm this is intended and not a typo
    # for self.layers[5].
    params = self.layers[4]
    fc_layer = DeconvolutionalLayer(
        n_output_chns=params['n_features'],
        kernel_size=3,
        stride=2,
        acti_func=self.acti_func,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        name='deconv')
    flow = fc_layer(flow, is_training)
    layer_instances.append((fc_layer, flow))

    ### final 1x1x1 convolution layer, no activation (raw scores)
    params = self.layers[5]
    fc_layer = ConvolutionalLayer(
        n_output_chns=params['n_features'],
        kernel_size=params['kernel_size'],
        acti_func=None,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        name=params['name'])
    flow = fc_layer(flow, is_training)
    layer_instances.append((fc_layer, flow))

    # set training properties
    if is_training:
        self._print(layer_instances)
        return layer_instances[-1][1]
    return layer_instances[layer_id][1]
def layer_op(self, input_tensor, is_training, layer_id=-1):
    """
    Multi-scale network: four stacks of HighResBlocks, each followed by
    a ScoreLayer; the per-scale softmax scores are merged by weighted
    averaging into a fused score.

    :param input_tensor: tensor, input to the network
    :param is_training: boolean, True if network is in training mode
    :param layer_id: int, unused here; kept for interface compatibility
    :return: list of per-scale scores plus the fused score if training,
        otherwise the fused score tensor only
    """
    layer_instances = []
    scores_instances = []

    def _res_block_stack(tensor, scale_idx, dilation_factor, name):
        # One dilated group of HighResBlocks for the given scale;
        # records each (block, output) pair in layer_instances.
        with DilatedTensor(tensor, dilation_factor=dilation_factor) as dilated:
            for j in range(self.num_res_blocks[scale_idx]):
                res_block = HighResBlock(
                    self.num_features[scale_idx],
                    acti_func=self.acti_func,
                    w_initializer=self.initializers['w'],
                    w_regularizer=self.regularizers['w'],
                    name='%s_%d' % (name, j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        return dilated.tensor

    first_conv_layer = ConvolutionalLayer(
        n_output_chns=self.num_features[0],
        with_bn=True,
        kernel_size=3,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        acti_func=self.acti_func,
        name='conv_1_1')
    flow = first_conv_layer(input_tensor, is_training)
    layer_instances.append((first_conv_layer, flow))

    # SCALE 1
    flow = _res_block_stack(flow, 0, 1, 'res_1')
    score_layer_scale1 = ScoreLayer(
        num_features=self.num_fea_score_layers[0],
        num_classes=self.num_classes)
    score_1 = score_layer_scale1(flow, is_training)
    scores_instances.append(score_1)

    # SCALE 2
    flow = _res_block_stack(flow, 1, 2, 'res_2')
    score_layer_scale2 = ScoreLayer(
        num_features=self.num_fea_score_layers[1],
        num_classes=self.num_classes)
    score_2 = score_layer_scale2(flow, is_training)
    scores_instances.append(score_2)

    # SCALE 3: downsample by a factor of 2, then upsample the scores back
    downsample_scale3 = DownSampleLayer(func='AVG', kernel_size=2, stride=2)
    flow = downsample_scale3(flow)
    layer_instances.append((downsample_scale3, flow))
    flow = _res_block_stack(flow, 2, 1, 'res_3')
    score_layer_scale3 = ScoreLayer(
        num_features=self.num_fea_score_layers[2],
        num_classes=self.num_classes)
    score_3 = score_layer_scale3(flow, is_training)
    upsample_indep_scale3 = UpSampleLayer(
        func='CHANNELWISE_DECONV',
        kernel_size=2,
        stride=2,
        w_initializer=tf.constant_initializer(1.0, dtype=tf.float32))
    up_score_3 = upsample_indep_scale3(score_3)
    scores_instances.append(up_score_3)

    # SCALE 4
    flow = _res_block_stack(flow, 3, 2, 'res_4')
    score_layer_scale4 = ScoreLayer(
        num_features=self.num_fea_score_layers[3],
        num_classes=self.num_classes)
    # BUG FIX: this call previously passed an extra positional argument
    # (self.num_fea_score_layers[3]) before is_training, unlike scales
    # 1-3; it now matches their call signature.
    score_4 = score_layer_scale4(flow, is_training)
    upsample_indep_scale4 = UpSampleLayer(
        func='CHANNELWISE_DECONV',
        # NOTE(review): kernel_size=1 here vs kernel_size=2 at scale 3 —
        # preserved from the original; confirm this asymmetry is intended.
        kernel_size=1,
        stride=2,
        w_initializer=tf.constant_initializer(1.0, dtype=tf.float32))
    up_score_4 = upsample_indep_scale4(score_4)
    scores_instances.append(up_score_4)

    # FUSED SCALES: weighted average of the per-scale softmax scores
    merge_layer = MergeLayer('WEIGHTED_AVERAGE')
    soft_scores = [tf.nn.softmax(s) for s in scores_instances]
    fused_score = merge_layer(soft_scores)
    scores_instances.append(fused_score)
    if is_training:
        return scores_instances
    return fused_score