Example #1
    def test_3d_dilating_shape(self):
        x = self.get_3d_input()
        with DilatedTensor(x, 4) as dilated:
            intermediate = dilated.tensor
        x = dilated.tensor

        with self.cached_session() as sess:
            out = sess.run(x)
            out_dilated = sess.run(intermediate)
            self.assertAllClose((2, 16, 16, 16, 8), out.shape)
            self.assertAllClose((128, 4, 4, 4, 8), out_dilated.shape)
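The shape assertions above pin down what the context manager does for a (2, 16, 16, 16, 8) input and a dilation factor of 4: inside the block the tensor is folded so that 4x4x4 spatial blocks move into the batch axis (2 * 4**3 = 128 and 16 / 4 = 4), and on exit the original layout is restored, which is why x keeps the input shape while intermediate still references the folded tensor. A minimal sketch of that folding, assuming it is built on TensorFlow's space-to-batch primitives (the real DilatedTensor internals may differ):

    import tensorflow as tf

    def fold_for_dilation(x, factor):
        # assumed behaviour when entering DilatedTensor, not the library code itself
        # (2, 16, 16, 16, 8) with factor 4 -> (2 * 4**3, 4, 4, 4, 8) = (128, 4, 4, 4, 8)
        return tf.space_to_batch_nd(x, block_shape=[factor] * 3, paddings=[[0, 0]] * 3)

    def unfold_after_dilation(x, factor):
        # assumed behaviour when leaving the context: (128, 4, 4, 4, 8) -> (2, 16, 16, 16, 8)
        return tf.batch_to_space_nd(x, block_shape=[factor] * 3, crops=[[0, 0]] * 3)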
Example #2
    def layer_op(self, images, is_training, layer_id=-1):
        assert layer_util.check_spatial_dims(images, lambda x: x % 8 == 0)
        # go through self.layers, create an instance of each layer
        # and plug in the data
        layer_instances = []

        ### first convolution layer
        params = self.layers[0]
        first_conv_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            acti_func=self.acti_func,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = first_conv_layer(images, is_training)
        layer_instances.append((first_conv_layer, flow))

        ### resblocks, all kernels dilated by 1 (normal convolution)
        params = self.layers[1]
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 2
        params = self.layers[2]
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 4
        params = self.layers[3]
        with DilatedTensor(flow, dilation_factor=4) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### 3x3x3 convolution layer
        params = self.layers[4]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      acti_func=self.acti_func,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 1x1x1 convolution layer
        params = self.layers[5]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      acti_func=self.acti_func,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 1x1x1 convolution layer
        params = self.layers[6]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      acti_func=None,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        # set training properties
        if is_training:
            self._print(layer_instances)
            return layer_instances[-1][1]
        return layer_instances[layer_id][1]
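The method is driven entirely by self.layers, a list of per-layer parameter dicts indexed by position. A hypothetical sketch of the structure this particular layer_op expects, with seven entries to match the indices 0-6 used above (the field names are taken from the lookups in the code; the numeric values are illustrative only, not the library's defaults):

    layers = [
        {'name': 'conv_0', 'n_features': 16, 'kernel_size': 3},               # first convolution
        {'name': 'res_1', 'n_features': 16, 'kernels': (3, 3), 'repeat': 3},  # resblocks, dilation 1
        {'name': 'res_2', 'n_features': 32, 'kernels': (3, 3), 'repeat': 3},  # resblocks, dilation 2
        {'name': 'res_3', 'n_features': 64, 'kernels': (3, 3), 'repeat': 3},  # resblocks, dilation 4
        {'name': 'conv_1', 'n_features': 80, 'kernel_size': 3},               # 3x3x3 convolution
        {'name': 'conv_2', 'n_features': 80, 'kernel_size': 1},               # 1x1x1 convolution
        {'name': 'conv_3', 'n_features': 4, 'kernel_size': 1}]                # 1x1x1 output layer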
Example #3
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        """

        :param images: tensor to input to the network. Size has to be divisible by 8
        :param is_training: boolean, True if network is in training mode
        :param layer_id: int, index of the layer to return as output
        :param unused_kwargs:
        :return: output of layer indicated by layer_id
        """
        assert layer_util.check_spatial_dims(images, lambda x: x % 8 == 0)
        # go through self.layers, create an instance of each layer
        # and plug in the data
        layer_instances = []

        ### first convolution layer
        params = self.layers[0]
        first_conv_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            stride=2,
            acti_func=self.acti_func,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = first_conv_layer(images, is_training)
        layer_instances.append((first_conv_layer, flow))

        ### resblocks, all kernels dilated by 1 (normal convolution)
        params = self.layers[1]
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 2
        params = self.layers[2]
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 4
        params = self.layers[3]
        with DilatedTensor(flow, dilation_factor=4) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### 1x1x1 convolution layer
        params = self.layers[4]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      acti_func=self.acti_func,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 3x3x3 deconvolution layer
        params = self.layers[4]
        fc_layer = DeconvolutionalLayer(n_output_chns=params['n_features'],
                                        kernel_size=3,
                                        stride=2,
                                        acti_func=self.acti_func,
                                        w_initializer=self.initializers['w'],
                                        w_regularizer=self.regularizers['w'],
                                        name='deconv')
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 1x1x1 convolution layer
        params = self.layers[5]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      acti_func=None,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        # set training properties
        if is_training:
            self._print(layer_instances)
            return layer_instances[-1][1]
        return layer_instances[layer_id][1]
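This variant differs from Example #2 mainly in how resolution is handled: the first convolution uses stride=2, so the residual blocks all run on a half-resolution grid, and the stride-2 DeconvolutionalLayer near the end brings the features back to the input grid before the final 1x1x1 layer. A rough shape walkthrough under an assumed 96x96x96 single-channel input (the numbers are illustrative; f_i stands for self.layers[i]['n_features']):

    # input                    (N, 96, 96, 96, 1)
    # stride-2 first conv      (N, 48, 48, 48, f_0)
    # resblocks, d = 1, 2, 4   (N, 48, 48, 48, f_3)   spatial size unchanged, receptive field grows
    # 1x1x1 conv               (N, 48, 48, 48, f_4)
    # stride-2 3x3x3 deconv    (N, 96, 96, 96, f_4)
    # 1x1x1 conv (no acti)     (N, 96, 96, 96, f_5)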
Example #4
    def layer_op(self, input_tensor, is_training):
        """
        :param input_tensor: tensor, input to the network
        :param is_training: boolean, True if network is in training mode
        :return: tensor, output of the autofocus block
        """
        output_tensor = input_tensor

        ########################################################################
        # 1: Create first of two autofocus layer of autofocus block.
        ########################################################################
        # A convolution without feature norm and activation.
        conv_1 = ConvLayer(n_output_chns = self.n_output_chns[0],
                           kernel_size = self.kernel_size[0],
                           padding='SAME',
                           dilation = 1,
                           w_initializer = self.initializers['w'],
                           w_regularizer = self.regularizers['w'],
                           name = 'conv_1')

        # Create two conv layers for the attention model. The output of the
        # attention model will be needed for the K parallel conv layers.

        # First convolutional layer of the attention model (conv l,1).
        conv_att_11 = ConvLayer(n_output_chns = int(self.n_input_chns[0]/2),
                                kernel_size = self.kernel_size[0],
                                padding = 'SAME',
                                w_initializer = self.initializers['w'],
                                w_regularizer = self.regularizers['w'],
                                name = 'conv_att_11')

        # Second convolutional layer of the attention model (conv l,2).
        conv_att_12 = ConvLayer(n_output_chns = self.num_branches,
                                kernel_size = [1, 1, 1],
                                padding = 'SAME',
                                w_initializer = self.initializers['w'],
                                w_regularizer = self.regularizers['w'],
                                name = 'conv_att_12')

        # Batch norm (BN) layer for each of the K parallel convolutions
        bn_layer_1 = []
        for i in range(self.num_branches):
            bn_layer_1.append(BNLayer(regularizer = self.regularizers['w'],
                                      name = 'bn_layer_1_{}'.format(i)))

        # Activation function used in the first attention model
        acti_op_1 = ActiLayer(func = self.acti_func,
                              regularizer = self.regularizers['w'],
                              name = 'acti_op_1')

        ########################################################################
        # 2: Create second of two autofocus layer of autofocus block.
        ########################################################################
        # A convolution without feature norm and activation.
        conv_2 = ConvLayer(n_output_chns = self.n_output_chns[1],
                           kernel_size = self.kernel_size[1],
                           padding='SAME',
                           dilation = 1,
                           w_initializer = self.initializers['w'],
                           w_regularizer = self.regularizers['w'],
                           name = 'conv_2')

        # Create two conv layers for the attention model. The output of the
        # attention model will be needed for the K parallel conv layers.
        # First convolutional layer of the attention model (conv l,1).
        conv_att_21 = ConvLayer(n_output_chns = int(self.n_input_chns[1]/2),
                                kernel_size = self.kernel_size[1],
                                padding = 'SAME',
                                w_initializer = self.initializers['w'],
                                w_regularizer = self.regularizers['w'],
                                name = 'conv_att_21')

        # Second convolutional layer of the attention model (conv l,2).
        conv_att_22 = ConvLayer(n_output_chns = self.num_branches,
                                kernel_size = [1, 1, 1],
                                padding = 'SAME',
                                w_initializer = self.initializers['w'],
                                w_regularizer = self.regularizers['w'],
                                name = 'conv_att_22')

        # Batch norm (BN) layer for each of the K parallel convolutions
        bn_layer_2 = []
        for i in range(self.num_branches):
            bn_layer_2.append(BNLayer(regularizer = self.regularizers['w'],
                                      name = 'bn_layer_2_{}'.format(i)))

        # Activation function used in the second attention model
        acti_op_2 = ActiLayer(func = self.acti_func,
                              regularizer = self.regularizers['w'],
                              name = 'acti_op_2')

        ########################################################################
        # 3: Create other parameterised layers
        ########################################################################
        acti_op = ActiLayer(func = self.acti_func,
                            regularizer = self.regularizers['w'],
                            name = 'acti_op')

        ########################################################################
        # 4: Connect layers
        ########################################################################
        # compute attention weights for the K parallel conv layers in the first
        # autofocus convolutional layer
        feature_1 = output_tensor
        att_1 = acti_op_1(conv_att_11(feature_1))
        att_1 = conv_att_12(att_1)
        # normalise over the K branch channels (last axis in channels-last NDHWC layout)
        att_1 = tf.nn.softmax(att_1, axis=-1)

        # Create K dilated tensors as input to the autofocus layer. This
        # simulates the K parallel convolutions with different dilation
        # rates. Doing it this way ensures the required weight sharing.
        dilated_tensor_1 = []
        for i in range(self.num_branches):
            dilated_1 = output_tensor
            with DilatedTensor(dilated_1, dilation_factor = self.dilation_list[i]) as dilated:
                dilated.tensor = conv_1(dilated.tensor)
                dilated.tensor = bn_layer_1[i](dilated.tensor, is_training)
            dilated.tensor = dilated.tensor * att_1[:,:,:,:,i:(i+1)]
            dilated_tensor_1.append(dilated.tensor)
        output_tensor = tf.add_n(dilated_tensor_1)
        output_tensor = acti_op(output_tensor)

        # compute attention weights for the K parallel conv layers in the second
        # autofocus convolutional layer
        feature_2 = output_tensor
        att_2 = acti_op_2(conv_att_21(feature_2))
        att_2 = conv_att_22(att_2)
        # normalise over the K branch channels (last axis in channels-last NDHWC layout)
        att_2 = tf.nn.softmax(att_2, axis=-1)

        # Create K dilated tensors as input to the autofocus layer. This
        # simulates the K parallel convolutions with different dilation
        # rates. Doing it this way ensures the required weight sharing.
        dilated_tensor_2 = []
        for i in range(self.num_branches):
            dilated_2 = output_tensor
            with DilatedTensor(dilated_2, dilation_factor = self.dilation_list[i]) as dilated:
                dilated.tensor = conv_2(dilated.tensor)
                dilated.tensor = bn_layer_2[i](dilated.tensor, is_training)
            dilated.tensor = dilated.tensor * att_2[:,:,:,:,i:(i+1)]
            dilated_tensor_2.append(dilated.tensor)
        output_tensor = tf.add_n(dilated_tensor_2)

        # make residual connection using ElementwiseLayer with SUM
        if self.with_res:
            output_tensor = ElementwiseLayer('SUM')(output_tensor, input_tensor)

        # apply the last ReLU activation
        output_tensor = acti_op(output_tensor)
        print("output_tensor:", output_tensor)

        return output_tensor
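The heart of the autofocus block is the mixing step: K weight-shared convolutions are run at different dilation rates, each response is scaled by a softmax attention map, and the scaled responses are summed. A stripped-down, self-contained sketch of just that step, assuming channels-last NDHWC tensors with the K attention maps on the last axis (as the slicing att_1[:, :, :, :, i:(i+1)] above implies); the function name is ours, not NiftyNet's:

    import tensorflow as tf

    def autofocus_mix(branch_outputs, attention_logits):
        # branch_outputs:   list of K tensors, each of shape (N, D, H, W, C)
        # attention_logits: tensor of shape (N, D, H, W, K)
        att = tf.nn.softmax(attention_logits, axis=-1)       # normalise over the K branches
        weighted = [branch_outputs[k] * att[..., k:k + 1]    # broadcast (N, D, H, W, 1) weights
                    for k in range(len(branch_outputs))]
        return tf.add_n(weighted)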
Example #5
    def layer_op(self, input_tensor, is_training, layer_id=-1):
        layer_instances = []
        scores_instances = []
        first_conv_layer = ConvolutionalLayer(
            n_output_chns=self.num_features[0],
            with_bn=True,
            kernel_size=3,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=self.acti_func,
            name='conv_1_1')
        flow = first_conv_layer(input_tensor, is_training)
        layer_instances.append((first_conv_layer, flow))

        # SCALE 1
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(self.num_res_blocks[0]):
                res_block = HighResBlock(self.num_features[0],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % ('res_1', j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        score_layer_scale1 = ScoreLayer(
            num_features=self.num_fea_score_layers[0],
            num_classes=self.num_classes)
        score_1 = score_layer_scale1(flow, is_training)
        scores_instances.append(score_1)
        # if is_training:
        #     loss_s1 = WGDL(score_1, labels)
        #     tf.add_to_collection('multiscale_loss', loss_s1/num_scales)

        # SCALE 2
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(self.num_res_blocks[1]):
                res_block = HighResBlock(self.num_features[1],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % ('res_2', j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        score_layer_scale2 = ScoreLayer(
            num_features=self.num_fea_score_layers[1],
            num_classes=self.num_classes)
        score_2 = score_layer_scale2(flow, is_training)

        # score_2 = self.score_layer(flow, self.num_fea_score_layers[1])
        up_score_2 = score_2
        scores_instances.append(up_score_2)
        # if is_training:
        #     loss_s2 =  self.WGDL(score_2, labels)
        #     # loss_s2 = self.new_dice_loss(score_2, labels)
        #     tf.add_to_collection('multiscale_loss', loss_s2/num_scales)

        # SCALE 3
        ## downsampling factor = 2
        downsample_scale3 = DownSampleLayer(func='AVG',
                                            kernel_size=2,
                                            stride=2)
        flow = downsample_scale3(flow)
        layer_instances.append((downsample_scale3, flow))
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(self.num_res_blocks[2]):
                res_block = HighResBlock(self.num_features[2],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % ('res_3', j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        score_layer_scale3 = ScoreLayer(
            num_features=self.num_fea_score_layers[2],
            num_classes=self.num_classes)
        score_3 = score_layer_scale3(flow, is_training)

        upsample_indep_scale3 = UpSampleLayer(
            func='CHANNELWISE_DECONV',
            kernel_size=2,
            stride=2,
            w_initializer=tf.constant_initializer(1.0, dtype=tf.float32))
        up_score_3 = upsample_indep_scale3(score_3)
        scores_instances.append(up_score_3)

        # up_score_3 = self.feature_indep_upsample_conv(score_3, factor=2)
        # if is_training:
        #     loss_s3 = self.WGDL(up_score_3, labels)
        #     # loss_s3 = self.new_dice_loss(up_score_3, labels)
        #     tf.add_to_collection('multiscale_loss', loss_s3/num_scales)

        # SCALE 4
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(self.num_res_blocks[3]):
                res_block = HighResBlock(self.num_features[3],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % ('res_4', j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        score_layer_scale4 = ScoreLayer(
            num_features=self.num_fea_score_layers[3],
            num_classes=self.num_classes)
        # num_features is already fixed in the constructor; call it like the other score layers
        score_4 = score_layer_scale4(flow, is_training)

        upsample_indep_scale4 = UpSampleLayer(
            func='CHANNELWISE_DECONV',
            kernel_size=1,
            stride=2,
            w_initializer=tf.constant_initializer(1.0, dtype=tf.float32))
        up_score_4 = upsample_indep_scale4(score_4)
        scores_instances.append(up_score_4)

        # if is_training:
        #     loss_s4 = self.WGDL(up_score_4, labels)
        #     # loss_s4 = self.new_dice_loss(up_score_4, labels)
        #     tf.add_to_collection('multiscale_loss', loss_s4/num_scales)

        # FUSED SCALES
        merge_layer = MergeLayer('WEIGHTED_AVERAGE')
        soft_scores = []
        for s in scores_instances:
            soft_scores.append(tf.nn.softmax(s))
        fused_score = merge_layer(soft_scores)
        scores_instances.append(fused_score)
        if is_training:
            return scores_instances
        return fused_score
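The commented-out blocks above hint at the intended training scheme: every score map gets its own loss, each scaled by the number of scales, while the fused score is what the network returns at inference. A hedged sketch of that deep-supervision pattern; plain cross-entropy is used here only as a stand-in for the WGDL loss named in the comments, and labels is assumed to be an integer label volume of shape (N, D, H, W):

    import tensorflow as tf

    def multiscale_loss(score_maps, labels):
        # score_maps: list of per-scale logit tensors, each (N, D, H, W, num_classes)
        num_scales = len(score_maps)
        losses = [tf.reduce_mean(
                      tf.nn.sparse_softmax_cross_entropy_with_logits(
                          labels=labels, logits=s)) / num_scales
                  for s in score_maps]
        return tf.add_n(losses)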
Example #6
    def layer_op(self, images, is_training, layer_id=-1):
        assert layer_util.check_spatial_dims(images['T1'],
                                             lambda x: x % 8 == 0)
        assert set(images.keys()).issubset(
            set(MODALITIES)
        ), 'image has to be a dictionary with keys in %s' % (MODALITIES,)
        # go through self.layers, create an instance of each layer
        # and plug in the data
        layer_instances = []
        layer_modalities = dict()

        for mod in MODALITIES:

            layer_modalities[mod] = []

            ### first convolution layer BRATS
            params = self.layers[0]
            first_conv_layer = ConvolutionalLayer(
                n_output_chns=params['n_features'],
                kernel_size=params['kernel_size'],
                acti_func=self.acti_func,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='%s_%s' % (params['name'], mod))

            layer_instances.append((first_conv_layer, ''))
            layer_modalities[mod].append(first_conv_layer)

            params = self.layers[1]
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%s_%d' %
                                         (params['name'], mod, j))
                layer_instances.append((res_block, ''))
                layer_modalities[mod].append(res_block)

        flow = []
        for mod in images.keys():
            flow_mod = layer_modalities[mod][0](images[mod], is_training)
            for lay in layer_modalities[mod][1:]:
                flow_mod = lay(flow_mod, is_training)
            flow.append(flow_mod)

        # average the per-modality feature maps
        flow = tf.add_n(flow) / len(images)

        ### resblocks, all kernels dilated by 1 (normal convolution)
        params = self.layers[1]
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 2
        params = self.layers[2]
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 4
        params = self.layers[3]
        with DilatedTensor(flow, dilation_factor=4) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### 1x1x1 convolution layer
        params = self.layers[4]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      acti_func=self.acti_func,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 1x1x1 convolution layer
        params = self.layers[5]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      acti_func=None,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        # set training properties
        if is_training:
            self._print(layer_instances)
            return layer_instances[-1][1]
        return layer_instances[layer_id][1]
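Unlike the single-tensor variants, this network consumes a dictionary of modality volumes: every available modality gets its own first convolution and its own stack of HighResBlocks, and the per-modality features are averaged before entering the shared trunk. A hypothetical call sketch (tensor names and sizes are illustrative, and 'T2' is assumed to be listed in MODALITIES; 'T1' must be present because of the spatial-dimension check):

    import tensorflow as tf

    t1 = tf.placeholder(tf.float32, [2, 32, 32, 32, 1])   # spatial dims divisible by 8
    t2 = tf.placeholder(tf.float32, [2, 32, 32, 32, 1])
    images = {'T1': t1, 'T2': t2}                          # keys must be a subset of MODALITIES
    # seg_logits = net(images, is_training=True)           # net is the surrounding network object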
Example #7
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        assert layer_util.check_spatial_dims(images, lambda x: x % 8 == 0)
        # go through self.layers, create an instance of each layer
        # and plug in the data
        layer_instances = []

        input_tensor_T1, input_tensor_T2, input_tensor_PD = tf.split(
            value=images, num_or_size_splits=3, axis=-1, num=None)

        ### first convolution layer
        params = self.layers[0]
        first_conv_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            with_bias=True,
            with_bn=False,
            acti_func=self.acti_func,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = first_conv_layer(images, is_training)
        layer_instances.append((first_conv_layer, flow))

        ### resblocks, all kernels dilated by 1 (normal convolution)
        params = self.layers[1]
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 2
        params = self.layers[2]
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 4
        params = self.layers[3]
        with DilatedTensor(flow, dilation_factor=4) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### 1x1x1 convolution layer
        params = self.layers[4]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      with_bias=True,
                                      with_bn=False,
                                      acti_func=self.acti_func,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 1x1x1 convolution layer
        params = self.layers[5]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      with_bias=True,
                                      with_bn=False,
                                      acti_func=None,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        output_tensor_res = ElementwiseLayer('SUM')(input_tensor_T1, flow)

        # set training properties
        if is_training:
            self._print(layer_instances)
            # return layer_instances[-1][1]
            return output_tensor_res
        # return layer_instances[layer_id][1]
        return output_tensor_res
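Here the input is a single three-channel stack rather than a dictionary: T1, T2 and PD volumes are concatenated along the last axis, split apart again inside the method, and the final ElementwiseLayer adds the network output to the T1 channel, so the model effectively predicts a residual on top of T1. A hypothetical input-preparation sketch (tensor names and sizes are illustrative):

    import tensorflow as tf

    t1 = tf.placeholder(tf.float32, [1, 64, 64, 64, 1])   # spatial dims divisible by 8
    t2 = tf.placeholder(tf.float32, [1, 64, 64, 64, 1])
    pd = tf.placeholder(tf.float32, [1, 64, 64, 64, 1])
    images = tf.concat([t1, t2, pd], axis=-1)              # (1, 64, 64, 64, 3), split again in layer_op
    # output = net(images, is_training=True)               # T1 plus the predicted residual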