def layer_op(self, inputs, is_training=True):
        """
        The general connections is::

            (inputs)--o-conv_0--conv_1-+-- (outputs)
                      |                |
                      o----------------o

        ``conv_0``, ``conv_1`` layers are specified by ``type_string``.
        """
        # build the parameterised sub-layers first; connect them afterwards
        norm_a = BNLayer(**self.bn_param)
        norm_b = BNLayer(**self.bn_param)
        act_a = Acti(func=self.acti_func)
        act_b = Acti(func=self.acti_func)
        conv_a = Conv(acti_func=None,
                      with_bias=False,
                      with_bn=False,
                      **self.conv_param)
        conv_b = Conv(acti_func=None,
                      with_bias=False,
                      with_bn=False,
                      **self.conv_param)

        flow = inputs
        if self.type_string == 'original':
            # activation is applied AFTER the shortcut sum in this variant
            flow = act_a(norm_a(conv_a(flow), is_training))
            flow = norm_b(conv_b(flow), is_training)
            return act_b(ElementwiseLayer('SUM')(flow, inputs))

        if self.type_string == 'conv_bn_acti':
            flow = act_a(norm_a(conv_a(flow), is_training))
            flow = act_b(norm_b(conv_b(flow), is_training))
        elif self.type_string == 'acti_conv_bn':
            flow = norm_a(conv_a(act_a(flow)), is_training)
            flow = norm_b(conv_b(act_b(flow)), is_training)
        elif self.type_string == 'bn_acti_conv':
            flow = conv_a(act_a(norm_a(flow, is_training)))
            flow = conv_b(act_b(norm_b(flow, is_training)))
        else:
            raise ValueError('Unknown type string')
        # identity shortcut: the block input is added to the conv output
        return ElementwiseLayer('SUM')(flow, inputs)
# Example #2
 def layer_op(self, input_tensor, is_training):
     """Activation + convolution stack with an optional residual sum.

     Batch normalisation is intentionally absent from this block.
     """
     flow = input_tensor
     for idx, kernel in enumerate(self.kernels):
         # build parameterised layers, then connect them
         flow = ActiLayer(func=self.acti_func,
                          regularizer=self.regularizers['w'],
                          name='acti_{}'.format(idx))(flow)
         flow = ConvLayer(n_output_chns=self.n_output_chns,
                          kernel_size=kernel,
                          stride=1,
                          padding='SAME',
                          with_bias=True,
                          w_initializer=self.initializers['w'],
                          w_regularizer=self.regularizers['w'],
                          name='conv_{}'.format(idx))(flow)
     if self.with_res:
         # identity shortcut: add the block input to its output
         flow = ElementwiseLayer('SUM')(flow, input_tensor)
     return flow
# Example #3
 def layer_op(self, input_tensor, is_training):
     """
     Residual stack: batch-norm -> prelu -> convolution per kernel,
     followed by an elementwise sum with the block input.

     :param input_tensor: tensor, input to the residual block
     :param is_training: boolean, True when the network is training
     :return: tensor, output of the residual block
     """
     output_tensor = input_tensor
     for i in range(len(self.kernels)):
         # bug fix: give every iteration its own variable scope so the
         # 'w'/'alpha' variables do not collide across iterations
         with tf.variable_scope('res_{}'.format(i)):
             input_shape = input_tensor.shape.as_list()
             n_input_chns = input_shape[-1]
             spatial_rank = layer_util.infer_spatial_rank(input_tensor)
             w_full_size = layer_util.expand_spatial_params(
                 self.kernel_size, spatial_rank)
             # kernel shape: spatial dims + (in_channels, out_channels)
             w_full_size = w_full_size + (n_input_chns, self.n_output_chns)
             conv_kernel = tf.get_variable(
                 'w', shape=w_full_size,
                 initializer=self.initializers['w'],
                 regularizer=self.regularizers['w'])
             # NOTE(review): 'alpha' looks intended as the prelu channelwise
             # parameter but was never used by the original code — confirm
             alphas = tf.get_variable(
                 'alpha', input_tensor.shape[-1],
                 initializer=tf.constant_initializer(0.0),
                 regularizer=None)

             # bug fix: the original passed ``alphas`` positionally after a
             # keyword argument (SyntaxError) and then fed ``input_tensor``
             # to prelu, discarding the batch-norm output entirely
             output_tensor = tf.layers.batch_normalization(
                 output_tensor, training=is_training)
             output_tensor = self.prelu(output_tensor,
                                        name='acti_{}'.format(i))
             output_tensor = tf.nn.convolution(input=output_tensor,
                                               filter=conv_kernel,
                                               strides=self.strides,
                                               dilation_rate=self.dilation_rates,
                                               padding=self.padding,
                                               name='conv_{}'.format(i))
             # residual connection with the block input
             output_tensor = ElementwiseLayer('SUM')(output_tensor,
                                                     input_tensor)
     return output_tensor
    def layer_op(self, input_tensor, is_training=True):
        """
        output is an elementwise sum of deconvolution and additive upsampling::

            --(inputs)--o--deconvolution-------+--(outputs)--
                        |                      |
                        o--additive upsampling-o
        :param input_tensor:
        :param is_training:
        :return: an upsampled tensor with ``n_input_channels/n_splits``
            feature channels.
        """
        n_output_chns = check_divisible_channels(input_tensor, self.n_splits)

        # path 1: learned upsampling (batch-normalised deconvolution)
        deconv_branch = Deconv(n_output_chns=n_output_chns,
                               with_bias=False,
                               feature_normalization='batch',
                               **self.deconv_param)
        deconv_out = deconv_branch(input_tensor, is_training)

        # path 2: parameter-free additive upsampling, resized to match
        # the deconvolution output's spatial dimensions
        spatial_size = deconv_out.get_shape().as_list()[1:-1]
        resize_branch = AdditiveUpsampleLayer(new_size=spatial_size,
                                              n_splits=self.n_splits)
        resize_out = resize_branch(input_tensor)

        # fuse the two paths by elementwise summation
        return ElementwiseLayer('SUM')(deconv_out, resize_out)
# Example #5
 def layer_op(self, input_tensor, is_training):
     """Convolution -> activation -> batch-norm stack, with an optional
     residual connection around the whole stack."""
     flow = input_tensor
     for idx in range(len(self.kernels)):
         # construct the parameterised layers first, then connect them;
         # weight regulariser is reused for the norm layer for simplicity
         norm_layer = BNLayer(regularizer=self.regularizers['w'],
                              name='bn_{}'.format(idx))
         acti_layer = ActiLayer(func=self.acti_func,
                                regularizer=self.regularizers['w'],
                                name='acti_{}'.format(idx))
         conv_layer = ConvLayer(n_output_chns=self.n_output_chns,
                                kernel_size=self.kernels[idx],
                                stride=self.strides[idx],
                                dilation=self.dilation_rates[idx],
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                name='conv_{}'.format(idx))
         # note the ordering: convolution, activation, then normalisation
         flow = norm_layer(acti_layer(conv_layer(flow)), is_training)
     if self.with_res:
         # the block input is directly added to the output
         flow = ElementwiseLayer('SUM')(flow, input_tensor)
     return flow
# Example #6
    def layer_op_selu(self, input_tensor, is_training):
        """Convolution + activation stack without batch normalisation
        (variant intended for self-normalising activations), with an
        optional residual connection."""
        flow = input_tensor
        for idx, kernel in enumerate(self.kernels):
            conv = ConvLayer(n_output_chns=self.n_output_chns,
                             kernel_size=kernel,
                             stride=self.strides[idx],
                             dilation=self.dilation_rates[idx],
                             w_initializer=self.initializers['w'],
                             w_regularizer=self.regularizers['w'],
                             name='conv_{}'.format(idx))
            acti = ActiLayer(func=self.acti_func,
                             regularizer=self.regularizers['w'],
                             name='acti_{}'.format(idx))
            # batch normalisation is deliberately omitted in this variant
            flow = acti(conv(flow))

        if self.with_res:
            # identity shortcut around the whole stack
            flow = ElementwiseLayer('SUM')(flow, input_tensor)
        return flow
# Example #7
    def layer_op(self, input_tensor, is_training):
        """

        :param input_tensor: tensor, input to the network
        :param is_training: boolean, True if network is in training mode
        :return: tensor, output of the residual block
        """
        flow = input_tensor
        for idx, kernel in enumerate(self.kernels):
            # pre-activation ordering: normalisation, activation, convolution
            norm = BNLayer(regularizer=self.regularizers['w'],
                           name='bn_{}'.format(idx))
            acti = ActiLayer(func=self.acti_func,
                             regularizer=self.regularizers['w'],
                             name='acti_{}'.format(idx))
            conv = ConvLayer(n_output_chns=self.n_output_chns,
                             kernel_size=kernel,
                             stride=1,
                             w_initializer=self.initializers['w'],
                             w_regularizer=self.regularizers['w'],
                             name='conv_{}'.format(idx))
            flow = conv(acti(norm(flow, is_training)))
        if self.with_res:
            # identity shortcut around the whole stack
            flow = ElementwiseLayer('SUM')(flow, input_tensor)
        return flow
# Example #8
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        """Residual convolutional network: a first convolution, a repeated
        middle stack, a final 'fc' convolution, then the raw input added
        back onto the network output (global residual connection)."""
        assert layer_util.check_spatial_dims(images, lambda x: x % 8 == 0)

        # every connected layer is recorded as (layer, output) for printing
        layer_instances = []

        def _connect(layer, flow_in):
            # connect one layer and record it
            flow_out = layer(flow_in, is_training)
            layer_instances.append((layer, flow_out))
            return flow_out

        ### first convolution layer
        params = self.layers[0]
        flow = _connect(
            ConvolutionalLayer(n_output_chns=params['n_features'],
                               kernel_size=params['kernel_size'],
                               with_bias=True,
                               with_bn=False,
                               acti_func=self.acti_func,
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               name=params['name']),
            images)

        ### repeated middle convolutions
        params = self.layers[1]
        for j in range(params['repeat']):
            flow = _connect(
                ConvolutionalLayer(n_output_chns=params['n_features'],
                                   kernel_size=params['kernel_size'],
                                   with_bias=True,
                                   with_bn=False,
                                   acti_func=self.acti_func,
                                   w_initializer=self.initializers['w'],
                                   w_regularizer=self.regularizers['w'],
                                   name='%s_%d' % (params['name'], j)),
                flow)

        ### final convolution, no activation
        params = self.layers[2]
        flow = _connect(
            ConvolutionalLayer(n_output_chns=params['n_features'],
                               kernel_size=params['kernel_size'],
                               with_bias=True,
                               with_bn=False,
                               acti_func=None,
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               name=params['name']),
            flow)

        # global residual connection: input images + network output
        output_tensor_res = ElementwiseLayer('SUM')(images, flow)

        # set training properties
        if is_training:
            self._print(layer_instances)
            return output_tensor_res
        return output_tensor_res
# Example #9
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        """U-Net-style network with one down-sampling block, one up-sampling
        block, a cropped skip concatenation, and a final 1x1 classification
        convolution."""
        # spatial dims should be divisible by 4 and at least 21 voxels wide
        assert layer_util.check_spatial_dims(images, lambda x: x % 4 == 0)
        assert layer_util.check_spatial_dims(images, lambda x: x >= 21)

        down_block = UNetBlock('DOWNSAMPLE',
                               (self.n_features[0], self.n_features[1]),
                               (3, 3),
                               with_downsample_branch=True,
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               acti_func=self.acti_func,
                               name='d0')
        pool_1, conv_1 = down_block(images, is_training)
        print(down_block)

        up_block = UNetBlock('UPSAMPLE',
                             (self.n_features[1], self.n_features[2]),
                             (3, 3),
                             with_downsample_branch=False,
                             w_initializer=self.initializers['w'],
                             w_regularizer=self.regularizers['w'],
                             acti_func=self.acti_func,
                             name='d1')
        up_1, _ = up_block(pool_1, is_training)
        print(up_block)

        top_block = UNetBlock(
            'NONE', (self.n_features[1], self.n_features[1], self.num_classes),
            (3, 3),
            with_downsample_branch=True,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=self.acti_func,
            name='u0')
        # skip connection: crop the encoder features to match the decoder
        cropped = CropLayer(border=4, name='crop-8')(conv_1)
        concat_1 = ElementwiseLayer('CONCAT')(cropped, up_1)
        print(top_block)

        # for the last block, the up-sampling path is not used
        _, output_tensor = top_block(concat_1, is_training)

        classifier = ConvolutionalLayer(
            n_output_chns=self.num_classes,
            kernel_size=1,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=None,
            name='{}'.format(self.num_classes),
            padding='VALID',
            with_bn=False,
            with_bias=True)
        final_output_tensor = classifier(output_tensor, is_training)
        print(classifier)

        return final_output_tensor
# Example #10
    def test_3d_shape(self):
        """Check ElementwiseLayer output shapes on 3D inputs: SUM with
        matching and mismatching channel counts, and CONCAT."""
        def _fuse(func, chns_a, chns_b):
            # build a pair of constant tensors and fuse them
            first = tf.ones((2, 16, 16, 16, chns_a))
            second = tf.zeros((2, 16, 16, 16, chns_b))
            return ElementwiseLayer(func)(first, second)

        out_sum_1 = _fuse('SUM', 6, 6)
        out_sum_2 = _fuse('SUM', 8, 6)
        out_sum_3 = _fuse('SUM', 6, 8)
        out_sum_4 = _fuse('CONCAT', 6, 8)

        with self.cached_session() as sess:
            sess.run(tf.global_variables_initializer())
            self.assertAllClose((2, 16, 16, 16, 6), sess.run(out_sum_1).shape)
            self.assertAllClose((2, 16, 16, 16, 8), sess.run(out_sum_2).shape)
            self.assertAllClose((2, 16, 16, 16, 6), sess.run(out_sum_3).shape)
            self.assertAllClose((2, 16, 16, 16, 14), sess.run(out_sum_4).shape)
# Example #11
    def layer_op(self, main_flow, bypass_flow):
        """

        :param main_flow: tensor, input to the VNet block
        :param bypass_flow: tensor, input from skip connection
        :return: res_flow is tensor before final block operation (for residual connections),
            main_flow is final output tensor
        """
        # convolution stack; the last conv is not followed by an activation
        for idx in range(self.n_conv):
            main_flow = ConvLayer(name='conv_{}'.format(idx),
                                  n_output_chns=self.n_feature_chns,
                                  kernel_size=5,
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'])(main_flow)
            if idx + 1 < self.n_conv:
                main_flow = ActiLayer(
                    func=self.acti_func,
                    regularizer=self.regularizers['w'])(main_flow)
        # residual sum with the skip-connection input
        res_flow = ElementwiseLayer('SUM')(main_flow, bypass_flow)

        # resolution-changing operation selected by ``self.func``
        resize_op = None
        if self.func == 'DOWNSAMPLE':
            resize_op = ConvLayer(name='downsample',
                                  n_output_chns=self.n_output_chns,
                                  kernel_size=2,
                                  stride=2,
                                  with_bias=True,
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'])
        elif self.func == 'UPSAMPLE':
            resize_op = DeconvLayer(name='upsample',
                                    n_output_chns=self.n_output_chns,
                                    kernel_size=2,
                                    stride=2,
                                    with_bias=True,
                                    w_initializer=self.initializers['w'],
                                    w_regularizer=self.regularizers['w'])
        elif self.func == 'SAME':
            resize_op = ConvLayer(name='conv_1x1x1',
                                  n_output_chns=self.n_output_chns,
                                  kernel_size=1,
                                  with_bias=True,
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  b_initializer=self.initializers['b'],
                                  b_regularizer=self.regularizers['b'])
        if resize_op is not None:
            main_flow = resize_op(res_flow)
        main_flow = ActiLayer(self.acti_func)(main_flow)
        print(self)
        return res_flow, main_flow
# Example #12
    def layer_op(self, main_flow, bypass_flow):
        """VNet block: a stack of 5x5x5 convolutions, a residual sum with
        the bypass flow, then a resolution-changing operation chosen by
        ``self.func`` ('DOWNSAMPLE', 'UPSAMPLE' or 'SAME')."""
        for step in range(self.n_conv):
            conv = ConvLayer(name='conv_{}'.format(step),
                             n_output_chns=self.n_feature_chns,
                             kernel_size=5,
                             w_initializer=self.initializers['w'],
                             w_regularizer=self.regularizers['w'])
            main_flow = conv(main_flow)
            if step + 1 < self.n_conv:  # last conv has no activation
                main_flow = ActiLayer(
                    func=self.acti_func,
                    regularizer=self.regularizers['w'])(main_flow)
        # residual sum with the skip-connection input
        res_flow = ElementwiseLayer('SUM')(main_flow, bypass_flow)

        if self.func == 'DOWNSAMPLE':
            main_flow = ConvLayer(name='downsample',
                                  n_output_chns=self.n_output_chns,
                                  kernel_size=2,
                                  stride=2,
                                  with_bias=True,
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'])(res_flow)
        elif self.func == 'UPSAMPLE':
            main_flow = DeconvLayer(name='upsample',
                                    n_output_chns=self.n_output_chns,
                                    kernel_size=2,
                                    stride=2,
                                    with_bias=True,
                                    w_initializer=self.initializers['w'],
                                    w_regularizer=self.regularizers['w'])(res_flow)
        elif self.func == 'SAME':
            main_flow = ConvLayer(name='conv_1x1x1',
                                  n_output_chns=self.n_output_chns,
                                  kernel_size=1,
                                  with_bias=True,
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  b_initializer=self.initializers['b'],
                                  b_regularizer=self.regularizers['b'])(res_flow)
        main_flow = ActiLayer(self.acti_func)(main_flow)
        print(self)
        print('VNet is running')
        return res_flow, main_flow
# Example #13
    def layer_op(self, thru_tensor, is_training):
        """UNet block: a convolution stack, an optional residual branch
        output, then 'DOWNSAMPLE' / 'UPSAMPLE' / 'NONE' resolution change."""
        input_tensor = thru_tensor
        for kernel_size, n_features in zip(self.kernels, self.n_chns):
            # the final 1x1x1 conv layer has no activation/normalisation
            acti_func = self.acti_func if kernel_size > 1 else None
            feature_normalization = 'instance' if acti_func is not None else None
            thru_tensor = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=kernel_size,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=acti_func,
                feature_normalization=feature_normalization,
                name='{}'.format(n_features))(thru_tensor, is_training)

        if self.with_downsample_branch:
            # the residual sum is both the branch output and the main flow
            thru_tensor = ElementwiseLayer('SUM')(thru_tensor, input_tensor)
            branch_output = thru_tensor
        else:
            branch_output = None

        if self.func == 'DOWNSAMPLE':
            thru_tensor = DownSampleLayer('MAX',
                                          kernel_size=2,
                                          stride=2,
                                          name='down_2x2')(thru_tensor)
        elif self.func == 'UPSAMPLE':
            # double every spatial dimension via trilinear resize
            up_shape = [2 * int(thru_tensor.shape[dim]) for dim in (1, 2, 3)]
            thru_tensor = LinearResizeLayer(up_shape)(thru_tensor)
        elif self.func == 'NONE':
            pass  # keep the resolution unchanged
        return thru_tensor, branch_output
    def layer_op(self, inputs, forwarding=None, is_training=True):
        """
        Consists of::

            (inputs)--upsampling-+-o--conv_1--conv_2--+--(conv_res)--
                                 | |                  |
            (forwarding)---------o o------------------o

        where upsampling method could be ``DeconvolutionalLayer``
        or ``ResidualUpsampleLayer``
        """
        # choose the upsampling path
        if self.is_residual_upsampling:
            n_input_channels = inputs.get_shape().as_list()[-1]
            splits = float(n_input_channels) / float(self.n_output_chns)
            upsample_layer = ResUp(kernel_size=self.kernel_size,
                                   stride=self.upsample_stride,
                                   n_splits=splits,
                                   acti_func=self.acti_func,
                                   **self.conv_param)
        else:
            upsample_layer = Deconv(n_output_chns=self.n_output_chns,
                                    kernel_size=self.kernel_size,
                                    stride=self.upsample_stride,
                                    acti_func=self.acti_func,
                                    with_bias=False,
                                    feature_normalization='batch',
                                    **self.conv_param)
        upsampled = upsample_layer(inputs, is_training)

        # merge the skip connection (if any) into the upsampled flow
        if forwarding is None:
            conv_0 = upsampled
        else:
            conv_0 = ElementwiseLayer('SUM')(upsampled, forwarding)

        # residual unit on the merged flow
        return ResUnit(n_output_chns=self.n_output_chns,
                       kernel_size=self.kernel_size,
                       acti_func=self.acti_func,
                       type_string=self.type_string,
                       **self.conv_param)(conv_0, is_training)
    def layer_op(self, tensor):
        """
        Pre-activation residual stack: batch-norm -> selu -> convolution,
        repeated ``2 * len(self.kernels)`` times, each iteration followed
        by an elementwise sum with the block input.

        :param tensor: input tensor to the block
        :return: output tensor of the block
        """
        output = tensor
        for i in range(2 * len(self.kernels)):
            # bug fix: a per-iteration scope so the 'w' variable does not
            # collide between loop iterations
            with tf.variable_scope('res_{}'.format(i)):
                input_shape = output.shape.as_list()
                n_input_chns = input_shape[-1]
                spatial_rank = layer_util.infer_spatial_rank(output)
                w_full_size = layer_util.expand_spatial_params(
                    self.kernel_size, spatial_rank)
                # kernel shape: spatial dims + (in_channels, out_channels)
                w_full_size = w_full_size + (n_input_chns, self.n_output_chns)
                conv_kernel = tf.get_variable('w',
                                              shape=w_full_size,
                                              regularizer=self.regularizers['w'])

                # bug fix: the original normalised ``tensor`` (the block
                # input) on every iteration, discarding all previous
                # layers' outputs, and used the invalid keyword ``input=``
                # NOTE(review): no training flag is available in this
                # signature, so batch_normalization runs with its default
                # ``training=False`` — confirm this is intended
                output = tf.layers.batch_normalization(
                    output, name='bn_{}'.format(i))
                output = self.selu(output, name='acti_{}'.format(i))
                output = tf.nn.convolution(input=output,
                                           filter=conv_kernel,
                                           strides=self.strides,
                                           dilation_rate=self.dilation_rates,
                                           padding=self.padding,
                                           name='conv_{}'.format(i))
                # residual connection with the block input
                output = ElementwiseLayer('SUM')(output, tensor)
        return output
# Example #16
    def layer_op(self,
                 images,
                 is_training,
                 keep_prob=0.5,
                 layer_id=-1,
                 **unused_kwargs):
        """Two-pathway network: a cropped 'normal' pathway and a dilated
        pathway are convolved separately, concatenated, then passed
        through 1x1x1 fully-connected and classification convolutions."""
        # crop 27x27x27 from 59x59x59
        crop_op = CropLayer(border=self.crop_diff, name='cropping_input')
        normal_path = crop_op(images)
        dilated_path = images
        print(crop_op)

        # dilated pathway; dilation rate: 1,2,4,2,8,2,4,2,1
        for n_features, rate in zip(self.conv2_features, self.dilation_rates):
            dilated_block = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                dilation=rate,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='dilated_conv_{}'.format(n_features))
            dilated_path = dilated_block(dilated_path, is_training)
            print(dilated_block)

        # normal pathway convolutions
        for n_features in self.conv1_features:
            normal_block = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='normal_conv_{}'.format(n_features))
            normal_path = normal_block(normal_path, is_training)
            print(normal_block)

        # concatenate both pathways
        output_tensor = ElementwiseLayer('CONCAT')(normal_path, dilated_path)

        # 1x1x1 convolution layers (dropout controlled via keep_prob)
        for n_features in self.fc_features:
            fc_block = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=1,
                acti_func=self.acti_func,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='conv_1x1x1_{}'.format(n_features))
            output_tensor = fc_block(output_tensor,
                                     is_training,
                                     keep_prob=keep_prob)
            print('#----------------------------------- keep_prob: ',
                  keep_prob)
            print(fc_block)

        # classification layer (no activation)
        for n_features in self.conv_classification:
            cls_block = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=1,
                acti_func=None,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='conv_1x1x1_{}'.format(n_features))
            output_tensor = cls_block(output_tensor, is_training)
            print(cls_block)

        return output_tensor
# Example #17
    def layer_op(self, images, is_training, layer_id=-1):
        """U-Net: three down-sampling blocks, three up-sampling blocks with
        skip concatenations, a final conv block, then a border crop."""
        # spatial dims should be divisible by 8 and at least 89 voxels wide
        assert layer_util.check_spatial_dims(images, lambda x: x % 8 == 0)
        assert layer_util.check_spatial_dims(images, lambda x: x >= 89)

        def _block(func, chns, name, with_branch, kernels=(3, 3)):
            # helper: a UNetBlock sharing the network-wide hyper-parameters
            return UNetBlock(func,
                             chns,
                             kernels,
                             with_downsample_branch=with_branch,
                             w_initializer=self.initializers['w'],
                             w_regularizer=self.regularizers['w'],
                             acti_func=self.acti_func,
                             name=name)

        down_1 = _block('DOWNSAMPLE',
                        (self.n_features[0], self.n_features[1]), 'L1', True)
        pool_1, conv_1 = down_1(images, is_training)
        print(down_1)

        down_2 = _block('DOWNSAMPLE',
                        (self.n_features[1], self.n_features[2]), 'L2', True)
        pool_2, conv_2 = down_2(pool_1, is_training)
        print(down_2)

        down_3 = _block('DOWNSAMPLE',
                        (self.n_features[2], self.n_features[3]), 'L3', True)
        pool_3, conv_3 = down_3(pool_2, is_training)
        print(down_3)

        bottom = _block('UPSAMPLE',
                        (self.n_features[3], self.n_features[4]), 'L4', False)
        up_3, _ = bottom(pool_3, is_training)
        print(bottom)

        right_3 = _block('UPSAMPLE',
                         (self.n_features[3], self.n_features[3]), 'R3', False)
        concat_3 = ElementwiseLayer('CONCAT')(conv_3, up_3)
        up_2, _ = right_3(concat_3, is_training)
        print(right_3)

        right_2 = _block('UPSAMPLE',
                         (self.n_features[2], self.n_features[2]), 'R2', False)
        concat_2 = ElementwiseLayer('CONCAT')(conv_2, up_2)
        up_1, _ = right_2(concat_2, is_training)
        print(right_2)

        final_block = _block(
            'NONE',
            (self.n_features[1], self.n_features[1], self.num_classes),
            'R1_FC', True, kernels=(3, 3, 1))
        concat_1 = ElementwiseLayer('CONCAT')(conv_1, up_1)

        # for the last block, the up-sampling path is not used
        _, output_tensor = final_block(concat_1, is_training)

        # crop away the border (44 voxels per side) to the valid region
        output_tensor = CropLayer(border=44, name='crop-88')(output_tensor)
        print(final_block)
        return output_tensor
# Example #18 (score: 0)
    def layer_op(self, list_skips):
        """
        Decode encoder skip tensors into a segmentation output.

        The skips are consumed deepest-first: at each step the running
        feature map is linearly resized to the spatial shape of the next
        (shallower) skip, concatenated with it along the channel axis,
        and passed through a residual block.  A final 1x1x1 convolution
        (no feature norm, no activation) produces the output channels.

        :param list_skips: list of encoder tensors ordered shallow-to-deep;
            reversed internally so decoding starts from the deepest one.
        :return: tensor, output of the 'seg' head.
        """
        # Define the decoding convolutional layers
        layer_instances = []  # every instantiated layer, for self._print
        flow = dict()

        # consume the skips from the deepest level first
        list_skips = list_skips[::-1]

        for mod in ['seg']:
            flow_mod = list_skips[0]

            # 'seg' head: res-blocks double their features, 4 output channels
            double = True
            n_output = 4

            for i in range(len(self.layers)):
                params = self.layers[i]

                # upsample to the spatial shape of the next skip tensor
                flow_mod = LinearResizeLayer(
                    list_skips[i + 1].shape.as_list()[1:-1])(flow_mod)

                print(mod)
                print(flow_mod)
                print('added with ')
                print(list_skips[i + 1])
                # merge with the skip connection along the channel axis
                flow_mod = ElementwiseLayer('CONCAT')(flow_mod,
                                                      list_skips[i + 1])
                print(flow_mod)

                res_block = ResBlock(n_output_chns=params['n_features'],
                                     kernels=params['kernels'],
                                     acti_func='leakyrelu',
                                     encoding=False,
                                     double_n=double,
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     name='%s_%s' % (params['name'], mod))
                layer_instances.append(res_block)
                flow_mod = res_block(flow_mod)

            # final projection to n_output channels, linear activation
            last_conv = ConvolutionalLayer(
                n_output_chns=n_output,
                kernel_size=(1, 1, 1),
                with_bn=False,
                acti_func=None,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='final_conv_seg')
            layer_instances.append(last_conv)
            flow[mod] = last_conv(flow_mod)

        self._print(layer_instances)
        return flow['seg']
# Example #19 (score: 0)
    def layer_op(self, images, is_training, layer_id=-1, **unused_kwargs):
        """
        Two-pathway network: a cropped full-resolution pathway and a
        downsampled pathway are convolved in parallel, the downsampled
        features are upsampled back to the original resolution, both are
        concatenated and fused by 1x1x1 convolution layers.

        :param images: tensor, input to the network, size should be divisible by d_factor
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :param unused_kwargs: not in use
        :return: tensor, network output
        """
        # image_size is defined as the largest context, then:
        #   downsampled path size: image_size / d_factor
        #   downsampled path output: image_size / d_factor - 16

        # to make sure same size of feature maps from both pathways:
        #   normal path size: (image_size / d_factor - 16) * d_factor + 16
        #   normal path output: (image_size / d_factor - 16) * d_factor

        # where 16 is fixed by the receptive field of conv layers
        # TODO: make sure label_size = image_size/d_factor - 16

        # image_size has to be an odd number and divisible by 3 and
        # smaller than the smallest image size of the input volumes

        # label_size should be (image_size/d_factor - 16) * d_factor

        assert self.d_factor % 2 == 1  # to make the downsampling centered
        assert (layer_util.check_spatial_dims(
            images, lambda x: x % self.d_factor == 0))
        assert (layer_util.check_spatial_dims(images, lambda x: x % 2 == 1)
                )  # to make the crop centered
        assert (layer_util.check_spatial_dims(images,
                                              lambda x: x > self.d_factor * 16)
                )  # required by receptive field

        # crop 25x25x25 from 57x57x57
        crop_op = CropLayer(border=self.crop_diff, name='cropping_input')
        normal_path = crop_op(images)
        print(crop_op)

        # downsample 19x19x19 from 57x57x57
        downsample_op = DownSampleLayer(func='CONSTANT',
                                        kernel_size=self.d_factor,
                                        stride=self.d_factor,
                                        padding='VALID',
                                        name='downsample_input')
        downsample_path = downsample_op(images)
        print(downsample_op)

        # convolutions for both pathways
        for n_features in self.conv_features:
            # normal pathway convolutions
            conv_path_1 = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='normal_conv')
            normal_path = conv_path_1(normal_path, is_training)
            print(conv_path_1)

            # downsampled pathway convolutions
            conv_path_2 = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='downsample_conv')
            downsample_path = conv_path_2(downsample_path, is_training)
            print(conv_path_2)

        # upsampling the downsampled pathway
        downsample_path = UpSampleLayer('REPLICATE',
                                        kernel_size=self.d_factor,
                                        stride=self.d_factor)(downsample_path)

        # concatenate both pathways
        output_tensor = ElementwiseLayer('CONCAT')(normal_path,
                                                   downsample_path)

        # 1x1x1 convolution layer
        for n_features in self.fc_features:
            conv_fc = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=1,
                acti_func=self.acti_func,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='conv_1x1x1_{}'.format(n_features))
            output_tensor = conv_fc(output_tensor, is_training)
            print(conv_fc)

        return output_tensor
# Example #20 (score: 0)
    def layer_op(self, thru_tensor, is_training=True, **unused_kwargs):
        """
        U-Net forward pass: four DOWNSAMPLE blocks (each keeping a skip
        tensor), a bottom UPSAMPLE block, three UPSAMPLE blocks fed by
        CONCAT skip connections, and a final 'NONE' block mapping to
        ``self.num_classes`` output channels.

        :param thru_tensor: the input is modified in-place as it goes through the network
        :param is_training: boolean, True if network is in training mode
        :param unused_kwargs: not in use
        :return: tensor, network output
        """
        # image_size  should be divisible by 16 because of max-pooling 4 times, 2x2x2
        assert layer_util.check_spatial_dims(thru_tensor,
                                             lambda x: x % 16 == 0)
        # encoder level 1; conv_1 is kept as a skip connection
        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[0], self.n_features[0]),
                                (3, 3),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L1')
        thru_tensor, conv_1 = block_layer(thru_tensor, is_training)
        print(block_layer)

        # encoder level 2; conv_2 is kept as a skip connection
        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[1], self.n_features[1]),
                                (3, 3),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L2')
        thru_tensor, conv_2 = block_layer(thru_tensor, is_training)
        print(block_layer)

        # encoder level 3; conv_3 is kept as a skip connection
        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[2], self.n_features[2]),
                                (3, 3),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L3')
        thru_tensor, conv_3 = block_layer(thru_tensor, is_training)
        print(block_layer)

        # encoder level 4; conv_4 is kept as a skip connection
        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[3], self.n_features[3]),
                                (3, 3),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L4')
        thru_tensor, conv_4 = block_layer(thru_tensor, is_training)
        print(block_layer)

        # bottom of the U: upsample back towards the decoder
        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[4], self.n_features[3]),
                                (3, 3),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='bottom')
        thru_tensor, _ = block_layer(thru_tensor, is_training)
        print(block_layer)

        # decoder level 4: concatenate skip conv_4, then upsample
        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[3], self.n_features[2]),
                                (3, 3),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='R4')
        concat_4 = ElementwiseLayer('CONCAT')(conv_4, thru_tensor)
        thru_tensor, _ = block_layer(concat_4, is_training)
        print(block_layer)

        # decoder level 3: concatenate skip conv_3, then upsample
        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[2], self.n_features[1]),
                                (3, 3),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='R3')
        concat_3 = ElementwiseLayer('CONCAT')(conv_3, thru_tensor)
        thru_tensor, _ = block_layer(concat_3, is_training)
        print(block_layer)

        # decoder level 2: concatenate skip conv_2, then upsample
        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[1], self.n_features[0]),
                                (3, 3),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='R2')
        concat_2 = ElementwiseLayer('CONCAT')(conv_2, thru_tensor)
        thru_tensor, _ = block_layer(concat_2, is_training)
        print(block_layer)

        # decoder level 1 + final classifier: last kernel is 1x1x1,
        # mapping to num_classes channels; no further up/down sampling
        block_layer = UNetBlock(
            'NONE', (self.n_features[0], self.n_features[0], self.num_classes),
            (3, 3, 1),
            with_downsample_branch=False,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=self.acti_func,
            name='R1_FC')

        concat_1 = ElementwiseLayer('CONCAT')(conv_1, thru_tensor)
        thru_tensor, _ = block_layer(concat_1, is_training)
        print(block_layer)

        return thru_tensor
# Example #21 (score: 0)
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        """
        Connect the network: an initial convolution, three groups of
        residual blocks with dilation factors 1, 2 and 4, two 1x1x1
        convolution layers (the last with linear activation), and a
        residual SUM connection from the input to the final features.

        :param images: tensor, input to the network; every spatial
            dimension must be divisible by 8
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :param unused_kwargs: not in use
        :return: tensor, network output (input + learned residual)
        """
        assert layer_util.check_spatial_dims(
            images, lambda x: x % 8 == 0)
        # go through self.layers, create an instance of each layer
        # and plugin data
        layer_instances = []

        input_tensor = images

        ### first convolution layer
        params = self.layers[0]
        first_conv_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            with_bias=True,
            with_bn=False,
            acti_func=self.acti_func,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = first_conv_layer(images, is_training)
        layer_instances.append((first_conv_layer, flow))

        def _dilated_group(tensor, dilation_factor, params):
            # one group of `repeat` HighResBlocks sharing a dilation factor
            with DilatedTensor(tensor, dilation_factor=dilation_factor) as dilated:
                for j in range(params['repeat']):
                    res_block = HighResBlock(
                        params['n_features'],
                        params['kernels'],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='%s_%d' % (params['name'], j))
                    dilated.tensor = res_block(dilated.tensor, is_training)
                    layer_instances.append((res_block, dilated.tensor))
            return dilated.tensor

        ### resblocks, all kernels dilated by 1 (normal convolution)
        flow = _dilated_group(flow, 1, self.layers[1])

        ### resblocks, all kernels dilated by 2
        flow = _dilated_group(flow, 2, self.layers[2])

        ### resblocks, all kernels dilated by 4
        flow = _dilated_group(flow, 4, self.layers[3])

        ### 1x1x1 convolution layer
        params = self.layers[4]
        fc_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            with_bias=True,
            with_bn=False,
            acti_func=self.acti_func,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 1x1x1 convolution layer, linear activation for the output
        params = self.layers[5]
        fc_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            with_bias=True,
            with_bn=False,
            acti_func=None,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        # residual connection from the input to the learned features
        output_tensor_res = ElementwiseLayer('SUM')(input_tensor, flow)

        # both branches of the original returned the same tensor; only
        # the layer printing depends on the training flag
        if is_training:
            self._print(layer_instances)
        return output_tensor_res
# Example #22 (score: 0)
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        """
        Anisotropic U-Net: three downsampling blocks with in-plane
        [3, 3, 1] kernels, upsampling blocks whose skip tensors are
        cropped to match the valid-convolution output sizes, and a final
        1x1x1 convolution mapping to ``self.num_classes`` channels.

        :param images: tensor, input to the network
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :param unused_kwargs: not in use
        :return: tensor, network output
        """
        # image_size-4  should be divisible by 8
        #assert layer_util.check_spatial_dims(images, lambda x: x % 16 == 4)
        #assert layer_util.check_spatial_dims(images, lambda x: x >= 89)
        # encoder level 0; conv_1 is kept as a skip connection
        block_layer = UNetBlock('DOWNSAMPLE_ANISOTROPIC',
                                (self.n_features[0], self.n_features[1]),
                                ([3, 3, 1], [3, 3, 1]),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='d0')
        pool_1, conv_1 = block_layer(images, is_training)
        print(block_layer)

        # encoder level 1; conv_2 is kept as a skip connection
        block_layer = UNetBlock('DOWNSAMPLE_ANISOTROPIC',
                                (self.n_features[1], self.n_features[2]),
                                ([3, 3, 1], [3, 3, 1]),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='d1')
        pool_2, conv_2 = block_layer(pool_1, is_training)
        print(block_layer)

        # encoder level 2; conv_3 is kept as a skip connection
        block_layer = UNetBlock('DOWNSAMPLE_ANISOTROPIC',
                                (self.n_features[2], self.n_features[3]),
                                ([3, 3, 1], [3, 3, 1]),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='d2')
        pool_3, conv_3 = block_layer(pool_2, is_training)
        print(block_layer)

        # bottom of the U: isotropic [3, 3, 3] kernels, then upsample
        block_layer = UNetBlock('UPSAMPLE_ANISOTROPIC',
                                (self.n_features[3], self.n_features[4]),
                                ([3, 3, 3], [3, 3, 3]),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='d3')
        up_3, _ = block_layer(pool_3, is_training)
        print(block_layer)

        block_layer = UNetBlock('UPSAMPLE_ANISOTROPIC',
                                (self.n_features[3], self.n_features[3]),
                                ([3, 3, 1], [3, 3, 1]),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='u2')
        # crop the skip tensor so its spatial size matches up_3
        crop_layer = CropLayer(border=[4, 4, 2], name='crop-8x8x0')
        concat_3 = ElementwiseLayer('CONCAT')(crop_layer(conv_3), up_3)
        up_2, _ = block_layer(concat_3, is_training)
        print(block_layer)

        block_layer = UNetBlock('UPSAMPLE_ANISOTROPIC',
                                (self.n_features[2], self.n_features[2]),
                                ([3, 3, 1], [3, 3, 1]),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='u1')
        # crop the skip tensor so its spatial size matches up_2
        crop_layer = CropLayer(border=[16, 16, 2], name='crop-32x32x0')
        concat_2 = ElementwiseLayer('CONCAT')(crop_layer(conv_2), up_2)
        up_1, _ = block_layer(concat_2, is_training)
        print(block_layer)

        block_layer = UNetBlock(
            'NONE', (self.n_features[1], self.n_features[1], self.num_classes),
            ([3, 3, 1], [3, 3, 1]),
            with_downsample_branch=True,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=self.acti_func,
            name='u0')
        # crop the skip tensor so its spatial size matches up_1
        crop_layer = CropLayer(border=[40, 40, 2], name='crop-80x80x0')
        concat_1 = ElementwiseLayer('CONCAT')(crop_layer(conv_1), up_1)
        print(block_layer)

        # for the last layer, upsampling path is not used
        _, output_tensor = block_layer(concat_1, is_training)

        # final per-voxel classifier: 1x1x1, bias, no feature norm,
        # linear activation (logits)
        output_conv_op = ConvolutionalLayer(
            n_output_chns=self.num_classes,
            kernel_size=1,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=None,
            name='{}'.format(self.num_classes),
            padding='VALID',
            with_bn=False,
            with_bias=True)
        final_output_tensor = output_conv_op(output_tensor, is_training)
        print(output_conv_op)

        return final_output_tensor
# Example #23 (score: 0)
    def layer_op(self, input_tensor, is_training):
        """
        Connect an autofocus block of two autofocus convolutional layers.

        Each autofocus layer runs the same convolution at
        ``self.num_branches`` dilation rates (weight sharing via
        ``DilatedTensor``), applies a per-branch batch norm, and blends
        the branches with attention weights produced by a small two-conv
        attention model.  An optional residual SUM connection adds the
        block input to the output before the final activation.

        :param input_tensor: tensor, input to the network
        :param is_training: boolean, True if network is in training mode
        :return: tensor, output of the autofocus block
        """
        output_tensor = input_tensor

        ########################################################################
        # 1: Create the first of the two autofocus layers of the block.
        ########################################################################
        # A convolution without feature norm and activation.
        conv_1 = ConvLayer(n_output_chns = self.n_output_chns[0],
                           kernel_size = self.kernel_size[0],
                           padding='SAME',
                           dilation = 1,
                           w_initializer = self.initializers['w'],
                           w_regularizer = self.regularizers['w'],
                           name = 'conv_1')

        # Create two conv layers for the attention model. The output of the
        # attention model will be needed for the K parallel conv layers.

        # First convolutional layer of the attention model (conv l,1):
        # halves the input channels.
        conv_att_11 = ConvLayer(n_output_chns = int(self.n_input_chns[0]/2),
                                kernel_size = self.kernel_size[0],
                                padding = 'SAME',
                                w_initializer = self.initializers['w'],
                                w_regularizer = self.regularizers['w'],
                                name = 'conv_att_11')

        # Second convolutional layer of the attention model (conv l,2):
        # one output channel per parallel branch.
        conv_att_12 = ConvLayer(n_output_chns = self.num_branches,
                                kernel_size = [1, 1, 1],
                                padding = 'SAME',
                                w_initializer = self.initializers['w'],
                                w_regularizer = self.regularizers['w'],
                                name = 'conv_att_12')

        # Batch norm (BN) layer for each of the K parallel convolutions
        bn_layer_1 = []
        for i in range(self.num_branches):
            bn_layer_1.append(BNLayer(regularizer = self.regularizers['w'],
                                      name = 'bn_layer_1_{}'.format(i)))

        # Activation function used in the first attention model
        acti_op_1 = ActiLayer(func = self.acti_func,
                              regularizer = self.regularizers['w'],
                              name = 'acti_op_1')

        ########################################################################
        # 2: Create the second of the two autofocus layers of the block.
        ########################################################################
        # A convolution without feature norm and activation.
        conv_2 = ConvLayer(n_output_chns = self.n_output_chns[1],
                           kernel_size = self.kernel_size[1],
                           padding='SAME',
                           dilation = 1,
                           w_initializer = self.initializers['w'],
                           w_regularizer = self.regularizers['w'],
                           name = 'conv_2')

        # Create two conv layers for the attention model. The output of the
        # attention model will be needed for the K parallel conv layers.
        # First convolutional layer of the attention model (conv l,1).
        conv_att_21 = ConvLayer(n_output_chns = int(self.n_input_chns[1]/2),
                                kernel_size = self.kernel_size[1],
                                padding = 'SAME',
                                w_initializer = self.initializers['w'],
                                w_regularizer = self.regularizers['w'],
                                name = 'conv_att_21')

        # Second convolutional layer of the attention model (conv l,2).
        conv_att_22 = ConvLayer(n_output_chns = self.num_branches,
                                kernel_size = [1, 1, 1],
                                padding = 'SAME',
                                w_initializer = self.initializers['w'],
                                w_regularizer = self.regularizers['w'],
                                name = 'conv_att_22')

        # Batch norm (BN) layer for each of the K parallel convolutions
        bn_layer_2 = []
        for i in range(self.num_branches):
            bn_layer_2.append(BNLayer(regularizer = self.regularizers['w'],
                                      name = 'bn_layer_2_{}'.format(i)))

        # Activation function used in the second attention model
        acti_op_2 = ActiLayer(func = self.acti_func,
                              regularizer = self.regularizers['w'],
                              name = 'acti_op_2')

        ########################################################################
        # 3: Create other parameterised layers
        ########################################################################
        acti_op = ActiLayer(func = self.acti_func,
                            regularizer = self.regularizers['w'],
                            name = 'acti_op')

        ########################################################################
        # 4: Connect layers
        ########################################################################
        # compute attention weights for the K parallel conv layers in the first
        # autofocus convolutional layer
        feature_1 = output_tensor
        att_1 = acti_op_1(conv_att_11(feature_1))
        att_1 = conv_att_12(att_1)
        # NOTE(review): axis=1 normalises across a *spatial* axis for an
        # NDHWC tensor, yet the weights are indexed on the last (branch)
        # axis below (att_1[:,:,:,:,i:(i+1)]) — confirm axis=1 is intended
        # rather than axis=-1.
        att_1 = tf.nn.softmax(att_1, axis=1)

        # Create K dilated tensors as input to the autofocus layer. This
        # simulates the K parallel convolutions with different dilation
        # rates. Doing it this way ensures the required weight sharing.
        dilated_tensor_1 = []
        for i in range(self.num_branches):
            dilated_1 = output_tensor
            with DilatedTensor(dilated_1, dilation_factor = self.dilation_list[i]) as dilated:
                dilated.tensor = conv_1(dilated.tensor)
                dilated.tensor = bn_layer_1[i](dilated.tensor, is_training)
            # weight each branch by its attention map (branch i only)
            dilated.tensor = dilated.tensor * att_1[:,:,:,:,i:(i+1)]
            dilated_tensor_1.append(dilated.tensor)
        output_tensor = tf.add_n(dilated_tensor_1)
        output_tensor = acti_op(output_tensor)

        # compute attention weights for the K parallel conv layers in the second
        # autofocus convolutional layer
        feature_2 = output_tensor
        att_2 = acti_op_2(conv_att_21(feature_2))
        att_2 = conv_att_22(att_2)
        # NOTE(review): same axis=1 question as for att_1 above.
        att_2 = tf.nn.softmax(att_2, axis=1)

        # Create K dilated tensors as input to the autofocus layer. This
        # simulates the K parallel convolutions with different dilation
        # rates. Doing it this way ensures the required weight sharing.
        dilated_tensor_2 = []
        for i in range(self.num_branches):
            dilated_2 = output_tensor
            with DilatedTensor(dilated_2, dilation_factor = self.dilation_list[i]) as dilated:
                dilated.tensor = conv_2(dilated.tensor)
                dilated.tensor = bn_layer_2[i](dilated.tensor, is_training)
            dilated.tensor = dilated.tensor * att_2[:,:,:,:,i:(i+1)]
            dilated_tensor_2.append(dilated.tensor)
        output_tensor = tf.add_n(dilated_tensor_2)

        # make residual connection using ElementwiseLayer with SUM
        if self.with_res:
            output_tensor = ElementwiseLayer('SUM')(output_tensor, input_tensor)

        # apply the last ReLU activation
        output_tensor = acti_op(output_tensor)
        print("output_tensor:", output_tensor)

        return output_tensor
# Example #24 (score: 0)
    def layer_op(self, images, is_training, layer_id=-1):
        """
        Assemble the VNet: four downsampling blocks, four upsampling
        blocks joined by skip (CONCAT) connections, and a final scoring
        block producing ``self.num_classes`` output channels.

        :param images: input tensor; every spatial dimension must be
            divisible by 8 (three 2x downsampling stages)
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :return: tensor, network output
        """
        assert layer_util.check_spatial_dims(images, lambda x: x % 8 == 0)

        # Tile the input channel-wise so the first residual connection
        # inside VNetBlock has a matching number of feature channels.
        if layer_util.infer_spatial_rank(images) == 2:
            padded_images = tf.tile(images, [1, 1, 1, self.n_features[0]])
        elif layer_util.infer_spatial_rank(images) == 3:
            padded_images = tf.tile(images, [1, 1, 1, 1, self.n_features[0]])
        else:
            raise ValueError('not supported spatial rank of the input image')
        # downsampling  blocks
        res_1, down_1 = VNetBlock('DOWNSAMPLE',
                                  1,
                                  self.n_features[0],
                                  self.n_features[1],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L1')(images, padded_images)
        res_2, down_2 = VNetBlock('DOWNSAMPLE',
                                  2,
                                  self.n_features[1],
                                  self.n_features[2],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L2')(down_1, down_1)
        res_3, down_3 = VNetBlock('DOWNSAMPLE',
                                  3,
                                  self.n_features[2],
                                  self.n_features[3],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L3')(down_2, down_2)
        # NOTE(review): w_initializer/w_regularizer were missing here in
        # the original, unlike every sibling block; added for consistency.
        res_4, down_4 = VNetBlock('DOWNSAMPLE',
                                  3,
                                  self.n_features[3],
                                  self.n_features[4],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L4')(down_3, down_3)
        # upsampling blocks
        _, up_4 = VNetBlock('UPSAMPLE',
                            3,
                            self.n_features[4],
                            self.n_features[4],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='V_')(down_4, down_4)
        concat_r4 = ElementwiseLayer('CONCAT')(up_4, res_4)
        _, up_3 = VNetBlock('UPSAMPLE',
                            3,
                            self.n_features[4],
                            self.n_features[3],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='R4')(concat_r4, up_4)
        concat_r3 = ElementwiseLayer('CONCAT')(up_3, res_3)
        _, up_2 = VNetBlock('UPSAMPLE',
                            3,
                            self.n_features[3],
                            self.n_features[2],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='R3')(concat_r3, up_3)
        concat_r2 = ElementwiseLayer('CONCAT')(up_2, res_2)
        _, up_1 = VNetBlock('UPSAMPLE',
                            2,
                            self.n_features[2],
                            self.n_features[1],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='R2')(concat_r2, up_2)
        # final class score
        concat_r1 = ElementwiseLayer('CONCAT')(up_1, res_1)
        _, output_tensor = VNetBlock('SAME',
                                     1,
                                     self.n_features[1],
                                     self.num_classes,
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     b_initializer=self.initializers['b'],
                                     b_regularizer=self.regularizers['b'],
                                     acti_func=self.acti_func,
                                     name='R1')(concat_r1, up_1)
        return output_tensor
# Example #25
# 0
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        """

        :param images: tensor to input to the network. Size has to be divisible by 8
        :param is_training:  boolean, True if network is in training mode
        :param layer_id: not in use
        :param unused_kwargs: other conditional arguments, not in use
        :return: tensor, network output
        """
        assert layer_util.check_spatial_dims(images, lambda x: x % 8 == 0)

        # Tile the input channel-wise so the first residual connection
        # inside VNetBlock has a matching number of feature channels.
        if layer_util.infer_spatial_rank(images) == 2:
            padded_images = tf.tile(images, [1, 1, 1, self.n_features[0]])
        elif layer_util.infer_spatial_rank(images) == 3:
            padded_images = tf.tile(images, [1, 1, 1, 1, self.n_features[0]])
        else:
            raise ValueError('not supported spatial rank of the input image')
        # downsampling  blocks
        res_1, down_1 = VNetBlock('DOWNSAMPLE',
                                  1,
                                  self.n_features[0],
                                  self.n_features[1],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L1')(images, padded_images)
        res_2, down_2 = VNetBlock('DOWNSAMPLE',
                                  2,
                                  self.n_features[1],
                                  self.n_features[2],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L2')(down_1, down_1)
        res_3, down_3 = VNetBlock('DOWNSAMPLE',
                                  3,
                                  self.n_features[2],
                                  self.n_features[3],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L3')(down_2, down_2)
        # NOTE(review): w_initializer/w_regularizer were missing here in
        # the original, unlike every sibling block; added for consistency.
        res_4, down_4 = VNetBlock('DOWNSAMPLE',
                                  3,
                                  self.n_features[3],
                                  self.n_features[4],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L4')(down_3, down_3)
        # upsampling blocks
        _, up_4 = VNetBlock('UPSAMPLE',
                            3,
                            self.n_features[4],
                            self.n_features[4],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='V_')(down_4, down_4)
        concat_r4 = ElementwiseLayer('CONCAT')(up_4, res_4)
        _, up_3 = VNetBlock('UPSAMPLE',
                            3,
                            self.n_features[4],
                            self.n_features[3],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='R4')(concat_r4, up_4)
        concat_r3 = ElementwiseLayer('CONCAT')(up_3, res_3)
        _, up_2 = VNetBlock('UPSAMPLE',
                            3,
                            self.n_features[3],
                            self.n_features[2],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='R3')(concat_r3, up_3)
        concat_r2 = ElementwiseLayer('CONCAT')(up_2, res_2)
        _, up_1 = VNetBlock('UPSAMPLE',
                            2,
                            self.n_features[2],
                            self.n_features[1],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='R2')(concat_r2, up_2)
        # final class score
        concat_r1 = ElementwiseLayer('CONCAT')(up_1, res_1)
        _, output_tensor = VNetBlock('SAME',
                                     1,
                                     self.n_features[1],
                                     self.num_classes,
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     b_initializer=self.initializers['b'],
                                     b_regularizer=self.regularizers['b'],
                                     acti_func=self.acti_func,
                                     name='R1')(concat_r1, up_1)
        return output_tensor