Example #1
    def layer_op(self, inputs, is_training=True):
        """
        The general connection pattern is::

            (inputs)--o-conv_0--conv_1-+-- (outputs)
                      |                |
                      o----------------o

        The ordering of the ``conv_0`` and ``conv_1`` layers is selected by
        ``type_string``.
        """
        conv_flow = inputs
        # batch normalisation layers
        bn_0 = BNLayer(**self.bn_param)
        bn_1 = BNLayer(**self.bn_param)
        # activation functions
        acti_0 = Acti(func=self.acti_func)
        acti_1 = Acti(func=self.acti_func)
        # convolutions
        conv_0 = Conv(acti_func=None,
                      with_bias=False,
                      with_bn=False,
                      **self.conv_param)
        conv_1 = Conv(acti_func=None,
                      with_bias=False,
                      with_bn=False,
                      **self.conv_param)

        if self.type_string == 'original':
            conv_flow = acti_0(bn_0(conv_0(conv_flow), is_training))
            conv_flow = bn_1(conv_1(conv_flow), is_training)
            conv_flow = ElementwiseLayer('SUM')(conv_flow, inputs)
            conv_flow = acti_1(conv_flow)
            return conv_flow

        if self.type_string == 'conv_bn_acti':
            conv_flow = acti_0(bn_0(conv_0(conv_flow), is_training))
            conv_flow = acti_1(bn_1(conv_1(conv_flow), is_training))
            return ElementwiseLayer('SUM')(conv_flow, inputs)

        if self.type_string == 'acti_conv_bn':
            conv_flow = bn_0(conv_0(acti_0(conv_flow)), is_training)
            conv_flow = bn_1(conv_1(acti_1(conv_flow)), is_training)
            return ElementwiseLayer('SUM')(conv_flow, inputs)

        if self.type_string == 'bn_acti_conv':
            conv_flow = conv_0(acti_0(bn_0(conv_flow, is_training)))
            conv_flow = conv_1(acti_1(bn_1(conv_flow, is_training)))
            return ElementwiseLayer('SUM')(conv_flow, inputs)

        raise ValueError('Unknown type string')
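For orientation, a rough standalone sketch of the 'original' ordering above, using plain TF1 ops instead of the wrapper layers (the 2D ops, ReLU, and a channel-preserving n_chns are illustrative assumptions, not part of the original block):

import tensorflow as tf

def residual_original(x, n_chns, is_training):
    # conv -> bn -> acti, then conv -> bn, sum with the input, final acti;
    # n_chns must equal x's channel count for the elementwise sum to be valid
    y = tf.layers.conv2d(x, n_chns, 3, padding='same', use_bias=False)
    y = tf.layers.batch_normalization(y, training=is_training)
    y = tf.nn.relu(y)
    y = tf.layers.conv2d(y, n_chns, 3, padding='same', use_bias=False)
    y = tf.layers.batch_normalization(y, training=is_training)
    return tf.nn.relu(y + x)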
Example #2
    def layer_op(self, input_tensor, is_training):
        """

        :param input_tensor: tensor, input to the network
        :param is_training: boolean, True if network is in training mode
        :return: tensor, output of the residual block
        """
        output_tensor = input_tensor
        for (i, k) in enumerate(self.kernels):
            # create parameterised layers
            bn_op = BNLayer(regularizer=self.regularizers['w'],
                            name='bn_{}'.format(i))
            acti_op = ActiLayer(func=self.acti_func,
                                regularizer=self.regularizers['w'],
                                name='acti_{}'.format(i))
            conv_op = ConvLayer(n_output_chns=self.n_output_chns,
                                kernel_size=k,
                                stride=1,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                name='conv_{}'.format(i))
            # connect layers
            output_tensor = bn_op(output_tensor, is_training)
            output_tensor = acti_op(output_tensor)
            output_tensor = conv_op(output_tensor)
        # make residual connections
        if self.with_res:
            output_tensor = ElementwiseLayer('SUM')(output_tensor,
                                                    input_tensor)
        return output_tensor
Example #3
    def layer_op(self, input_tensor, is_training=None, keep_prob=None):
        fc_layer = FCLayer(n_output_chns=self.n_output_chns,
                           with_bias=self.with_bias,
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           b_initializer=self.initializers['b'],
                           b_regularizer=self.regularizers['b'],
                           name='fc_')
        output_tensor = fc_layer(input_tensor)

        if self.with_bn:
            bn_layer = BNLayer(
                regularizer=self.regularizers['w'],
                moving_decay=self.moving_decay,
                eps=self.eps,
                name='bn_')
            output_tensor = bn_layer(output_tensor, is_training)

        if self.acti_func is not None:
            acti_layer = ActiLayer(
                func=self.acti_func,
                regularizer=self.regularizers['w'],
                name='acti_')
            output_tensor = acti_layer(output_tensor)

        if keep_prob is not None:
            dropout_layer = ActiLayer(func='dropout', name='dropout_')
            output_tensor = dropout_layer(output_tensor, keep_prob=keep_prob)

        return output_tensor
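A minimal usage sketch for this fully connected block, assuming it is exposed as NiftyNet's FullyConnectedLayer (the input shape, unit count, and keyword names here are illustrative; some releases use feature_normalization='batch' in place of with_bn=True):

import tensorflow as tf
from niftynet.layer.fully_connected import FullyConnectedLayer

x = tf.placeholder(tf.float32, shape=(2, 32))
fc = FullyConnectedLayer(n_output_chns=10, acti_func='relu', with_bn=True)
# batch norm requires is_training; dropout only happens because keep_prob is given
y = fc(x, is_training=True, keep_prob=0.5)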
Example #4
    def layer_op(self, input_tensor, is_training):
        output_tensor = input_tensor
        for i in range(len(self.kernels)):
            # create parameterised layers
            bn_op = BNLayer(
                regularizer=self.regularizers['w'],  # add regularizer for simplicity
                name='bn_{}'.format(i))
            acti_op = ActiLayer(func=self.acti_func,
                                regularizer=self.regularizers['w'],
                                name='acti_{}'.format(i))
            conv_op = ConvLayer(n_output_chns=self.n_output_chns,
                                kernel_size=self.kernels[i],
                                stride=self.strides[i],
                                dilation=self.dilation_rates[i],
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                name='conv_{}'.format(i))
            # construct the operations first, then connect them:
            # conv -> activation -> batch norm
            output_tensor = conv_op(output_tensor)
            output_tensor = acti_op(output_tensor)
            output_tensor = bn_op(output_tensor, is_training)
        # make residual connections: the input is added directly to the output
        if self.with_res:
            output_tensor = ElementwiseLayer('SUM')(output_tensor,
                                                    input_tensor)
        return output_tensor
Example #5
    def layer_op(self, input_tensor, is_training=None, keep_prob=None):
        # init sub-layers
        deconv_layer = DeconvLayer(n_output_chns=self.n_output_chns,
                                   kernel_size=self.kernel_size,
                                   stride=self.stride,
                                   padding=self.padding,
                                   with_bias=self.with_bias,
                                   w_initializer=self.initializers['w'],
                                   w_regularizer=self.regularizers['w'],
                                   b_initializer=self.initializers['b'],
                                   b_regularizer=self.regularizers['b'],
                                   name='deconv_')
        output_tensor = deconv_layer(input_tensor)

        if self.with_bn:
            if is_training is None:
                raise ValueError('is_training argument should be '
                                 'True or False unless with_bn is False')
            bn_layer = BNLayer(regularizer=self.regularizers['w'],
                               moving_decay=self.moving_decay,
                               eps=self.eps,
                               name='bn_')
            output_tensor = bn_layer(output_tensor, is_training)

        if self.acti_func is not None:
            acti_layer = ActiLayer(func=self.acti_func,
                                   regularizer=self.regularizers['w'],
                                   name='acti_')
            output_tensor = acti_layer(output_tensor)

        if keep_prob is not None:
            dropout_layer = ActiLayer(func='dropout', name='dropout_')
            output_tensor = dropout_layer(output_tensor, keep_prob=keep_prob)
        return output_tensor
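A sketch of driving this block, assuming it is exposed as NiftyNet's DeconvolutionalLayer (the input shape and channel counts are illustrative). The ValueError guard above means is_training is mandatory whenever with_bn=True:

import tensorflow as tf
from niftynet.layer.deconvolution import DeconvolutionalLayer

x = tf.placeholder(tf.float32, shape=(2, 16, 16, 8))
up = DeconvolutionalLayer(n_output_chns=4, kernel_size=3, stride=2, with_bn=True)
y = up(x, is_training=True)  # omitting is_training here would raise the ValueError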
Example #6
    def layer_op_prelu(self, input_tensor, is_training):
        output_tensor = input_tensor
        for i in range(len(self.kernels)):
            # create parameterised layers
            bn_op = BNLayer(regularizer=self.regularizers['w'],
                            name='bn_{}'.format(i))
            acti_op = ActiLayer(func=self.acti_func,
                                regularizer=self.regularizers['w'],
                                name='acti_{}'.format(i))
            conv_op = ConvLayer(n_output_chns=self.n_output_chns,
                                kernel_size=self.kernels[i],
                                stride=self.strides[i],
                                dilation=self.dilation_rates[i],
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                name='conv_{}'.format(i))
            # connect layers
            output_tensor = bn_op(output_tensor, is_training)
            output_tensor = acti_op(output_tensor)
            output_tensor = conv_op(output_tensor)
        # make residual connections
        if self.with_res:
            output_tensor = ElementwiseLayer('SUM')(output_tensor,
                                                    input_tensor)
        return output_tensor
Example #7
    def create(self, input_chns):
        """

        :param input_chns: int, number of input channels
        :return: tuple with the series of convolutional layers
        """

        if self.n_output_chns == input_chns:
            b1 = self.Conv(self.bottle_neck_chns,
                           kernel_size=1,
                           stride=self.stride)
            b2 = self.Conv(self.bottle_neck_chns, kernel_size=3)
            b3 = self.Conv(self.n_output_chns, 1)
            return BottleneckBlockDesc1(conv=[b1, b2, b3])
        else:
            b1 = BNLayer()
            b2 = self.Conv(self.bottle_neck_chns,
                           kernel_size=1,
                           stride=self.stride,
                           acti_func=None,
                           feature_normalization=None)
            b3 = self.Conv(self.bottle_neck_chns, kernel_size=3)
            b4 = self.Conv(self.n_output_chns, kernel_size=1)
            b5 = self.Conv(self.n_output_chns,
                           kernel_size=1,
                           stride=self.stride,
                           acti_func=None,
                           feature_normalization=None)
            return BottleneckBlockDesc2(common_bn=b1,
                                        conv=[b2, b3, b4],
                                        conv_shortcut=b5)
Example #8
    def create(self, input_chns):
        if self.n_output_chns == input_chns:
            b1 = self.Conv(self.bottle_neck_chns,
                           kernel_size=1,
                           stride=self.stride)
            b2 = self.Conv(self.bottle_neck_chns, kernel_size=3)
            b3 = self.Conv(self.n_output_chns, 1)
            return BottleneckBlockDesc1(conv=[b1, b2, b3])
        else:
            b1 = BNLayer()
            b2 = self.Conv(self.bottle_neck_chns,
                           kernel_size=1,
                           stride=self.stride,
                           acti_func=None,
                           with_bn=False)
            b3 = self.Conv(self.bottle_neck_chns, kernel_size=3)
            b4 = self.Conv(self.n_output_chns, kernel_size=1)
            b5 = self.Conv(self.n_output_chns,
                           kernel_size=1,
                           stride=self.stride,
                           acti_func=None,
                           with_bn=False)
            return BottleneckBlockDesc2(common_bn=b1,
                                        conv=[b2, b3, b4],
                                        conv_shortcut=b5)
Example #9
    def layer_op(self, input_tensor, is_training=None, keep_prob=None):
        conv_layer = ConvLayer(n_output_chns=self.n_output_chns,
                               kernel_size=self.kernel_size,
                               stride=self.stride,
                               dilation=self.dilation,
                               padding=self.padding,
                               with_bias=self.with_bias,
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               b_initializer=self.initializers['b'],
                               b_regularizer=self.regularizers['b'],
                               padding_constant=self.padding_constant,
                               name='conv_')

        if self.feature_normalization == 'batch':
            if is_training is None:
                raise ValueError(
                    'is_training argument should be '
                    'True or False unless feature_normalization is False')
            bn_layer = BNLayer(regularizer=self.regularizers['w'],
                               moving_decay=self.moving_decay,
                               eps=self.eps,
                               name='bn_')
        elif self.feature_normalization == 'instance':
            in_layer = InstanceNormLayer(eps=self.eps, name='in_')
        elif self.feature_normalization == 'group':
            gn_layer = GNLayer(regularizer=self.regularizers['w'],
                               group_size=self.group_size,
                               eps=self.eps,
                               name='gn_')
        if self.acti_func is not None:
            acti_layer = ActiLayer(func=self.acti_func,
                                   regularizer=self.regularizers['w'],
                                   name='acti_')

        if keep_prob is not None:
            dropout_layer = ActiLayer(func='dropout', name='dropout_')

        def activation(output_tensor):
            if self.feature_normalization == 'batch':
                output_tensor = bn_layer(output_tensor, is_training)
            elif self.feature_normalization == 'instance':
                output_tensor = in_layer(output_tensor)
            elif self.feature_normalization == 'group':
                output_tensor = gn_layer(output_tensor)
            if self.acti_func is not None:
                output_tensor = acti_layer(output_tensor)
            if keep_prob is not None:
                output_tensor = dropout_layer(output_tensor,
                                              keep_prob=keep_prob)
            return output_tensor

        if self.preactivation:
            output_tensor = conv_layer(activation(input_tensor))
        else:
            output_tensor = activation(conv_layer(input_tensor))

        return output_tensor
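The preactivation flag above only swaps the composition order of the same sub-layers. A compact sketch of the two orderings with plain TF1 ops, where batch norm and ReLU stand in for the configurable normalisation and activation:

import tensorflow as tf

def post_activation(x, n_chns, training):
    # classic ordering: conv -> norm -> acti
    y = tf.layers.conv2d(x, n_chns, 3, padding='same')
    return tf.nn.relu(tf.layers.batch_normalization(y, training=training))

def pre_activation(x, n_chns, training):
    # pre-activation ordering: norm -> acti -> conv
    y = tf.nn.relu(tf.layers.batch_normalization(x, training=training))
    return tf.layers.conv2d(y, n_chns, 3, padding='same')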
Example #10
    def create(self):
        bn = BNLayer()
        fc = FCLayer(self.num_classes)
        conv1 = self.Conv(self.n_features[0], acti_func=None, with_bn=False)
        blocks = []
        blocks += [DownResBlock(self.n_features[1],
                                self.n_blocks_per_resolution, 1, self.Conv)]
        for n in self.n_features[2:]:
            blocks += [DownResBlock(n, self.n_blocks_per_resolution, 2,
                                    self.Conv)]
        return SE_ResNetDesc(bn=bn, fc=fc, conv1=conv1, blocks=blocks)
Example #11
    def test_2d_bn_shape(self):
        x = self.get_2d_input()
        bn_layer = BNLayer()
        out_bn = bn_layer(x, is_training=True)
        print(bn_layer)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())

            out = sess.run(out_bn)
            x_shape = tuple(x.get_shape().as_list())
            self.assertAllClose(x_shape, out.shape)
Example #12
    def test_2d_bn_reg_shape(self):
        x = self.get_2d_input()
        bn_layer = BNLayer(regularizer=regularizers.l2_regularizer(0.5))
        out_bn = bn_layer(x, is_training=True)
        test_bn = bn_layer(x, is_training=False)
        print(bn_layer)
        reg_loss = tf.add_n(bn_layer.regularizer_loss())

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())

            out = sess.run(out_bn)
            x_shape = tuple(x.shape.as_list())
            self.assertAllClose(x_shape, out.shape)
            # self.assertAllClose(np.zeros(x_shape), out)

            out = sess.run(test_bn)
            self.assertAllClose(x_shape, out.shape)
            # self.assertAllClose(np.zeros(x_shape), out)

            out = sess.run(reg_loss)
            self.assertAlmostEqual(out, 2.0)
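The expected regularisation loss of 2.0 can be reconstructed from the initial values: l2_regularizer(0.5) contributes 0.5 * sum(w ** 2) / 2 per variable, and with the usual initialisation (BN scale gamma all ones, offset beta all zeros) the total is 0.25 * n_channels, so the assertion implies get_2d_input() returns an 8-channel tensor.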
Example #13
    def test_2d_bn_reg_shape(self):
        x = self.get_2d_input()
        bn_layer = BNLayer(regularizer=regularizers.l2_regularizer(0.5))
        out_bn = bn_layer(x, is_training=True)
        test_bn = bn_layer(x, is_training=False)
        print(bn_layer)
        reg_loss = tf.add_n(bn_layer.regularizer_loss())

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())

            out = sess.run(out_bn)
            x_shape = tuple(x.get_shape().as_list())
            self.assertAllClose(x_shape, out.shape)
            # self.assertAllClose(np.zeros(x_shape), out)

            out = sess.run(test_bn)
            self.assertAllClose(x_shape, out.shape)
            # self.assertAllClose(np.zeros(x_shape), out)

            out = sess.run(reg_loss)
            self.assertAlmostEqual(out, 2.0)
Example #14
    def test_3d_bn_reg_shape(self):
        x = self.get_3d_input()
        bn_layer = BNLayer(regularizer=regularizers.l2_regularizer(0.5))
        out_bn = bn_layer(x, is_training=True)
        test_bn = bn_layer(x, is_training=False)
        print(bn_layer)

        with self.cached_session() as sess:
            sess.run(tf.global_variables_initializer())

            out = sess.run(out_bn)
            x_shape = tuple(x.shape.as_list())
            self.assertAllClose(x_shape, out.shape)
            # self.assertAllClose(np.zeros(x_shape), out)

            out = sess.run(test_bn)
            self.assertAllClose(x_shape, out.shape)
Example #15
    def layer_op(self, input_tensor, is_training=None, keep_prob=None):
        conv_layer = ConvLayer(n_output_chns=self.n_output_chns,
                               kernel_size=self.kernel_size,
                               stride=self.stride,
                               dilation=self.dilation,
                               padding=self.padding,
                               with_bias=self.with_bias,
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               b_initializer=self.initializers['b'],
                               b_regularizer=self.regularizers['b'],
                               name='conv_')

        output_tensor = conv_layer(input_tensor)

        #self.CONV_KERNEL = conv_layer.CONV_KERNEL

        if self.with_bn:
            if is_training is None:
                raise ValueError('is_training argument should be '
                                 'True or False unless with_bn is False')
            bn_layer = BNLayer(regularizer=self.regularizers['w'],
                               moving_decay=self.moving_decay,
                               eps=self.eps,
                               name='bn_')
            output_tensor = bn_layer(output_tensor, is_training)

        if self.acti_func is not None:
            acti_layer = ActiLayer(func=self.acti_func,
                                   regularizer=self.regularizers['w'],
                                   name='acti_')
            output_tensor = acti_layer(output_tensor)

        OUTPUT_TENSOR = tf.identity(output_tensor, name="MYOUTPUTTENSOR")
        #print("__________OUTPUT_TENSOR: ", OUTPUT_TENSOR.name)

        if keep_prob is not None:
            dropout_layer = ActiLayer(func='dropout', name='dropout_')
            output_tensor = dropout_layer(output_tensor, keep_prob=keep_prob)

        OUTPUT_TENSORR = tf.identity(output_tensor,
                                     name="MYOUTPUTTENSOR_DROPOUT")
        #print("_________OUTPUT_TENSORR dropout: ", OUTPUT_TENSORR.name)

        return output_tensor
Example #16
    def create_network(self):
        hyper = self.hyperparameters

        # Initial Convolution
        net_initial_conv = ConvolutionalLayer(hyper['n_input_channels'][0],
                                              kernel_size=5,
                                              stride=2)

        # Dense Block Params
        downsample_channels = list(hyper['n_input_channels'][1:]) + [None]
        num_blocks = len(hyper["n_dense_channels"])
        use_bdo = self.architecture_parameters['use_bdo']

        # Create DenseBlocks
        net_dense_vblocks = []

        for idx in range(num_blocks):
            dense_ch = hyper["n_dense_channels"][idx]  # Num dense channels
            seg_ch = hyper["n_seg_channels"][idx]  # Num segmentation ch
            down_ch = downsample_channels[idx]  # Num of downsampling ch
            dil_rate = hyper["dilation_rates"][idx]  # Dilation rate

            # Dense feature block
            dblock = DenseFeatureStackBlockWithSkipAndDownsample(
                dense_ch,
                3,
                dil_rate,
                seg_ch,
                down_ch,
                use_bdo,
                acti_func='relu')

            net_dense_vblocks.append(dblock)

        # Segmentation
        net_seg_layer = ConvolutionalLayer(self.num_classes,
                                           kernel_size=hyper['final_kernel'],
                                           with_bn=False,
                                           with_bias=True)

        return DenseVNetDesc(initial_bn=BNLayer(),
                             initial_conv=net_initial_conv,
                             dense_vblocks=net_dense_vblocks,
                             seg_layer=net_seg_layer)
Example #17
    def create(self):
        """

        :return: tuple with batch norm layer, fully connected layer, first conv layer and all residual blocks
        """
        bn = BNLayer()
        fc = FCLayer(self.num_classes)
        conv1 = self.Conv(self.n_features[0],
                          acti_func=None,
                          feature_normalization=None)
        blocks = []
        blocks += [
            DownResBlock(self.n_features[1], self.n_blocks_per_resolution, 1,
                         self.Conv)
        ]
        for n in self.n_features[2:]:
            blocks += [
                DownResBlock(n, self.n_blocks_per_resolution, 2, self.Conv)
            ]
        return SE_ResNetDesc(bn=bn, fc=fc, conv1=conv1, blocks=blocks)
Example #18
    def layer_op(self, input_tensor, is_training=None, keep_prob=None):
        fc_layer = FCLayer(n_output_chns=self.n_output_chns,
                           with_bias=self.with_bias,
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           b_initializer=self.initializers['b'],
                           b_regularizer=self.regularizers['b'],
                           name='fc_')
        output_tensor = fc_layer(input_tensor)

        if self.feature_normalization == 'batch':
            if is_training is None:
                raise ValueError(
                    'is_training argument should be '
                    'True or False unless feature_normalization is False')
            bn_layer = BNLayer(regularizer=self.regularizers['w'],
                               moving_decay=self.moving_decay,
                               eps=self.eps,
                               name='bn_')
            output_tensor = bn_layer(output_tensor, is_training)
        elif self.feature_normalization == 'instance':
            in_layer = InstanceNormLayer(eps=self.eps, name='in_')
            output_tensor = in_layer(output_tensor)
        elif self.feature_normalization == 'group':
            gn_layer = GNLayer(regularizer=self.regularizers['w'],
                               group_size=self.group_size,
                               eps=self.eps,
                               name='gn_')
            output_tensor = gn_layer(output_tensor)

        if self.acti_func is not None:
            acti_layer = ActiLayer(func=self.acti_func,
                                   regularizer=self.regularizers['w'],
                                   name='acti_')
            output_tensor = acti_layer(output_tensor)

        if keep_prob is not None:
            dropout_layer = ActiLayer(func='dropout', name='dropout_')
            output_tensor = dropout_layer(output_tensor, keep_prob=keep_prob)

        return output_tensor
Example #19
    def layer_op(self, input_tensor, is_training):
        """
        :param input_tensor: tensor, input to the network
        :param is_training: boolean, True if network is in training mode
        :return: tensor, output of the autofocus block
        """
        output_tensor = input_tensor

        ########################################################################
        # 1: Create the first of the two autofocus layers of the block.
        ########################################################################
        # A convolution without feature norm and activation.
        conv_1 = ConvLayer(n_output_chns=self.n_output_chns[0],
                           kernel_size=self.kernel_size[0],
                           padding='SAME',
                           dilation=1,
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           name='conv_1')

        # Create two conv layers for the attention model. The output of the
        # attention model will be needed for the K parallel conv layers.

        # First convolutional layer of the attention model (conv l,1).
        conv_att_11 = ConvLayer(n_output_chns=int(self.n_input_chns[0] / 2),
                                kernel_size=self.kernel_size[0],
                                padding='SAME',
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                name='conv_att_11')

        # Second convolutional layer of the attention model (conv l,2).
        conv_att_12 = ConvLayer(n_output_chns=self.num_branches,
                                kernel_size=[1, 1, 1],
                                padding='SAME',
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                name='conv_att_12')

        # Batch norm (BN) layer for each of the K parallel convolutions
        bn_layer_1 = []
        for i in range(self.num_branches):
            bn_layer_1.append(BNLayer(regularizer=self.regularizers['w'],
                                      name='bn_layer_1_{}'.format(i)))

        # Activation function used in the first attention model
        acti_op_1 = ActiLayer(func=self.acti_func,
                              regularizer=self.regularizers['w'],
                              name='acti_op_1')

        ########################################################################
        # 2: Create the second of the two autofocus layers of the block.
        ########################################################################
        # A convolution without feature norm and activation.
        conv_2 = ConvLayer(n_output_chns=self.n_output_chns[1],
                           kernel_size=self.kernel_size[1],
                           padding='SAME',
                           dilation=1,
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           name='conv_2')

        # Create two conv layers for the attention model. The output of the
        # attention model will be needed for the K parallel conv layers.
        # First convolutional layer of the attention model (conv l,1).
        conv_att_21 = ConvLayer(n_output_chns=int(self.n_input_chns[1] / 2),
                                kernel_size=self.kernel_size[1],
                                padding='SAME',
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                name='conv_att_21')

        # Second convolutional layer of the attention model (conv l,2).
        conv_att_22 = ConvLayer(n_output_chns=self.num_branches,
                                kernel_size=[1, 1, 1],
                                padding='SAME',
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                name='conv_att_22')

        # Batch norm (BN) layer for each of the K parallel convolutions
        bn_layer_2 = []
        for i in range(self.num_branches):
            bn_layer_2.append(BNLayer(regularizer=self.regularizers['w'],
                                      name='bn_layer_2_{}'.format(i)))

        # Activation function used in the second attention model
        acti_op_2 = ActiLayer(func=self.acti_func,
                              regularizer=self.regularizers['w'],
                              name='acti_op_2')

        ########################################################################
        # 3: Create other parameterised layers
        ########################################################################
        acti_op = ActiLayer(func=self.acti_func,
                            regularizer=self.regularizers['w'],
                            name='acti_op')

        ########################################################################
        # 4: Connect layers
        ########################################################################
        # compute attention weights for the K parallel conv layers in the first
        # autofocus convolutional layer
        feature_1 = output_tensor
        att_1 = acti_op_1(conv_att_11(feature_1))
        att_1 = conv_att_12(att_1)
        att_1 = tf.nn.softmax(att_1, axis=-1)  # normalise over the K branches

        # Create K dilated tensors as input to the autofocus layer. This
        # simulates the K parallel convolutions with different dilation
        # rates. Doing it this way ensures the required weight sharing.
        dilated_tensor_1 = []
        for i in range(self.num_branches):
            dilated_1 = output_tensor
            with DilatedTensor(dilated_1,
                               dilation_factor=self.dilation_list[i]) as dilated:
                dilated.tensor = conv_1(dilated.tensor)
                dilated.tensor = bn_layer_1[i](dilated.tensor, is_training)
            dilated.tensor = dilated.tensor * att_1[:, :, :, :, i:(i + 1)]
            dilated_tensor_1.append(dilated.tensor)
        output_tensor = tf.add_n(dilated_tensor_1)
        output_tensor = acti_op(output_tensor)

        # compute attention weights for the K parallel conv layers in the second
        # autofocus convolutional layer
        feature_2 = output_tensor
        att_2 = acti_op_2(conv_att_21(feature_2))
        att_2 = conv_att_22(att_2)
        att_2 = tf.nn.softmax(att_2, axis=-1)  # normalise over the K branches

        # Create K dilated tensors as input to the autofocus layer. This
        # simulates the K parallel convolutions with different dilation
        # rates. Doing it this way ensures the required weight sharing.
        dilated_tensor_2 = []
        for i in range(self.num_branches):
            dilated_2 = output_tensor
            with DilatedTensor(dilated_2,
                               dilation_factor=self.dilation_list[i]) as dilated:
                dilated.tensor = conv_2(dilated.tensor)
                dilated.tensor = bn_layer_2[i](dilated.tensor, is_training)
            dilated.tensor = dilated.tensor * att_2[:, :, :, :, i:(i + 1)]
            dilated_tensor_2.append(dilated.tensor)
        output_tensor = tf.add_n(dilated_tensor_2)

        # make residual connection using ElementwiseLayer with SUM
        if self.with_res:
            output_tensor = ElementwiseLayer('SUM')(output_tensor, input_tensor)

        # apply the last ReLU activation
        output_tensor = acti_op(output_tensor)
        print("output_tensor:", output_tensor)

        return output_tensor
Example #20
    def layer_op(self, input_tensor, is_training, layer_id=-1):
        hp = self.hyperparameters
        if is_training and hp['augmentation_scale'] > 0:
            aug = Affine3DAugmentationLayer(hp['augmentation_scale'], 'LINEAR',
                                            'ZERO')
            input_tensor = aug(input_tensor)
        channel_dim = len(input_tensor.get_shape()) - 1
        input_size = input_tensor.get_shape().as_list()
        spatial_rank = len(input_size) - 2

        modulo = 2**(len(hp['dilation_rates']))
        assert layer_util.check_spatial_dims(input_tensor,
                                             lambda x: x % modulo == 0)

        downsample_channels = list(hp['n_input_channels'][1:]) + [None]
        v_params = zip(hp['n_dense_channels'], hp['n_seg_channels'],
                       downsample_channels, hp['dilation_rates'],
                       range(len(downsample_channels)))

        downsampled_img = BNLayer()(
            tf.nn.avg_pool3d(input_tensor, [1] + [3] * spatial_rank + [1],
                             [1] + [2] * spatial_rank + [1], 'SAME'),
            is_training=is_training)
        all_segmentation_features = [downsampled_img]
        output_shape = downsampled_img.get_shape().as_list()[1:-1]
        initial_features = ConvolutionalLayer(hp['n_input_channels'][0],
                                              kernel_size=5,
                                              stride=2)(
                                                  input_tensor,
                                                  is_training=is_training)

        down = tf.concat([downsampled_img, initial_features], channel_dim)
        for dense_ch, seg_ch, down_ch, dil_rate, idx in v_params:
            sd = DenseFeatureStackBlockWithSkipAndDownsample(
                dense_ch,
                3,
                dil_rate,
                seg_ch,
                down_ch,
                self.architecture_parameters['use_bdo'],
                acti_func='relu')
            skip, down = sd(down,
                            is_training=is_training,
                            keep_prob=hp['p_channels_selected'])
            all_segmentation_features.append(image_resize(skip, output_shape))
        segmentation = ConvolutionalLayer(
            self.num_classes,
            kernel_size=hp['final_kernel'],
            with_bn=False,
            with_bias=True)(tf.concat(all_segmentation_features, channel_dim),
                            is_training=is_training)
        if self.architecture_parameters['use_prior']:
            segmentation = segmentation + \
                           SpatialPriorBlock([12] * spatial_rank, output_shape)
        if is_training and hp['augmentation_scale'] > 0:
            inverse_aug = aug.inverse()
            segmentation = inverse_aug(segmentation)
        segmentation = image_resize(segmentation, input_size[1:-1])
        seg_summary = tf.to_float(
            tf.expand_dims(tf.argmax(segmentation, -1),
                           -1)) * (255. / self.num_classes - 1)
        m, v = tf.nn.moments(input_tensor, axes=[1, 2, 3], keep_dims=True)
        img_summary = tf.minimum(
            255.,
            tf.maximum(0., (tf.to_float(input_tensor - m) /
                            (tf.sqrt(v) * 2.) + 1.) * 127.))
        image3_axial('imgseg', tf.concat([img_summary, seg_summary], 1), 5,
                     [tf.GraphKeys.SUMMARIES])
        return segmentation
Example #21
    def layer_op(self, input_tensor, is_training, layer_id=-1):
        hp = self.hyperparameters
        if is_training and hp['augmentation_scale'] > 0:
            aug = Affine3DAugmentationLayer(hp['augmentation_scale'], 'LINEAR',
                                            'ZERO')
            input_tensor = aug(input_tensor)
        channel_dim = len(input_tensor.get_shape()) - 1
        input_size = input_tensor.get_shape().as_list()
        spatial_rank = len(input_size) - 2

        modulo = 2**(len(hp['dilation_rates']))
        assert layer_util.check_spatial_dims(input_tensor,
                                             lambda x: x % modulo == 0)

        downsample_channels = list(hp['n_input_channels'][1:]) + [None]
        v_params = zip(hp['n_dense_channels'], hp['n_seg_channels'],
                       downsample_channels, hp['dilation_rates'],
                       range(len(downsample_channels)))

        downsampled_img = BNLayer()(
            tf.nn.avg_pool3d(input_tensor, [1] + [3] * spatial_rank + [1],
                             [1] + [2] * spatial_rank + [1], 'SAME'),
            is_training=is_training)
        all_segmentation_features = [downsampled_img]
        output_shape = downsampled_img.get_shape().as_list()[1:-1]
        initial_features = ConvolutionalLayer(hp['n_input_channels'][0],
                                              kernel_size=5,
                                              stride=2)(
                                                  input_tensor,
                                                  is_training=is_training)

        down = tf.concat([downsampled_img, initial_features], channel_dim)
        ## ADDED to prevent dropout at inference
        if is_training is False:
            hp['p_channels_selected'] = 1
        ######
        for dense_ch, seg_ch, down_ch, dil_rate, idx in v_params:
            sd = DenseFeatureStackBlockWithSkipAndDownsample(
                dense_ch,
                3,
                dil_rate,
                seg_ch,
                down_ch,
                self.architecture_parameters['use_bdo'],
                acti_func='relu')
            skip, down = sd(down,
                            is_training=is_training,
                            keep_prob=hp['p_channels_selected'])
            all_segmentation_features.append(image_resize(skip, output_shape))
        segmentation = ConvolutionalLayer(
            10, kernel_size=hp['final_kernel'], with_bn=False,
            with_bias=True)(tf.concat(all_segmentation_features, channel_dim),
                            is_training=is_training)
        if self.architecture_parameters['use_prior']:
            segmentation = segmentation + \
                           SpatialPriorBlock([12] * spatial_rank, output_shape)
        if is_training and hp['augmentation_scale'] > 0:
            inverse_aug = aug.inverse()
            segmentation = inverse_aug(segmentation)
        segmentation = image_resize(segmentation, input_size[1:-1])
        #seg_summary = tf.to_float(tf.expand_dims(tf.argmax(segmentation,-1),-1)) * (255./self.num_classes-1)
        ###########
        # =============================================================================
        k = segmentation.get_shape().as_list()
        segmentation = tf.nn.max_pool3d(segmentation, [1, 1, 1, k[3], 1],
                                        [1, 1, 1, 1, 1],
                                        'VALID',
                                        data_format='NDHWC')
        segmentation = tf.reshape(segmentation, [k[0], k[1], k[2], k[-1]])
        segmentation = tf.layers.conv2d(
            segmentation,
            filters=10,
            kernel_size=(3, 3),
            strides=1,
            padding='SAME',
            use_bias=True,
            kernel_initializer=tf.variance_scaling_initializer(),
            activation=tf.nn.relu,
            data_format='channels_last')
        segmentation = tf.layers.conv2d(
            segmentation,
            filters=2,
            kernel_size=(3, 3),
            strides=1,
            padding='SAME',
            use_bias=True,
            kernel_initializer=tf.variance_scaling_initializer(),
            activation=tf.nn.relu,
            data_format='channels_last')

        # =============================================================================
        ###########
        #segmentation = tf.transpose(segmentation,[0,3,1,2])

        return segmentation
Example #22
    def layer_op(self,
                 images,
                 is_training=True,
                 layer_id=-1,
                 keep_prob=0.7,
                 **unused_kwargs):

        print('learning downsample ...')

        # >>>>>>>>>>>>>>>> learning down sample
        lds1 = ConvolutionalLayer(32,
                                  conv_type='REGULAR',
                                  kernel_size=3,
                                  stride=2,
                                  w_initializer=self.w_initializer,
                                  w_regularizer=self.w_regularizer)
        lds2 = ConvolutionalLayer(48,
                                  conv_type='SEPARABLE_2D',
                                  kernel_size=3,
                                  stride=2,
                                  w_initializer=self.w_initializer,
                                  w_regularizer=self.w_regularizer)
        lds3 = ConvolutionalLayer(64,
                                  conv_type='SEPARABLE_2D',
                                  kernel_size=3,
                                  stride=2)

        flow = lds1(images, is_training=is_training)
        flow = lds2(flow, is_training=is_training)
        flow = lds3(flow, is_training=is_training)

        lds = flow

        # >>>>>>>>>>>>>>>> global feature extraction

        print('global feature extractor ...')

        bottle1 = SCCNBottleneckBlock(64,
                                      3,
                                      t=6,
                                      stride=2,
                                      n=3,
                                      w_initializer=self.w_initializer,
                                      w_regularizer=self.w_regularizer)
        bottle2 = SCCNBottleneckBlock(96,
                                      3,
                                      t=6,
                                      stride=2,
                                      n=3,
                                      w_initializer=self.w_initializer,
                                      w_regularizer=self.w_regularizer)
        bottle3 = SCCNBottleneckBlock(128,
                                      3,
                                      t=6,
                                      stride=1,
                                      n=3,
                                      w_initializer=self.w_initializer,
                                      w_regularizer=self.w_regularizer)
        pyramid = SCNNPyramidBlock([2, 4, 6, 8],
                                   w_initializer=self.w_initializer,
                                   w_regularizer=self.w_regularizer)

        flow = bottle1(flow)
        flow = bottle2(flow)
        flow = bottle3(flow)

        flow = pyramid(flow)

        gfe = flow

        # >>>>>>>>>>>>>>>> feature fusion

        print('Feature fusion ...')

        conv1 = ConvolutionalLayer(128,
                                   conv_type='REGULAR',
                                   kernel_size=1,
                                   padding='same',
                                   stride=1,
                                   acti_func=None,
                                   w_initializer=self.w_initializer,
                                   w_regularizer=self.w_regularizer)

        upsample1 = tf.keras.layers.UpSampling2D((4, 4),
                                                 interpolation='bilinear')
        dwconv = ConvolutionalLayer(1,
                                    conv_type='DEPTHWISE_2D',
                                    kernel_size=3,
                                    stride=1,
                                    padding='same',
                                    acti_func=self.acti_func,
                                    w_initializer=self.w_initializer,
                                    w_regularizer=self.w_regularizer)

        conv2 = ConvLayer(128,
                          conv_type='REGULAR',
                          kernel_size=1,
                          padding='same',
                          stride=1,
                          w_initializer=self.w_initializer,
                          w_regularizer=self.w_regularizer)

        bn = BNLayer()
        acti = ActiLayer(func=self.acti_func,
                         regularizer=self.w_regularizer,
                         name='acti_')

        flow1 = conv1(lds, is_training=is_training)

        flow2 = upsample1(gfe)
        flow2 = dwconv(flow2, is_training=is_training)
        flow2 = conv2(flow2)

        flow = tf.math.add(flow1, flow2)
        flow = bn(flow, is_training=is_training)
        flow = acti(flow)

        # ff = flow

        # >>>>>>>>>>>>>>>> classifier

        sep_conv1 = ConvolutionalLayer(128,
                                       conv_type='SEPARABLE_2D',
                                       kernel_size=3,
                                       padding='same',
                                       stride=1,
                                       name='DSConv1_classifier',
                                       acti_func=self.acti_func,
                                       w_initializer=self.w_initializer,
                                       w_regularizer=self.w_regularizer)

        sep_conv2 = ConvolutionalLayer(128,
                                       conv_type='SEPARABLE_2D',
                                       kernel_size=3,
                                       padding='same',
                                       stride=1,
                                       name='DSConv2_classifier',
                                       acti_func=self.acti_func,
                                       w_initializer=self.w_initializer,
                                       w_regularizer=self.w_regularizer)

        flow = sep_conv1(flow, is_training=is_training)
        flow = sep_conv2(flow, is_training=is_training)

        conv = ConvolutionalLayer(self.num_classes,
                                  conv_type='REGULAR',
                                  kernel_size=1,
                                  padding='same',
                                  stride=1,
                                  w_initializer=self.w_initializer,
                                  w_regularizer=self.w_regularizer)

        dropout = ActiLayer(func='dropout',
                            regularizer=self.w_regularizer,
                            name='dropout_')
        # tf.keras.layers.Dropout(0.3)
        upsample = tf.keras.layers.UpSampling2D((8, 8),
                                                interpolation='bilinear')

        flow = conv(flow, is_training=is_training)
        flow = dropout(flow, keep_prob=keep_prob)
        flow = upsample(flow)

        flow = tf.nn.softmax(flow)

        return flow
Example #23
    def layer_op(self,
                 input_tensor,
                 is_training=True,
                 layer_id=-1,
                 keep_prob=0.5,
                 **unused_kwargs):
        """

        :param input_tensor: tensor to input to the network; each spatial
            dimension must be divisible by 2**len(dilation_rates)
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :param keep_prob: float, probability of keeping a node during drop-out
        :param unused_kwargs:
        :return: network prediction
        """
        hyperparams = self.hyperparams

        # Validate that dilation rates are compatible with input dimensions
        modulo = 2**(len(hyperparams['dilation_rates']))
        assert layer_util.check_spatial_dims(input_tensor,
                                             lambda x: x % modulo == 0)

        # Perform on the fly data augmentation
        if is_training and hyperparams['augmentation_scale'] > 0:
            augment_layer = AffineAugmentationLayer(
                hyperparams['augmentation_scale'], 'LINEAR', 'ZERO')
            input_tensor = augment_layer(input_tensor)

        ###################
        ### Feedforward ###
        ###################

        # Initialize network components
        dense_vnet = self.create_network()

        # Store output feature maps from each component
        feature_maps = []

        # Downsample input to the network
        downsample_layer = DownSampleLayer(func='AVG', kernel_size=3, stride=2)
        downsampled_tensor = downsample_layer(input_tensor)
        bn_layer = BNLayer()
        downsampled_tensor = bn_layer(downsampled_tensor,
                                      is_training=is_training)
        feature_maps.append(downsampled_tensor)

        # All feature maps should match the downsampled tensor's shape
        feature_map_shape = downsampled_tensor.shape.as_list()[1:-1]

        # Prepare initial input to dense_vblocks
        initial_features = dense_vnet.initial_conv(input_tensor,
                                                   is_training=is_training)
        channel_dim = len(input_tensor.shape) - 1
        down = tf.concat([downsampled_tensor, initial_features], channel_dim)

        # Feed downsampled input through dense_vblocks
        for dblock in dense_vnet.dense_vblocks:
            # Get skip layer and activation output
            skip, down = dblock(down,
                                is_training=is_training,
                                keep_prob=keep_prob)
            # Resize skip layer to original shape and add to feature maps
            skip = LinearResizeLayer(feature_map_shape)(skip)
            feature_maps.append(skip)

        # Merge feature maps
        all_features = tf.concat(feature_maps, channel_dim)

        # Perform final convolution to segment structures
        output = dense_vnet.final_conv(all_features, is_training=is_training)

        ######################
        ### Postprocessing ###
        ######################

        # Get the number of spatial dimensions of input tensor
        n_spatial_dims = input_tensor.shape.ndims - 2

        # Refine segmentation with prior
        if hyperparams['use_prior']:
            spatial_prior_shape = [hyperparams['prior_size']] * n_spatial_dims
            # Prior shape must be 4 or 5 dim to work with linear_resize layer
            # ie to conform to shape=[batch, X, Y, Z, channels]
            prior_shape = [1] + spatial_prior_shape + [1]
            spatial_prior = SpatialPriorBlock(prior_shape, feature_map_shape)
            output += spatial_prior()

        # Invert augmentation
        if is_training and hyperparams['augmentation_scale'] > 0:
            inverse_aug = augment_layer.inverse()
            output = inverse_aug(output)

        # Resize output to original size
        input_tensor_spatial_size = input_tensor.shape.as_list()[1:-1]
        output = LinearResizeLayer(input_tensor_spatial_size)(output)

        # Segmentation summary
        seg_argmax = tf.to_float(tf.expand_dims(tf.argmax(output, -1), -1))
        seg_summary = seg_argmax * (255. / self.num_classes - 1)

        # Image Summary
        norm_axes = list(range(1, n_spatial_dims + 1))
        mean, var = tf.nn.moments(input_tensor, axes=norm_axes, keep_dims=True)
        timg = tf.to_float(input_tensor - mean) / (tf.sqrt(var) * 2.)
        timg = (timg + 1.) * 127.
        single_channel = tf.reduce_mean(timg, -1, True)
        img_summary = tf.minimum(255., tf.maximum(0., single_channel))

        if n_spatial_dims == 2:
            tf.summary.image(tf.get_default_graph().unique_name('imgseg'),
                             tf.concat([img_summary, seg_summary], 1), 5,
                             [tf.GraphKeys.SUMMARIES])
        elif n_spatial_dims == 3:
            image3_axial(tf.get_default_graph().unique_name('imgseg'),
                         tf.concat([img_summary, seg_summary], 1), 5,
                         [tf.GraphKeys.SUMMARIES])
        else:
            raise NotImplementedError(
                'Image Summary only supports 2D and 3D images')

        return output
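As a concrete instance of the divisibility assertion near the top of this example: with five entries in hyperparams['dilation_rates'], modulo is 2 ** 5 = 32, so every spatial dimension of input_tensor must be a multiple of 32; the check adapts to however many dilation rates are configured.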