Example 1
    def layer_op(self, input_tensor):
        output_tensor1 = Conv(self.n_chns, **self.conv_params)(input_tensor)
        output_tensor2 = Conv(self.n_chns, **self.conv_params)(output_tensor1)
        # output_tensor = ElementWise('CONCAT')(output_tensor2, output_tensor1)
        # residual connection: element-wise sum of the two convolution outputs
        output_tensor = tf.add(output_tensor2, output_tensor1)

        return output_tensor
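
A minimal standalone sketch (plain TensorFlow 1.x, not the NiftyNet Conv wrapper used above; names and shapes are illustrative) of the same pattern: two convolutions whose outputs are combined by an element-wise sum, so the first convolution's output acts as a skip connection.

import tensorflow as tf

def two_conv_residual(x, n_chns=16):
    # two successive convolutions
    out1 = tf.layers.conv2d(x, n_chns, 3, padding='same', activation=tf.nn.relu)
    out2 = tf.layers.conv2d(out1, n_chns, 3, padding='same', activation=tf.nn.relu)
    # element-wise sum of the two convolution outputs (the commented-out
    # alternative above concatenates them along the channel axis instead)
    return tf.add(out2, out1)

# usage (shapes illustrative):
# images = tf.placeholder(tf.float32, [None, 64, 64, 1])
# features = two_conv_residual(images)
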
Example 2
    def layer_op(self, input_tensor, is_training=None):
        """

        :param input_tensor: tensor, input to the two layer convolution
        :param is_training: flag for training
        :return: tensor, output of the conv--conv sequence
        """
        output_tensor = Conv(self.n_chns, **self.conv_params)(input_tensor, is_training=is_training)
        output_tensor = Conv(self.n_chns, **self.conv_params)(output_tensor, is_training=is_training)

        return output_tensor
Example 3
    def layer_op(self, inputs, is_training=True):
        """
        The general connectivity is::

            (inputs)--o-conv_0--conv_1-+-- (outputs)
                      |                |
                      o----------------o

        The ordering of convolution, batch normalisation and activation in
        ``conv_0`` and ``conv_1`` is specified by ``type_string``.
        """
        conv_flow = inputs
        # batch normalisation layers
        bn_0 = BNLayer(**self.bn_param)
        bn_1 = BNLayer(**self.bn_param)
        # activation functions
        acti_0 = Acti(func=self.acti_func)
        acti_1 = Acti(func=self.acti_func)
        # convolutions
        conv_0 = Conv(acti_func=None,
                      with_bias=False,
                      with_bn=False,
                      **self.conv_param)
        conv_1 = Conv(acti_func=None,
                      with_bias=False,
                      with_bn=False,
                      **self.conv_param)

        if self.type_string == 'original':
            conv_flow = acti_0(bn_0(conv_0(conv_flow), is_training))
            conv_flow = bn_1(conv_1(conv_flow), is_training)
            conv_flow = ElementwiseLayer('SUM')(conv_flow, inputs)
            conv_flow = acti_1(conv_flow)
            return conv_flow

        if self.type_string == 'conv_bn_acti':
            conv_flow = acti_0(bn_0(conv_0(conv_flow), is_training))
            conv_flow = acti_1(bn_1(conv_1(conv_flow), is_training))
            return ElementwiseLayer('SUM')(conv_flow, inputs)

        if self.type_string == 'acti_conv_bn':
            conv_flow = bn_0(conv_0(acti_0(conv_flow)), is_training)
            conv_flow = bn_1(conv_1(acti_1(conv_flow)), is_training)
            return ElementwiseLayer('SUM')(conv_flow, inputs)

        if self.type_string == 'bn_acti_conv':
            conv_flow = conv_0(acti_0(bn_0(conv_flow, is_training)))
            conv_flow = conv_1(acti_1(bn_1(conv_flow, is_training)))
            return ElementwiseLayer('SUM')(conv_flow, inputs)

        raise ValueError('Unknown type string: {}'.format(self.type_string))
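
The four ``type_string`` branches only change where batch normalisation and the activation sit relative to each convolution. A standalone sketch (plain TensorFlow 1.x, illustrative only, not the NiftyNet layer classes) of the 'bn_acti_conv' pre-activation ordering with the identity skip:

import tensorflow as tf

def pre_activation_res_unit(x, n_chns, is_training):
    # assumes x already has n_chns channels so the element-wise sum is valid
    flow = x
    for _ in range(2):  # two bn -> acti -> conv blocks
        flow = tf.layers.batch_normalization(flow, training=is_training)
        flow = tf.nn.relu(flow)
        flow = tf.layers.conv2d(flow, n_chns, 3, padding='same', use_bias=False)
    # equivalent of ElementwiseLayer('SUM')(flow, x)
    return tf.add(flow, x)
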
Example 4
    def layer_op(self, input_tensor):
        output_tensor_1 = Conv(self.n_chns, **self.conv_params)(input_tensor)
        output_tensor_drop_1 = ActiLayer(func='dropout',
                                         name='dropout')(output_tensor_1,
                                                         keep_prob=0.9)
        output_tensor_2 = Conv(self.n_chns,
                               **self.conv_params)(output_tensor_drop_1)
        output_tensor_drop_2 = ActiLayer(func='dropout',
                                         name='dropout')(output_tensor_2,
                                                         keep_prob=0.9)
        output_tensor_3 = Conv(self.n_chns,
                               **self.conv_params)(output_tensor_drop_2)
        output_tensor_drop_3 = ActiLayer(func='dropout',
                                         name='dropout')(output_tensor_3,
                                                         keep_prob=0.9)
        return output_tensor_drop_3
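
A minimal sketch (plain TensorFlow 1.x, not the NiftyNet ActiLayer API) of the conv -> dropout pattern repeated above; with keep_prob=0.9 roughly 90% of activations are kept and the survivors are rescaled by 1/0.9 so the expected magnitude is unchanged.

import tensorflow as tf

def conv_dropout(x, n_chns, keep_prob=0.9):
    out = tf.layers.conv2d(x, n_chns, 3, padding='same', activation=tf.nn.relu)
    return tf.nn.dropout(out, keep_prob=keep_prob)
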
Example 5
    def layer_op(self, inputs, is_training=True):
        """
        Consists of::

            (inputs)--conv_0-o-conv_1--conv_2-+-(conv_res)--down_sample--
                             |                |
                             o----------------o

        conv_0 and conv_res are also returned for feature forwarding purposes
        """
        conv_0 = Conv(n_output_chns=self.n_output_chns,
                      kernel_size=self.kernel_size,
                      acti_func=self.acti_func,
                      with_bias=False,
                      with_bn=True,
                      **self.conv_param)(inputs, is_training)
        conv_res = ResUnit(n_output_chns=self.n_output_chns,
                           kernel_size=self.kernel_size,
                           acti_func=self.acti_func,
                           type_string=self.type_string,
                           **self.conv_param)(conv_0, is_training)
        conv_down = Down('Max',
                         kernel_size=self.downsample_kernel_size,
                         stride=self.downsample_stride)(conv_res)
        return conv_down, conv_0, conv_res
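
An illustrative sketch (plain TensorFlow 1.x, with a simplified stand-in for ResUnit) of this block's interface: the max-pooled tensor drives the next, coarser level, while conv_0 and conv_res are returned so a decoder can later fuse them as skip connections.

import tensorflow as tf

def down_block(x, n_chns):
    conv_0 = tf.layers.conv2d(x, n_chns, 3, padding='same', activation=tf.nn.relu)
    # simplified stand-in for the residual unit: one conv plus identity skip
    conv_res = tf.add(
        tf.layers.conv2d(conv_0, n_chns, 3, padding='same', activation=tf.nn.relu),
        conv_0)
    conv_down = tf.layers.max_pooling2d(conv_res, pool_size=2, strides=2)
    return conv_down, conv_0, conv_res
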
Example 6
    def layer_op(self,
                 fixed_image,
                 moving_image,
                 is_training=True,
                 **unused_kwargs):
        """

        :param fixed_image: tensor, fixed image for registration (defines reference space)
        :param moving_image: tensor, moving image to be registered to fixed
        :param is_training: boolean, True if network is in training mode
        :return: sampling grid transformed by the estimated affine parameters
        """

        spatial_rank = infer_spatial_rank(moving_image)
        spatial_shape = fixed_image.get_shape().as_list()[1:-1]

        # resize the moving image to match the fixed
        moving_image = Resize(spatial_shape)(moving_image)
        img = tf.concat([moving_image, fixed_image], axis=-1)
        res_1 = DownRes(self.fea[0], kernel_size=7,
                        **self.res_param)(img, is_training)[0]
        res_2 = DownRes(self.fea[1], **self.res_param)(res_1, is_training)[0]
        res_3 = DownRes(self.fea[2], **self.res_param)(res_2, is_training)[0]
        res_4 = DownRes(self.fea[3], **self.res_param)(res_3, is_training)[0]

        conv_5 = Conv(n_output_chns=self.fea[4],
                      kernel_size=self.k_conv,
                      with_bias=False,
                      feature_normalization='batch',
                      **self.res_param)(res_4, is_training)

        if spatial_rank == 2:
            affine_size = 6
        elif spatial_rank == 3:
            affine_size = 12
        else:
            tf.logging.fatal('Unsupported spatial rank: %s', spatial_rank)
            raise NotImplementedError

        if self.affine_w_initializer is None:
            self.affine_w_initializer = init_affine_w()
        if self.affine_b_initializer is None:
            self.affine_b_initializer = init_affine_b(spatial_rank)
        affine = FC(n_output_chns=affine_size,
                    feature_normalization=None,
                    w_initializer=self.affine_w_initializer,
                    b_initializer=self.affine_b_initializer,
                    **self.affine_param)(conv_5)
        grid_global = Grid(source_shape=spatial_shape,
                           output_shape=spatial_shape)(affine)
        return grid_global
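
A worked NumPy sketch of why the fully connected layer outputs 6 parameters for 2-D and 12 for 3-D inputs: a spatial-rank-N affine is an N x (N+1) matrix applied to homogeneous grid coordinates, i.e. 2*3 = 6 or 3*4 = 12 numbers per image (values below are illustrative).

import numpy as np

theta = np.array([[1.0, 0.0, 0.1],     # 2-D affine: linear block plus translation
                  [0.0, 1.0, -0.2]])
ys, xs = np.meshgrid(np.arange(4), np.arange(4), indexing='ij')
coords = np.stack([ys.ravel(), xs.ravel(), np.ones(16)], axis=0)  # homogeneous coordinates
warped = theta @ coords                 # resampling locations in the moving image
print(warped.shape)                     # (2, 16): one (y, x) location per output pixel
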
Example 7
    def layer_op(self, images, is_training=True, **unused_kwargs):
        """

        :param images: tensor, input to the network
        :param is_training: boolean, True if network is in training mode
        :param unused_kwargs: other conditional arguments, not in use
        :return: tensor, output of the network
        """
        # contracting path
        output_1 = TwoLayerConv(self.n_fea[0], self.conv_params)(images, is_training=is_training)
        down_1 = Pooling(func='MAX', **self.pooling_params)(output_1)

        output_2 = TwoLayerConv(self.n_fea[1], self.conv_params)(down_1, is_training=is_training)
        down_2 = Pooling(func='MAX', **self.pooling_params)(output_2)

        output_3 = TwoLayerConv(self.n_fea[2], self.conv_params)(down_2, is_training=is_training)
        down_3 = Pooling(func='MAX', **self.pooling_params)(output_3)

        output_4 = TwoLayerConv(self.n_fea[3], self.conv_params)(down_3, is_training=is_training)
        down_4 = Pooling(func='MAX', **self.pooling_params)(output_4)

        output_5 = TwoLayerConv(self.n_fea[4], self.conv_params)(down_4, is_training=is_training)

        # expansive path
        up_4 = DeConv(self.n_fea[3], **self.deconv_params)(output_5, is_training=is_training)
        output_4 = CropConcat()(output_4, up_4)
        output_4 = TwoLayerConv(self.n_fea[3], self.conv_params)(output_4, is_training=is_training)

        up_3 = DeConv(self.n_fea[2], **self.deconv_params)(output_4, is_training=is_training)
        output_3 = CropConcat()(output_3, up_3)
        output_3 = TwoLayerConv(self.n_fea[2], self.conv_params)(output_3, is_training=is_training)

        up_2 = DeConv(self.n_fea[1], **self.deconv_params)(output_3, is_training=is_training)
        output_2 = CropConcat()(output_2, up_2)
        output_2 = TwoLayerConv(self.n_fea[1], self.conv_params)(output_2, is_training=is_training)

        up_1 = DeConv(self.n_fea[0], **self.deconv_params)(output_2, is_training=is_training)
        output_1 = CropConcat()(output_1, up_1)
        output_1 = TwoLayerConv(self.n_fea[0], self.conv_params)(output_1, is_training=is_training)

        # classification layer
        classifier = Conv(n_output_chns=self.num_classes,
                          kernel_size=1,
                          with_bias=True,
                          feature_normalization=None)
        output_tensor = classifier(output_1)
        tf.logging.info('output shape %s', output_tensor.shape)
        return output_tensor
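
A hypothetical configuration sketch for the attributes used above (the real defaults live in the network's __init__, which is not shown; the key names and values here are assumptions in the spirit of the original U-Net):

n_fea = [64, 128, 256, 512, 1024]                      # channels per resolution level
conv_params = {'kernel_size': 3, 'acti_func': 'relu'}  # passed to each TwoLayerConv
pooling_params = {'kernel_size': 2, 'stride': 2}       # 2x down-sampling per level
deconv_params = {'kernel_size': 2, 'stride': 2}        # 2x up-sampling per level
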
Example 8
    def layer_op(self, images, is_training=True, **unused_kwargs):
        # contracting path
        output_1 = TwoLayerConv(self.n_fea[0], self.conv_params)(images)
        down_1 = Pooling(func='MAX', **self.pooling_params)(output_1)

        output_2 = TwoLayerConv(self.n_fea[1], self.conv_params)(down_1)
        down_2 = Pooling(func='MAX', **self.pooling_params)(output_2)

        output_3 = TwoLayerConv(self.n_fea[2], self.conv_params)(down_2)
        down_3 = Pooling(func='MAX', **self.pooling_params)(output_3)

        output_4 = TwoLayerConv(self.n_fea[3], self.conv_params)(down_3)
        down_4 = Pooling(func='MAX', **self.pooling_params)(output_4)

        output_5 = TwoLayerConv(self.n_fea[4], self.conv_params)(down_4)

        # expansive path
        up_4 = DeConv(self.n_fea[3], **self.deconv_params)(output_5)
        output_4 = CropConcat()(output_4, up_4)
        output_4 = TwoLayerConv(self.n_fea[3], self.conv_params)(output_4)

        up_3 = DeConv(self.n_fea[2], **self.deconv_params)(output_4)
        output_3 = CropConcat()(output_3, up_3)
        output_3 = TwoLayerConv(self.n_fea[2], self.conv_params)(output_3)

        up_2 = DeConv(self.n_fea[1], **self.deconv_params)(output_3)
        output_2 = CropConcat()(output_2, up_2)
        output_2 = TwoLayerConv(self.n_fea[1], self.conv_params)(output_2)

        up_1 = DeConv(self.n_fea[0], **self.deconv_params)(output_2)
        output_1 = CropConcat()(output_1, up_1)
        output_1 = TwoLayerConv(self.n_fea[0], self.conv_params)(output_1)

        # classification layer
        classifier = Conv(n_output_chns=self.num_classes,
                          kernel_size=1,
                          with_bias=True,
                          with_bn=False)
        output_tensor = classifier(output_1)
        tf.logging.info('output shape %s', output_tensor.shape)
        return output_tensor
Example 9
    def layer_op(self,
                 fixed_image,
                 moving_image,
                 base_grid=None,
                 is_training=True,
                 **unused_kwargs):
        """

        :param fixed_image: tensor, fixed image for registration (defines reference space)
        :param moving_image: tensor, moving image to be registered to fixed
        :param base_grid: tensor, reference sampling grid; an identity grid is created when not provided
        :param is_training: boolean, True if network is in training mode
        :return: estimated dense displacement fields
        """

        spatial_rank = infer_spatial_rank(fixed_image)
        spatial_shape = fixed_image.get_shape().as_list()[1:-1]
        check_spatial_dims(fixed_image, lambda x: x % 16 == 0)

        #  resize the moving image to match the fixed
        moving_image = Resize(spatial_shape)(moving_image)
        img = tf.concat([moving_image, fixed_image], axis=-1)
        down_res_0, conv_0_0, _ = \
            DownRes(self.fea[0], kernel_size=7, **self.down_res_param)(img, is_training)
        down_res_1, conv_0_1, _ = \
            DownRes(self.fea[1], **self.down_res_param)(down_res_0, is_training)
        down_res_2, conv_0_2, _ = \
            DownRes(self.fea[2], **self.down_res_param)(down_res_1, is_training)
        down_res_3, conv_0_3, _ = \
            DownRes(self.fea[3], **self.down_res_param)(down_res_2, is_training)

        conv_4 = Conv(n_output_chns=self.fea[4],
                      kernel_size=self.k_conv,
                      **self.down_res_param)(down_res_3, is_training)

        up_res_0 = UpRes(self.fea[3], **self.up_res_param)(conv_4, conv_0_3,
                                                           is_training)
        up_res_1 = UpRes(self.fea[2], **self.up_res_param)(up_res_0, conv_0_2,
                                                           is_training)
        up_res_2 = UpRes(self.fea[1], **self.up_res_param)(up_res_1, conv_0_1,
                                                           is_training)
        up_res_3 = UpRes(self.fea[0], **self.up_res_param)(up_res_2, conv_0_0,
                                                           is_training)

        if self.multi_scale_fusion:
            output_list = [up_res_3, up_res_2, up_res_1, up_res_0, conv_4]
        else:
            output_list = [up_res_3]

        # converting all output layers to displacement fields
        dense_fields = []
        for scale_out in output_list:
            field = Conv(n_output_chns=spatial_rank,
                         kernel_size=self.k_conv,
                         with_bias=True,
                         with_bn=False,
                         acti_func=None,
                         **self.disp_param)(scale_out)
            resized_field = Resize(new_size=spatial_shape)(field)
            dense_fields.append(resized_field)

        if base_grid is None:
            # adding a reference grid if it doesn't exist
            in_spatial_size = [None] * spatial_rank
            base_grid = _create_affine_features(output_shape=spatial_shape,
                                                source_shape=in_spatial_size)
            base_grid = np.asarray(base_grid[:-1])
            base_grid = np.reshape(base_grid.T,
                                   [-1] + spatial_shape + [spatial_rank])
            base_grid = tf.constant(base_grid, dtype=resized_field.dtype)

        if self.multi_scale_fusion and len(dense_fields) > 1:
            dense_field = tf.reduce_sum(dense_fields, axis=0)
        else:
            dense_field = dense_fields[0]

        # TODO filtering
        if self.smoothing_func is not None:
            dense_field = self.smoothing_func(dense_field, spatial_rank)

        tf.add_to_collection('bending_energy',
                             _computing_bending_energy(dense_field))
        tf.add_to_collection('gradient_norm',
                             _computing_gradient_norm(dense_field))

        dense_field = dense_field + base_grid
        return dense_field
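
A standalone NumPy sketch of the final step: the network predicts a dense displacement field that is added to an identity sampling grid (one coordinate vector per voxel), giving the absolute locations at which the moving image is resampled.

import numpy as np

spatial_shape = [4, 4, 4]
axes = [np.arange(s, dtype=np.float32) for s in spatial_shape]
identity_grid = np.stack(np.meshgrid(*axes, indexing='ij'), axis=-1)  # shape (4, 4, 4, 3)
displacement = np.zeros_like(identity_grid)          # stands in for the network output
dense_field = identity_grid + displacement           # per-voxel resampling coordinates
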
Example 10
    def layer_op(self, input_tensor):
        output_tensor = Conv(self.n_chns, **self.conv_params)(input_tensor)
        output_tensor = Conv(self.n_chns, **self.conv_params)(output_tensor)
        return output_tensor