Example #1
def do_conv(input_tensor, dim):
    spatial_rank = infer_spatial_rank(input_tensor)

    assert dim < spatial_rank
    if dim < 0:
        return input_tensor

    _sigmas = expand_spatial_params(input_param=1.5,
                                    spatial_rank=spatial_rank,
                                    param_type=float)
    _truncate = expand_spatial_params(input_param=3.0,
                                      spatial_rank=spatial_rank,
                                      param_type=float)

    # squeeze the kernel to be along the 'dim'
    new_kernel_shape = [1] * (spatial_rank + 2)
    new_kernel_shape[dim] = -1
    kernel_tensor = gaussian_1d(sigma=_sigmas[dim], truncated=_truncate[dim])
    kernel_tensor = tf.reshape(kernel_tensor, new_kernel_shape)

    # split channels and do smoothing respectively
    chn_wise_list = tf.unstack(do_conv(input_tensor, dim - 1), axis=-1)
    output_tensor = [
        tf.nn.convolution(input=tf.expand_dims(chn, axis=-1),
                          filter=kernel_tensor,
                          padding='VALID',
                          strides=[1] * spatial_rank) for chn in chn_wise_list
    ]
    return tf.concat(output_tensor, axis=-1)
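do_conv applies a single 1-D Gaussian kernel along one spatial axis per call and recurses over the remaining axes, which relies on the separability of the N-D Gaussian filter. A rough NumPy illustration of that idea (standalone stand-ins, not the NiftyNet helpers; the example above uses 'VALID' padding, whereas this sketch keeps the shape with mode='same'):

import numpy as np

def gaussian_1d_np(sigma=1.5, truncated=3.0):
    # hypothetical stand-in for the gaussian_1d helper used above
    radius = int(truncated * sigma + 0.5)
    x = np.arange(-radius, radius + 1, dtype=np.float32)
    kernel = np.exp(-0.5 * (x / sigma) ** 2)
    return kernel / kernel.sum()

def smooth_separable(volume, sigma=1.5):
    # filtering with the same 1-D kernel along each axis in turn is
    # equivalent to filtering once with the full N-D Gaussian kernel
    kernel = gaussian_1d_np(sigma)
    for axis in range(volume.ndim):
        volume = np.apply_along_axis(
            lambda line: np.convolve(line, kernel, mode='same'), axis, volume)
    return volume

print(smooth_separable(np.random.rand(8, 8, 8)).shape)  # (8, 8, 8)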
Example #2
 def layer_op(self, input_tensor):
     spatial_rank = layer_util.infer_spatial_rank(input_tensor)
     look_up_operations(self.func, SUPPORTED_OP)
     kernel_size_all_dims = layer_util.expand_spatial_params(
         self.kernel_size, spatial_rank)
     stride_all_dims = layer_util.expand_spatial_params(
         self.stride, spatial_rank)
     if self.func == 'CONSTANT':
         full_kernel_size = kernel_size_all_dims + (1, 1)
         np_kernel = layer_util.trivial_kernel(full_kernel_size)
         kernel = tf.constant(np_kernel, dtype=tf.float32)
         output_tensor = [
             tf.expand_dims(x, -1)
             for x in tf.unstack(input_tensor, axis=-1)
         ]
         output_tensor = [
             tf.nn.convolution(input=inputs,
                               filter=kernel,
                               strides=stride_all_dims,
                               padding=self.padding,
                               name='conv') for inputs in output_tensor
         ]
         output_tensor = tf.concat(output_tensor, axis=-1)
     else:
         output_tensor = tf.nn.pool(input=input_tensor,
                                    window_shape=kernel_size_all_dims,
                                    pooling_type=self.func,
                                    padding=self.padding,
                                    dilation_rate=[1] * spatial_rank,
                                    strides=stride_all_dims,
                                    name=self.layer_name)
     return output_tensor
Example #3
    def layer_op(self, input_tensor):
        input_shape = input_tensor.shape.as_list()
        n_input_chns = input_shape[-1]
        spatial_rank = layer_util.infer_spatial_rank(input_tensor)

        # initialize conv kernels/strides and then apply
        kernel_size_all_dim = layer_util.expand_spatial_params(
            self.kernel_size, spatial_rank)
        w_full_size = kernel_size_all_dim + (self.n_output_chns, n_input_chns)
        stride_all_dim = layer_util.expand_spatial_params(
            self.stride, spatial_rank)
        full_stride = (1, ) + stride_all_dim + (1, )

        deconv_kernel = tf.get_variable('w',
                                        shape=w_full_size,
                                        initializer=self.initializers['w'],
                                        regularizer=self.regularizers['w'])
        if spatial_rank == 2:
            op_ = SUPPORTED_OP['2D']
        elif spatial_rank == 3:
            op_ = SUPPORTED_OP['3D']
        else:
            raise ValueError(
                "Only 2D and 3D spatial deconvolutions are supported")

        spatial_shape = []
        for (i, dim) in enumerate(input_shape[:-1]):
            if i == 0:
                continue
            if dim is None:
                spatial_shape.append(tf.shape(input_tensor)[i])
            else:
                spatial_shape.append(dim)
        output_dims = infer_output_dims(spatial_shape, stride_all_dim,
                                        kernel_size_all_dim, self.padding)
        full_output_size = \
            [input_shape[0]] + output_dims + [self.n_output_chns]
        output_tensor = op_(value=input_tensor,
                            filter=deconv_kernel,
                            output_shape=full_output_size,
                            strides=full_stride,
                            padding=self.padding,
                            name='deconv')
        if not self.with_bias:
            return output_tensor

        # adding the bias term
        bias_full_size = (self.n_output_chns, )
        bias_term = tf.get_variable('b',
                                    shape=bias_full_size,
                                    initializer=self.initializers['b'],
                                    regularizer=self.regularizers['b'])
        output_tensor = tf.nn.bias_add(output_tensor,
                                       bias_term,
                                       name='add_bias')
        return output_tensor
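infer_output_dims has to return spatial sizes consistent with the transposed-convolution shape arithmetic expected by the conv2d_transpose/conv3d_transpose ops. A plausible sketch of that arithmetic, stated here as an assumption about the helper rather than its actual source:

def infer_output_dims_sketch(input_dims, strides, kernel_sizes, padding):
    # assumed behaviour, mirroring TF's standard transposed-convolution rules:
    #   'SAME'  -> in * stride
    #   'VALID' -> (in - 1) * stride + kernel
    output_dims = []
    for dim, stride, kernel in zip(input_dims, strides, kernel_sizes):
        if padding == 'SAME':
            output_dims.append(dim * stride)
        else:
            output_dims.append((dim - 1) * stride + kernel)
    return output_dims

print(infer_output_dims_sketch([16, 16, 16], [2, 2, 2], [3, 3, 3], 'SAME'))
# [32, 32, 32]   ('VALID' would give [33, 33, 33])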
Example #4
    def layer_op(self, input_tensor):
        input_shape = input_tensor.get_shape().as_list()
        n_input_chns = input_shape[-1]
        spatial_rank = layer_util.infer_spatial_rank(input_tensor)

        # initialize conv kernels/strides and then apply
        w_full_size = layer_util.expand_spatial_params(self.kernel_size,
                                                       spatial_rank)
        # expand kernel size to include number of features
        w_full_size = w_full_size + (n_input_chns, self.n_output_chns)
        full_stride = layer_util.expand_spatial_params(self.stride,
                                                       spatial_rank)
        full_dilation = layer_util.expand_spatial_params(
            self.dilation, spatial_rank)

        conv_kernel = tf.get_variable('w',
                                      shape=w_full_size,
                                      initializer=self.initializers['w'],
                                      regularizer=self.regularizers['w'])

        output_tensor = tf.nn.convolution(input=input_tensor,
                                          filter=conv_kernel,
                                          strides=full_stride,
                                          dilation_rate=full_dilation,
                                          padding=self.padding,
                                          name='conv')

        if not self.with_bias:
            return output_tensor

        # adding the bias term
        bias_term = tf.get_variable('b',
                                    shape=self.n_output_chns,
                                    initializer=self.initializers['b'],
                                    regularizer=self.regularizers['b'])
        output_tensor = tf.nn.bias_add(output_tensor,
                                       bias_term,
                                       name='add_bias')
        return output_tensor
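Stripped of the layer bookkeeping, the example boils down to a kernel variable, tf.nn.convolution and an optional bias_add. A minimal standalone sketch of the same pattern (hypothetical helper, TF 1.x, fixed 'SAME' padding):

import tensorflow as tf

def conv_nd_sketch(x, n_out, kernel_size=3, with_bias=True, name='conv_sketch'):
    # x: [batch, spatial..., channels]; kernel: [k, ..., k, n_in, n_out]
    spatial_rank = x.shape.ndims - 2
    n_in = x.shape.as_list()[-1]
    with tf.variable_scope(name):
        w = tf.get_variable(
            'w', shape=[kernel_size] * spatial_rank + [n_in, n_out])
        y = tf.nn.convolution(input=x, filter=w, padding='SAME')
        if with_bias:
            b = tf.get_variable('b', shape=[n_out],
                                initializer=tf.zeros_initializer())
            y = tf.nn.bias_add(y, b)
    return y

# usage:
# y = conv_nd_sketch(tf.placeholder(tf.float32, [None, 32, 32, 32, 1]), 16)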
Example #5
 def layer_op(self, input_tensor, is_training):
     output_tensor = input_tensor
     for i in range(len(self.kernels)):
         # create parameterised layers
         input_shape = input_tensor.shape.as_list()
         n_input_chns = input_shape[-1]
         spatial_rank = layer_util.infer_spatial_rank(input_tensor)
         w_full_size = layer_util.expand_spatial_params(
             self.kernel_size, spatial_rank)
         w_full_size = w_full_size + (n_input_chns, self.n_output_chns)
         conv_kernel = tf.get_variable(
             'w', shape=w_full_size,
             initializer=self.initializers['w'],
             regularizer=self.regularizers['w'])
         alphas = tf.get_variable(
             'alpha', input_tensor.shape[-1],
             initializer=tf.constant_initializer(0.0),
             regularizer=None)
         
         output_tensor = tf.layers.batch_normalization(
             output_tensor, training=is_training, name='bn_{}'.format(i))
         # apply the channel-wise PReLU parameters created above
         output_tensor = self.prelu(output_tensor, alphas)
         output_tensor = tf.nn.convolution(input=output_tensor,
                                       filter=conv_kernel,
                                       strides=self.strides,
                                       dilation_rate=self.dilation_rates,
                                       padding=self.padding,
                                       name='conv_{}'.format(i))
         output_tensor = ElementwiseLayer('SUM')(output_tensor, input_tensor)
     return output_tensor
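One caveat when tf.layers.batch_normalization is driven by a training flag, as above: the moving mean/variance are only refreshed when the UPDATE_OPS collection is executed. A generic TF 1.x sketch of that wiring (toy graph, not part of the layer):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8])
y = tf.layers.batch_normalization(x, training=True)
loss = tf.reduce_mean(tf.square(y))
# the moving-average updates live in UPDATE_OPS and must run with the step
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)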
Example #6
    def layer_op(self, input_tensor):
        input_shape = input_tensor.shape.as_list()
        n_input_chns = input_shape[-1]
        spatial_rank = layer_util.infer_spatial_rank(input_tensor)

        # initialize conv kernels/strides and then apply
        w_full_size = layer_util.expand_spatial_params(self.kernel_size,
                                                       spatial_rank)
        # expand kernel size to include number of features
        w_full_size = w_full_size + (n_input_chns, self.n_output_chns)
        full_stride = layer_util.expand_spatial_params(self.stride,
                                                       spatial_rank)
        full_dilation = layer_util.expand_spatial_params(
            self.dilation, spatial_rank)

        conv_kernel = tf.get_variable('w',
                                      shape=w_full_size,
                                      initializer=self.initializers['w'],
                                      regularizer=self.regularizers['w'])
        if self.padding in ('VALID', 'SAME'):
            output_tensor = tf.nn.convolution(input=input_tensor,
                                              filter=conv_kernel,
                                              strides=full_stride,
                                              dilation_rate=full_dilation,
                                              padding=self.padding,
                                              name='conv')
        else:
            output_tensor = _extended_convolution(
                input_tensor,
                conv_kernel,
                full_stride,
                full_dilation,
                self.padding,
                constant=self.padding_constant)

        if not self.with_bias:
            return output_tensor

        # adding the bias term
        bias_term = tf.get_variable('b',
                                    shape=self.n_output_chns,
                                    initializer=self.initializers['b'],
                                    regularizer=self.regularizers['b'])
        output_tensor = tf.nn.bias_add(output_tensor,
                                       bias_term,
                                       name='add_bias')
        return output_tensor
Example #7
    def layer_op(self, input_tensor):
        """
        Resize the image by linearly interpolating the input
        using the TF ``resize_bilinear`` function.

        :param input_tensor: 2D/3D image tensor, with shape:
            ``batch, X, Y, [Z,] Channels``
        :return: interpolated volume
        """

        input_spatial_rank = infer_spatial_rank(input_tensor)
        assert input_spatial_rank in (2, 3), \
            "linear interpolation layer can only be applied to " \
            "2D/3D images (4D or 5D tensors)."
        self.new_size = expand_spatial_params(self.new_size, input_spatial_rank)

        if input_spatial_rank == 2:
            return tf.image.resize_bilinear(input_tensor, self.new_size)

        b_size, x_size, y_size, z_size, c_size = \
            input_tensor.shape.as_list()
        x_size_new, y_size_new, z_size_new = self.new_size

        if (x_size == x_size_new) and (y_size == y_size_new) and (
                z_size == z_size_new):
            # already in the target shape
            return input_tensor

        # resize y-z
        squeeze_b_x = tf.reshape(
            input_tensor, [-1, y_size, z_size, c_size])
        resize_b_x = tf.image.resize_bilinear(
            squeeze_b_x, [y_size_new, z_size_new])
        resume_b_x = tf.reshape(
            resize_b_x, [b_size, x_size, y_size_new, z_size_new, c_size])

        # resize x
        #   first reorient
        reoriented = tf.transpose(resume_b_x, [0, 3, 2, 1, 4])
        #   squeeze and 2d resize
        squeeze_b_z = tf.reshape(
            reoriented, [-1, y_size_new, x_size, c_size])
        resize_b_z = tf.image.resize_bilinear(
            squeeze_b_z, [y_size_new, x_size_new])
        resume_b_z = tf.reshape(
            resize_b_z, [b_size, z_size_new, y_size_new, x_size_new, c_size])

        output_tensor = tf.transpose(resume_b_z, [0, 3, 2, 1, 4])
        return output_tensor
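The trilinear case works by folding one spatial axis into the batch so that the 2-D resize_bilinear op can handle the other two, then transposing and repeating the trick for the remaining axis. The shape bookkeeping can be checked in plain NumPy (np.repeat is only a stand-in for the bilinear resize; integer upsampling factors assumed):

import numpy as np

vol = np.random.rand(2, 4, 5, 6, 3)              # [B, X, Y, Z, C]
b, x, y, z, c = vol.shape
fx = fy = fz = 2                                 # integer upsampling factors

# pass 1: fold X into the batch, "resize" Y and Z
flat = vol.reshape(b * x, y, z, c)
flat = flat.repeat(fy, axis=1).repeat(fz, axis=2)
vol2 = flat.reshape(b, x, y * fy, z * fz, c)

# pass 2: bring X into the image plane and "resize" it
vol2 = vol2.transpose(0, 3, 2, 1, 4)             # [B, Z', Y', X, C]
flat = vol2.reshape(b * z * fz, y * fy, x, c)
flat = flat.repeat(fx, axis=2)
out = flat.reshape(b, z * fz, y * fy, x * fx, c).transpose(0, 3, 2, 1, 4)
print(out.shape)                                 # (2, 8, 10, 12, 3)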
Example #8
    def layer_op(self, input_tensor):
        spatial_rank = layer_util.infer_spatial_rank(input_tensor)
        output_tensor = input_tensor
        if self.func == 'REPLICATE':
            if self.kernel_size != self.stride:
                raise ValueError(
                    "`kernel_size` != `stride` currently not "
                    "supported in `REPLICATE` mode. Please "
                    "consider using the `CHANNELWISE_DECONV` operation.")
            # simply replicate input values to
            # local regions of (kernel_size ** spatial_rank) element
            kernel_size_all_dims = layer_util.expand_spatial_params(
                self.kernel_size, spatial_rank)
            pixel_num = np.prod(kernel_size_all_dims)
            repmat = np.hstack((pixel_num, [1] * spatial_rank, 1)).flatten()
            output_tensor = tf.tile(input=input_tensor, multiples=repmat)
            output_tensor = tf.batch_to_space_nd(
                input=output_tensor,
                block_shape=kernel_size_all_dims,
                crops=[[0, 0]] * spatial_rank)

        elif self.func == 'CHANNELWISE_DECONV':
            output_tensor = [
                tf.expand_dims(x, -1)
                for x in tf.unstack(input_tensor, axis=-1)
            ]
            output_tensor = [
                DeconvLayer(n_output_chns=1,
                            kernel_size=self.kernel_size,
                            stride=self.stride,
                            padding='SAME',
                            with_bias=self.with_bias,
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            b_initializer=self.initializers['b'],
                            b_regularizer=self.regularizers['b'],
                            name='deconv_{}'.format(i))(x)
                for (i, x) in enumerate(output_tensor)
            ]
            output_tensor = tf.concat(output_tensor, axis=-1)
        return output_tensor
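In the 'REPLICATE' branch, tiling along the batch axis and then applying batch_to_space_nd copies every voxel into a (kernel_size ** spatial_rank) block, i.e. nearest-neighbour upsampling. The same result in NumPy for a small 2-D case:

import numpy as np

x = np.arange(4, dtype=np.float32).reshape(1, 2, 2, 1)   # [B, H, W, C]
k = 2
up = x.repeat(k, axis=1).repeat(k, axis=2)                # replicate each pixel
print(up[0, :, :, 0])
# [[0. 0. 1. 1.]
#  [0. 0. 1. 1.]
#  [2. 2. 3. 3.]
#  [2. 2. 3. 3.]]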
Example #9
    def layer_op(self, tensor):
        output = tensor
        for i in range(2 * len(self.kernels)):
            input_shape = tensor.shape.as_list()
            n_input_chns = input_shape[-1]
            spatial_rank = layer_util.infer_spatial_rank(tensor)
            w_full_size = layer_util.expand_spatial_params(
                self.kernel_size, spatial_rank)
            w_full_size = w_full_size + (n_input_chns, self.n_output_chns)
            conv_kernel = tf.get_variable('w',
                                          shape=w_full_size,
                                          regularizer=self.regularizers['w'])

            output = tf.layers.batch_normalization(
                output, name='bn_{}'.format(i))
            output = self.selu(output, name='acti_{}'.format(i))
            output = tf.nn.convolution(input=output,
                                       filter=conv_kernel,
                                       strides=self.strides,
                                       dilation_rate=self.dilation_rates,
                                       padding=self.padding,
                                       name='conv_{}'.format(i))
            output = ElementwiseLayer('SUM')(output, tensor)
        return output
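Note that calling tf.get_variable('w', ...) on every loop iteration only works if each iteration runs in its own variable scope; otherwise TF 1.x raises 'Variable ... already exists'. A self-contained sketch of the scoping (hypothetical kernel shapes):

import tensorflow as tf

w_sizes = [(3, 3, 3, 1, 8), (3, 3, 3, 8, 8)]
kernels = []
for i, w_size in enumerate(w_sizes):
    # a unique scope per iteration keeps the variable names distinct
    with tf.variable_scope('res_block_{}'.format(i)):
        kernels.append(tf.get_variable('w', shape=w_size))
print([k.name for k in kernels])   # ['res_block_0/w:0', 'res_block_1/w:0']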
Example #10
    def layer_op(self, input_tensor, input_mask, output_mask):
        """
        Convolve only the channels selected by the input/output masks.

        :param input_tensor: image to convolve with kernel
        :param input_mask: 1-D tensor with a binary mask of input channels to
            use. If this is None, all channels are used.
        :param output_mask: 1-D tensor with a binary mask of output channels to
            generate. If this is None, all channels are used and
            the number of output channels is set at graph-creation time.
        :return: convolved tensor (with a bias term added if ``with_bias``)
        """
        sparse_input_shape = input_tensor.shape.as_list()
        if input_mask is None:
            _input_mask = tf.ones([sparse_input_shape[-1]]) > 0
        else:
            _input_mask = input_mask
        if output_mask is None:
            _output_mask = tf.ones([self.n_output_chns]) > 0
        else:
            _output_mask = output_mask
        n_full_input_chns = _input_mask.shape.as_list()[0]
        spatial_rank = layer_util.infer_spatial_rank(input_tensor)
        # initialize conv kernels/strides and then apply
        w_full_size = layer_util.expand_spatial_params(
            self.kernel_size, spatial_rank)
        # expand kernel size to include number of features
        w_full_size = w_full_size + (n_full_input_chns, self.n_output_chns)

        full_stride = layer_util.expand_spatial_params(
            self.stride, spatial_rank)

        full_dilation = layer_util.expand_spatial_params(
            self.dilation, spatial_rank)

        conv_kernel = tf.get_variable(
            'w', shape=w_full_size,
            initializer=self.initializers['w'],
            regularizer=self.regularizers['w'])

        if spatial_rank == 2:
            transpositions = [[3, 2, 1, 0], [1, 0, 2, 3], [3, 2, 0, 1]]
        elif spatial_rank == 3:
            transpositions = [[4, 3, 2, 1, 0], [1, 0, 2, 3, 4], [4, 3, 2, 0, 1]]
        else:
            raise NotImplementedError("spatial rank not supported")

        sparse_kernel = tf.transpose(conv_kernel, transpositions[0])
        sparse_kernel = tf.boolean_mask(sparse_kernel, _output_mask)
        sparse_kernel = tf.transpose(sparse_kernel, transpositions[1])
        sparse_kernel = tf.boolean_mask(sparse_kernel, _input_mask)
        sparse_kernel = tf.transpose(sparse_kernel, transpositions[2])

        output_tensor = tf.nn.convolution(input=input_tensor,
                                          filter=sparse_kernel,
                                          strides=full_stride,
                                          dilation_rate=full_dilation,
                                          padding=self.padding,
                                          name='conv')
        if output_mask is None:
            # If all output channels are used, we can specify
            # the number of output channels which is useful for later layers
            old_shape = output_tensor.shape.as_list()
            old_shape[-1] = self.n_output_chns
            output_tensor.set_shape(old_shape)

        if not self.with_bias:
            return output_tensor

        # adding the bias term
        bias_term = tf.get_variable(
            'b', shape=self.n_output_chns,
            initializer=self.initializers['b'],
            regularizer=self.regularizers['b'])
        sparse_bias = tf.boolean_mask(bias_term, output_mask)
        output_tensor = tf.nn.bias_add(
            output_tensor, sparse_bias, name='add_bias')
        return output_tensor
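The transpose / boolean_mask / transpose sequence is only needed because tf.boolean_mask filters along the leading axis: the channel dimension to prune is rotated to the front, masked, then rotated back. The equivalent slicing in NumPy, with made-up masks:

import numpy as np

kernel = np.random.rand(3, 3, 4, 8)          # [kH, kW, C_in, C_out]
in_mask = np.array([True, False, True, True])
out_mask = np.arange(8) % 2 == 0             # keep every other output channel

sparse = kernel[:, :, in_mask][:, :, :, out_mask]
print(sparse.shape)                          # (3, 3, 3, 4)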
Example #11
    def layer_op(self, I, U):
        """
        Compute `T` iterations of mean field update given a dense CRF.

        This layer maintains trainable CRF model parameters
        (a compatibility function and `m` kernel weights).

        :param I: feature maps used in the dense pairwise term of CRF
        :param U: activation maps used in the unary term of CRF (before softmax)
        :return: Maximum a posteriori labeling (before softmax)
        """

        spatial_rank = infer_spatial_rank(U)
        all_shape = U.shape.as_list()
        batch_size, spatial_shape, n_ch = \
            all_shape[0], all_shape[1:-1], all_shape[-1]
        n_feat = I.shape.as_list()[-1]
        if self._aspect_ratio is None:
            self._aspect_ratio = [1.] * spatial_rank
        self._aspect_ratio = expand_spatial_params(self._aspect_ratio,
                                                   spatial_rank, float)

        # constructing the scaled regular grid
        spatial_grid = tf.meshgrid(
            *[np.arange(i, dtype=np.float32) * a
              for i, a in zip(spatial_shape, self._aspect_ratio)],
            indexing='ij')
        spatial_coords = tf.stack(spatial_grid[::-1], spatial_rank)
        spatial_coords = tf.tile(tf.expand_dims(spatial_coords, 0),
                                 [batch_size] + [1] * spatial_rank + [1])

        # concatenating spatial coordinates and features
        # (and squeeze spatially)
        # for the bilateral kernel
        bilateral_coords = tf.reshape(
            tf.concat([spatial_coords / self._alpha, I / self._beta], -1),
            [batch_size, -1, n_feat + spatial_rank])
        # for the spatial kernel
        spatial_coords = tf.reshape(spatial_coords / self._gamma,
                                    [batch_size, -1, spatial_rank])

        # Build permutohedral structures for smoothing
        permutohedrals = [
            permutohedral_prepare(coords)
            for coords in (bilateral_coords, spatial_coords)
        ]

        # squeeze the spatial shapes and recover them in the end
        U = tf.reshape(U, [batch_size, -1, n_ch])
        n_voxels = U.shape.as_list()[1]
        # normalisation factor
        norms = []
        for idx, permutohedral in enumerate(permutohedrals):
            spatial_norm = _permutohedral_gen(
                permutohedral, tf.ones((batch_size, n_voxels, 1)),
                'spatial_norms' + str(idx))
            spatial_norm.set_shape([batch_size, n_voxels, 1])
            spatial_norm = 1.0 / tf.sqrt(spatial_norm + 1e-20)
            norms.append(spatial_norm)

        # trainable compatibility matrix mu (initialised as identity * -1)
        mu_shape = [n_ch, n_ch]
        if self._mu_init is None:
            self._mu_init = -np.eye(n_ch)
        self._mu_init = np.reshape(self._mu_init, mu_shape)
        mu = tf.get_variable('Compatibility',
                             initializer=tf.constant(self._mu_init,
                                                     dtype=tf.float32))

        # trainable kernel weights
        weight_shape = [n_ch]
        if self._w_init is None:
            self._w_init = [np.ones(n_ch), np.ones(n_ch)]
        self._w_init = [np.reshape(_w, weight_shape) for _w in self._w_init]
        kernel_weights = [
            tf.get_variable('FilterWeights{}'.format(idx),
                            initializer=tf.constant(self._w_init[idx],
                                                    dtype=tf.float32))
            for idx, k in enumerate(permutohedrals)
        ]

        H1 = U
        for t in range(self._T):
            H1 = ftheta(U,
                        H1,
                        permutohedrals,
                        mu,
                        kernel_weights,
                        norms,
                        name='{}{}'.format(self.name, t))
        return tf.reshape(H1, all_shape)
Example #12
    def layer_op(self, input_tensor, input_image_shape=None):
        """
        :param input_tensor: a 5D tensor [B, H, D, W, N]
        :param input_image_shape: a tensor of [H, D, W]
        """
        input_shape = input_tensor.get_shape().as_list()
        n_input_chns = input_shape[-1]
        spatial_rank = layer_util.infer_spatial_rank(input_tensor)

        # initialize conv kernels/strides and then apply
        kernel_size_all_dim = layer_util.expand_spatial_params(
            self.kernel_size, spatial_rank)
        w_full_size = kernel_size_all_dim + (self.n_output_chns, n_input_chns)
        stride_all_dim = layer_util.expand_spatial_params(
            self.stride, spatial_rank)
        full_stride = (1,) + stride_all_dim + (1,)

        deconv_kernel = tf.get_variable(
            'w', shape=w_full_size,
            initializer=self.initializers['w'],
            regularizer=self.regularizers['w'])
        if spatial_rank == 2:
            op_ = SUPPORTED_OP['2D']
        elif spatial_rank == 3:
            op_ = SUPPORTED_OP['3D']
        else:
            raise ValueError(
                "Only 2D and 3D spatial deconvolutions are supported")

        if input_image_shape is None:
            output_dims = infer_output_dims(input_shape[1:-1],
                                            stride_all_dim,
                                            kernel_size_all_dim,
                                            self.padding)
            full_output_size = \
                [input_shape[0]] + output_dims + [self.n_output_chns]
        else:
            output_dims = infer_output_dims_tensor(input_image_shape,
                                                   stride_all_dim,
                                                   kernel_size_all_dim,
                                                   self.padding)
            full_output_size = tf.concat([tf.constant([input_shape[0]]),
                                          output_dims,
                                          [self.n_output_chns]], axis=0)
        output_tensor = op_(value=input_tensor,
                            filter=deconv_kernel,
                            output_shape=full_output_size,
                            strides=full_stride,
                            padding=self.padding,
                            name='deconv')

        if not self.with_bias:
            return output_tensor

        # adding the bias term
        bias_full_size = (self.n_output_chns,)
        bias_term = tf.get_variable(
            'b', shape=bias_full_size,
            initializer=self.initializers['b'],
            regularizer=self.regularizers['b'])
        output_tensor = tf.nn.bias_add(output_tensor,
                                       bias_term,
                                       name='add_bias')
        return output_tensor
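When the spatial (or batch) sizes are only known at run time, the output_shape for the transposed convolution has to be assembled as a tensor rather than a Python list. A small TF 1.x sketch (assuming a 3-D 'SAME' deconvolution with stride 2 and 16 output channels; not the layer's exact code):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, None, None, None, 8])
dyn_shape = tf.shape(x)
# batch and spatial sizes taken at run time; channel count known statically
full_output_size = tf.stack(
    [dyn_shape[0], dyn_shape[1] * 2, dyn_shape[2] * 2, dyn_shape[3] * 2, 16])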