Example #1
    def test_bilinear_kernel_1D(self):
        """Test 1D kernels used in bilinear upsampling

        This method tests the correctness of the
        1D kernel values used in bilinear upsampling
        for some upsampling ratios.

        """
        rat = tensor.iscalar()
        kernel_ten = bilinear_kernel_1D(ratio=rat, normalize=False)
        f_ten = theano.function([rat], kernel_ten)

        kernel_ten_norm = bilinear_kernel_1D(ratio=rat, normalize=True)
        f_ten_norm = theano.function([rat], kernel_ten_norm)

        for ratio in [2, 3, 4, 5, 6, 7, 8, 9]:
            # getting the un-normalized kernel
            kernel = bilinear_kernel_1D(ratio=ratio, normalize=False)
            f = theano.function([], kernel)
            kernel_1D = self.numerical_kernel_1D(ratio)
            utt.assert_allclose(kernel_1D, f())
            utt.assert_allclose(kernel_1D, f_ten(ratio))

            # getting the normalized kernel
            kernel = bilinear_kernel_1D(ratio=ratio, normalize=True)
            f = theano.function([], kernel)
            kernel_1D = kernel_1D / float(ratio)
            utt.assert_allclose(kernel_1D, f())
            utt.assert_allclose(kernel_1D, f_ten_norm(ratio))
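The expected values in the test above come from `self.numerical_kernel_1D`, which is not part of this excerpt. A minimal NumPy sketch of what such a reference presumably computes (an assumption, not the original helper): the 1D bilinear kernel for a given ratio is the triangular window of length `2 * ratio - 1`, divided by `ratio` when normalized.

import numpy as np

def numerical_kernel_1D(ratio):
    # Hypothetical reference implementation of the helper used above:
    # a triangular kernel [1, 2, ..., ratio, ..., 2, 1] of length 2 * ratio - 1.
    half = np.arange(1, ratio + 1, dtype='float64')
    return np.concatenate([half, half[-2::-1]])

print(numerical_kernel_1D(3))        # [1. 2. 3. 2. 1.]
print(numerical_kernel_1D(3) / 3.0)  # normalized variant checked via f_ten_norm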
Example #2
    def _bilinear_upsampling_1D(self, inpt, ratio, batch_size=None, num_input_channels=None):
        '''
        This implementation is a very minimally changed excerpt from:
        https://github.com/Theano/theano/blob/ddfd7d239a1e656cee850cdbc548da63f349c37d/theano/tensor/nnet/abstract_conv.py#L455
        '''
        if theano.config.device.startswith('gpu'):
            from theano.tensor.nnet.abstract_conv import bilinear_kernel_1D, conv2d_grad_wrt_inputs
        else:
            raise AssertionError('Bilinear interpolation requires GPU and cuDNN.')
        try:
            up_bs = batch_size * num_input_channels
        except TypeError:
            up_bs = None
        row, col = inpt.shape[2:]
        up_input = inpt.reshape((-1, 1, row, col))

        # replicate the first and last row so the borders are handled
        # correctly by the interpolation
        concat_mat = T.concatenate((up_input[:, :, :1, :], up_input,
                                    up_input[:, :, -1:, :]), axis=2)

        # border padding for the transposed convolution below
        # (the 1D bilinear kernel has length 2 * ratio - 1)
        pad = 2 * ratio - (ratio - 1) // 2 - 1

        # upsample along the rows only: the gradient of a strided conv2d
        # w.r.t. its inputs acts as a fractionally strided (transposed)
        # convolution with the bilinear kernel
        kern = bilinear_kernel_1D(ratio=ratio, normalize=True)
        upsampled_row = conv2d_grad_wrt_inputs(
            output_grad=concat_mat,
            filters=kern[np.newaxis, np.newaxis, :, np.newaxis],
            input_shape=(up_bs, 1, row * ratio, col),
            filter_shape=(1, 1, None, 1),
            border_mode=(pad, 0),
            subsample=(ratio, 1),
            filter_flip=True
        )

        return upsampled_row.reshape((inpt.shape[0], inpt.shape[1], row * ratio, col * 1))
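A hypothetical usage sketch of the helper above (illustrative names and shapes; the helper is treated as a free function here for brevity, and the guard above means it only runs when `theano.config.device` is a GPU device):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')                       # (batch, channels, rows, cols)
up = _bilinear_upsampling_1D(x, ratio=2,
                             batch_size=4,
                             num_input_channels=3)
f = theano.function([x], up)

out = f(np.random.rand(4, 3, 5, 7).astype(theano.config.floatX))
print(out.shape)                         # (4, 3, 10, 7): rows doubled, columns unchanged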
Example #3
    def _bilinear_upsampling_1D(self,
                                inpt,
                                ratio,
                                batch_size=None,
                                num_input_channels=None):
        '''
        This implementation is a very minimally changed excerpt from:
        https://github.com/Theano/theano/blob/ddfd7d239a1e656cee850cdbc548da63f349c37d/theano/tensor/nnet/abstract_conv.py#L455
        '''
        if theano.config.device.startswith('gpu'):
            from theano.tensor.nnet.abstract_conv import bilinear_kernel_1D, conv2d_grad_wrt_inputs
        else:
            raise AssertionError(
                'Bilinear interpolation requires GPU and cuDNN.')
        try:
            up_bs = batch_size * num_input_channels
        except TypeError:
            up_bs = None
        row, col = inpt.shape[2:]
        up_input = inpt.reshape((-1, 1, row, col))

        concat_mat = T.concatenate(
            (up_input[:, :, :1, :], up_input, up_input[:, :, -1:, :]), axis=2)

        pad = 2 * ratio - (ratio - 1) // 2 - 1

        kern = bilinear_kernel_1D(ratio=ratio, normalize=True)
        upsampled_row = conv2d_grad_wrt_inputs(output_grad=concat_mat,
                                               filters=kern[np.newaxis,
                                                            np.newaxis, :,
                                                            np.newaxis],
                                               input_shape=(up_bs, 1,
                                                            row * ratio, col),
                                               filter_shape=(1, 1, None, 1),
                                               border_mode=(pad, 0),
                                               subsample=(ratio, 1),
                                               filter_flip=True)

        return upsampled_row.reshape(
            (inpt.shape[0], inpt.shape[1], row * ratio, col * 1))
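The only non-obvious constant in these two excerpts is `pad`. A few worked instances of the formula (plain integer arithmetic, nothing Theano-specific):

# pad = 2 * ratio - (ratio - 1) // 2 - 1
for ratio in (2, 3, 4, 5):
    print(ratio, 2 * ratio - (ratio - 1) // 2 - 1)   # 2 -> 3, 3 -> 4, 4 -> 6, 5 -> 7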
Example #4
def bilinear_upsampling(input,
                        ratio,
                        batch_size=None,
                        num_input_channels=None,
                        use_1D_kernel=True):
    """Compute bilinear upsampling
    This function will build the symbolic graph for upsampling
    a tensor by the given ratio using bilinear interpolation.
    Parameters
    ----------
    input: symbolic 4D tensor
        mini-batch of feature map stacks, of shape (batch size,
        input channels, input rows, input columns) that will be upsampled.
    ratio: `int or Constant or Scalar Tensor of int* dtype`
        the ratio by which the input is upsampled in the 2D space (row and
        col size).
    batch_size: None, int or Constant variable
        The size of the first dimension of the input variable.
        Optional, possibly used to choose an optimal implementation.
        batch_size will be used only if num_input_channels is not None.
    num_input_channels: None, int or Constant variable
        The size of the second dimension of the input variable.
        Optional, possibly used to choose an optimal implementation.
        num_input_channels will be used only if batch_size is not None.
    use_1D_kernel: bool
        if set to true, row and column will be upsampled seperately by 1D
        kernels, otherwise they are upsampled together using a 2D kernel. The
        final result is the same, only the speed can differ, given factors such
        as upsampling ratio.
    Returns
    -------
    symbolic 4D tensor
        set of feature maps generated by bilinear upsampling. Tensor
        is of shape (batch size, num_input_channels, input row size * ratio,
        input column size * ratio)
    Notes
    -----
    :note: The kernel used for bilinear interpolation is fixed (not learned).
    :note: When the upsampling ratio is even, the last row and column is
        repeated one extra time compared to the first row and column which makes
        the upsampled tensor asymmetrical on both sides. This does not happen when
        the upsampling ratio is odd.
    """

    T = theano.tensor
    try:
        up_bs = batch_size * num_input_channels
    except TypeError:
        up_bs = None
    row, col = input.shape[2:]
    up_input = input.reshape((-1, 1, row, col))

    # concatenating the first and last row and column
    # first and last row
    concat_mat = T.concatenate(
        (up_input[:, :, :1, :], up_input, up_input[:, :, -1:, :]), axis=2)
    # first and last col
    concat_mat = T.concatenate(
        (concat_mat[:, :, :, :1], concat_mat, concat_mat[:, :, :, -1:]),
        axis=3)
    concat_col = col + 2

    pad = 2 * ratio - (ratio - 1) // 2 - 1

    if use_1D_kernel:
        kern = bilinear_kernel_1D(ratio=ratio, normalize=True)
        # upsampling rows
        upsampled_row = conv2d_grad_wrt_inputs(
            output_grad=concat_mat,
            filters=kern[np.newaxis, np.newaxis, :, np.newaxis],
            input_shape=(up_bs, 1, row * ratio, concat_col),
            filter_shape=(1, 1, None, 1),
            border_mode=(pad, 0),
            subsample=(ratio, 1),
            filter_flip=True)
        # upsampling cols
        upsampled_mat = conv2d_grad_wrt_inputs(
            output_grad=upsampled_row,
            filters=kern[np.newaxis, np.newaxis, np.newaxis, :],
            input_shape=(up_bs, 1, row * ratio, col * ratio),
            filter_shape=(1, 1, 1, None),
            border_mode=(0, pad),
            subsample=(1, ratio),
            filter_flip=True)
    else:
        kern = bilinear_kernel_2D(ratio=ratio, normalize=True)
        upsampled_mat = conv2d_grad_wrt_inputs(
            output_grad=concat_mat,
            filters=kern[np.newaxis, np.newaxis, :, :],
            input_shape=(up_bs, 1, row * ratio, col * ratio),
            filter_shape=(1, 1, None, None),
            border_mode=(pad, pad),
            subsample=(ratio, ratio),
            filter_flip=True)

    return upsampled_mat.reshape(
        (input.shape[0], input.shape[1], row * ratio, col * ratio))
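A minimal usage sketch of this function as exposed in `theano.tensor.nnet.abstract_conv` (illustrative shapes); it also exercises the note above that the 1D and 2D kernel paths produce the same values:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet.abstract_conv import bilinear_upsampling

x = T.tensor4('x')                                   # (batch, channels, rows, cols)
up_1d = bilinear_upsampling(x, ratio=3, use_1D_kernel=True)
up_2d = bilinear_upsampling(x, ratio=3, use_1D_kernel=False)
f = theano.function([x], [up_1d, up_2d])

inp = np.random.rand(2, 1, 4, 5).astype(theano.config.floatX)
out_1d, out_2d = f(inp)
print(out_1d.shape)                                  # (2, 1, 12, 15)
np.testing.assert_allclose(out_1d, out_2d, rtol=1e-5)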