Example #1
    def __call__(self, x):
        """Applies the convolution layer.
        Args:
            x (~chainer.Variable): Input image.
        Returns:
            ~chainer.Variable: Output of the convolution.
        """
        if self.W.data is None:
            self._initialize_params(x.shape[1])

        # Split the stacked weights and bias into their real and imaginary
        # halves, and split the input channels the same way (integer division
        # keeps the slice indices ints).
        W = F.split_axis(self.W, 2, axis=0)
        b = F.split_axis(self.b, 2, axis=0)
        x_real = x[:, :x.shape[1] // 2, :, :]
        x_imag = x[:, x.shape[1] // 2:, :, :]

        xr_Wr = convolution_2d.convolution_2d(
            x_real, W[0], b[0], self.stride, self.pad) 
        xi_Wr = convolution_2d.convolution_2d(
            x_imag, W[0], b[0], self.stride, self.pad) 
        xr_Wi = convolution_2d.convolution_2d(
            x_real, W[1], b[1], self.stride, self.pad) 
        xi_Wi = convolution_2d.convolution_2d(
            x_imag, W[1], b[1], self.stride, self.pad) 

        r = xr_Wr - xi_Wi
        i = xr_Wi + xi_Wr
        return F.concat([r,i], axis=1)
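The four real convolutions above realize the complex product (x_r + i*x_i)(W_r + i*W_i) = (x_r*W_r - x_i*W_i) + i*(x_r*W_i + x_i*W_r), with the real and imaginary parts stacked along the channel axis. A minimal NumPy sketch of the same identity using elementwise arithmetic (the array names are illustrative and not part of the snippet above; the identity carries over to convolution because convolution is linear):

import numpy as np

xr, xi = np.random.randn(8), np.random.randn(8)   # "real" and "imaginary" inputs
Wr, Wi = np.random.randn(8), np.random.randn(8)   # "real" and "imaginary" weights

ref = (xr + 1j * xi) * (Wr + 1j * Wi)   # complex product, computed directly
real = xr * Wr - xi * Wi                # same combination as r above
imag = xr * Wi + xi * Wr                # same combination as i above

assert np.allclose(ref.real, real)
assert np.allclose(ref.imag, imag)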
Example #2
    def forward(self, x):
        """Applies the convolution layer.

        Args:
            x (~chainer.Variable): Input image.

        Returns:
            ~chainer.Variable: Output of the convolution.

        """
        x = chainer.as_variable(x)
        assert x.layout == self.x_layout
        # self.W can be a Variable instead of Parameter: #8462
        # TODO(niboshi): Use Parameter.is_initialized.
        if self.W.raw_array is None:
            _, c, _, _ = memory_layouts.get_semantic_shape(
                x, assumed_layout=self.x_layout)
            self._initialize_params(c)
        return convolution_2d.convolution_2d(x,
                                             self.W,
                                             self.b,
                                             self.stride,
                                             self.pad,
                                             dilate=self.dilate,
                                             groups=self.groups,
                                             cudnn_fast=self.cudnn_fast)
Example #3
 def __call__(self, x):
     if self.W.data is None:
         self._initialize_params(x.shape[1])
     return convolution_2d.convolution_2d(x,
                                          self.W_bar,
                                          self.b,
                                          self.stride,
                                          self.pad,
                                          dilate=self.dilate,
                                          groups=self.groups)
Example #4
 def __call__(self, x):
     """Applies the convolution layer.
     Args:
         x (~chainer.Variable): Input image.
     Returns:
         ~chainer.Variable: Output of the convolution.
     """
     if self.W.data is None:
         self._initialize_params(x.shape[1])
     return convolution_2d.convolution_2d(x, self.W_bar, self.b,
                                          self.stride, self.pad)
Example #5
    def __call__(self, x):
        """Applies the convolution layer.

        Args:
            x (~chainer.Variable): Input image.

        Returns:
            ~chainer.Variable: Output of the convolution.

        """
        return convolution_2d.convolution_2d(
            x, self.W, self.b, self.stride, self.pad, self.use_cudnn)
Example #6
    def __call__(self, x):
        """Applies the convolution layer.

        Args:
            x (~chainer.Variable): Input image.

        Returns:
            ~chainer.Variable: Output of the convolution.

        """
        return convolution_2d.convolution_2d(x, self.W, self.b, self.stride,
                                             self.pad, self.use_cudnn)
Example #7
    def __call__(self, x, W=None, b=None):
        """Applies the convolution layer.

        Args:
            x (~chainer.Variable): Input image.
            W (~chainer.Variable): Optional weight variable used in place of
                ``self.W``.
            b (~chainer.Variable): Optional bias variable used together with
                ``W``.

        Returns:
            ~chainer.Variable: Output of the convolution.

        """
        if self.has_uninitialized_params:
            with cuda.get_device_from_id(self._device_id):
                self._initialize_params(x.shape[1])

        if W is not None:
            return convolution_2d.convolution_2d(
                x, W, b, self.stride, self.pad, self.use_cudnn,
                deterministic=self.deterministic)
                
        return convolution_2d.convolution_2d(
            x, self.W, self.b, self.stride, self.pad, self.use_cudnn,
            deterministic=self.deterministic)
Example #8
    def __call__(self, x):
        """Applies the convolution layer.

        Args:
            x (~chainer.Variable): Input image.

        Returns:
            ~chainer.Variable: Output of the convolution.

        """
        if self.has_uninitialized_params:
            self._initialize_params(x.data.shape[1])
        return convolution_2d.convolution_2d(
            x, self.W, self.b, self.stride, self.pad, self.use_cudnn)
Example #9
    def __call__(self, x):
        """Applies the convolution layer.

        Args:
            x (~chainer.Variable): Input image.

        Returns:
            ~chainer.Variable: Output of the convolution.

        """
        if self.W.data is None:
            self._initialize_params(x.shape[1])
        return convolution_2d.convolution_2d(
            x, self.W, self.b, self.stride, self.pad, dilate=self.dilate)
Example #10
    def forward(self, x):
        """Applies the convolution layer.

        Args:
            x (~chainer.Variable): Input image.

        Returns:
            ~chainer.Variable: Output of the convolution.

        """
        if self.W.array is None:
            self._initialize_params(x.shape[1])
        return convolution_2d.convolution_2d(
            x, self.W, self.b, self.stride, self.pad, dilate=self.dilate,
            groups=self.groups)
Example #11
    def __call__(self, x):
        """Applies the convolution layer.

        Args:
            x (~chainer.Variable): Input image.

        Returns:
            ~chainer.Variable: Output of the convolution.

        """
        if self.has_uninitialized_params:
            with cuda.get_device(self._device_id):
                self._initialize_params(x.shape[1])
        return convolution_2d.convolution_2d(x, self.W, self.b, self.stride,
                                             self.pad, self.use_cudnn)
Example #12
    def __call__(self, x):
        """Applies the convolution layer.

        Args:
            x (~chainer.Variable): Input image.

        Returns:
            ~chainer.Variable: Output of the convolution.

        """
        # Build the filter from the learned log-sigma and normalize it so
        # that its entries sum to one.
        sd_mat = F.broadcast_to(F.exp(self.logsigma),
                                (1, 1, self.ksize, self.ksize))
        W = F.exp(self.dist / sd_mat)
        # ``xp`` is not defined in the original snippet; assume it is the
        # NumPy/CuPy module for the data, resolved here as in the other
        # examples via chainer's cuda helper.
        xp = cuda.get_array_module(W.data)
        W = W / xp.sum(W.data)

        return convolution_2d.convolution_2d(x,
                                             W,
                                             b=None,
                                             stride=1,
                                             pad=self.pad,
                                             use_cudnn=True)
Example #13
    def test_basis(self):
        n = 1
        c_i = 1
        h_i = 3
        w_i = 3
        x = np.arange(n * c_i * h_i * w_i).reshape(n, c_i, h_i,
                                                   w_i).astype(np.float32)
        c_o = 1
        h_k = 2
        w_k = 2
        W = np.full(c_o * c_i * h_k * w_k, 2).reshape(c_o, c_i, h_k,
                                                      w_k).astype(np.float32)

        expected = convolution_2d(x, W).data
        actual = convolution_with_numpy(x, W)

        assert_array_equal(expected, actual)
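The reference helper ``convolution_with_numpy`` is not shown in this listing. A minimal sliding-window sketch of what such a helper might look like (the signature is assumed from how the tests call it; ``chainer.functions.convolution_2d`` computes a cross-correlation, so no kernel flip is needed):

import numpy as np

def convolution_with_numpy(x, W, stride=1, pad=0):
    # x: (n, c_i, h, w), W: (c_o, c_i, k_h, k_w); naive cross-correlation.
    n, c_i, h, w = x.shape
    c_o, _, k_h, k_w = W.shape
    x = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')
    h_o = (h + 2 * pad - k_h) // stride + 1
    w_o = (w + 2 * pad - k_w) // stride + 1
    y = np.zeros((n, c_o, h_o, w_o), dtype=x.dtype)
    for i in range(h_o):
        for j in range(w_o):
            patch = x[:, :, i * stride:i * stride + k_h,
                      j * stride:j * stride + k_w]
            # Reduce over input channels and the kernel window for every
            # output channel at once.
            y[:, :, i, j] = np.tensordot(patch, W, axes=([1, 2, 3], [1, 2, 3]))
    return y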
Example #14
    def __call__(self, x):
        """Applies the convolution layer.

        Args:
            x (~chainer.Variable): Input image.

        Returns:
            ~chainer.Variable: Output of the convolution.

        """
        if self.W.data is None:
            self._initialize_params(x.shape[1])
            if ia.all_ready((self.W, )):
                self.to_ia()
        return convolution_2d.convolution_2d(x,
                                             self.W,
                                             self.b,
                                             self.stride,
                                             self.pad,
                                             dilate=self.dilate)
Example #15
    def test_with_c_o_3_c_i_3_n_3_stride_3_and_padding_5(self):
        n = 3
        c_i = 3
        h_i = 10
        w_i = 10
        x = np.arange(n * c_i * h_i * w_i).reshape(n, c_i, h_i,
                                                   w_i).astype(np.float32)
        c_o = 3
        h_k = 2
        w_k = 2
        W = np.full(c_o * c_i * h_k * w_k, 2).reshape(c_o, c_i, h_k,
                                                      w_k).astype(np.float32)

        stride = 3
        pad = 5

        expected = convolution_2d(x, W, stride=stride, pad=pad).data
        actual = convolution_with_numpy(x, W, stride=stride, pad=pad)

        assert_array_equal(expected, actual)
Example #16
    def test_with_stride_4(self):
        n = 1
        c_i = 1
        h_i = 10
        w_i = 10
        x = np.arange(n * c_i * h_i * w_i).reshape(n, c_i, h_i,
                                                   w_i).astype(np.float32)
        c_o = 1
        h_k = 2
        w_k = 2
        W = np.full(c_o * c_i * h_k * w_k, 2).reshape(c_o, c_i, h_k,
                                                      w_k).astype(np.float32)

        stride = 4
        pad = 0

        expected = convolution_2d(x, W, stride=stride, pad=pad).data
        actual = convolution_with_numpy(x, W, stride=stride, pad=pad)

        assert_array_equal(expected, actual)
Example #17
    def __call__(self, x):
        """Applies the convolution layer.
        Args:
            x (~chainer.Variable): Input image.
        Returns:
            ~chainer.Variable: Output of the convolution.
        """
        if self.W.data is None:
            self._initialize_params(x.shape[1])

        if self.scale_param.data[0, 0] < 0:
            self._initialize_scale()
        #print(self.scale_param.data)
        y = self.scale_param.data * convolution_2d.convolution_2d(
            x,
            self.W,
            None,
            self.stride,
            self.pad,
            dilate=self.dilate,
            groups=self.groups)
        return bias(y, self.b)
Example #18
    def test_basis_with_1_1_kernel(self):
        n = 1
        c_i = 1
        h_i = 3
        w_i = 3
        x = np.arange(n * c_i * h_i * w_i).reshape(n, c_i, h_i,
                                                   w_i).astype(np.float32)
        c_o = 1
        h_k = 1
        w_k = 1
        W = np.full(c_o * c_i * h_k * w_k, 2).reshape(c_o, c_i, h_k,
                                                      w_k).astype(np.float32)

        stride = 1
        pad = 0

        expected = convolution_2d(x, W, stride=stride, pad=pad).data.tolist()
        actual = convolution_with_im2col_and_gemm(x.tolist(),
                                                  W.tolist(),
                                                  stride=stride,
                                                  pad=pad)

        self.__assert_eq_arrays(actual, expected, n, c_o, h_i, w_i, h_k, w_k,
                                pad, stride)
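``convolution_with_im2col_and_gemm`` is likewise only referenced here, not defined. The usual shape of such a helper is to unfold every receptive field into the column of a matrix (im2col) and then reduce the convolution to one matrix multiplication (GEMM); a hedged NumPy sketch under that assumption (names and signature are illustrative, not the code under test):

import numpy as np

def convolution_with_im2col_and_gemm(x, W, stride=1, pad=0):
    x = np.asarray(x, dtype=np.float32)            # the tests pass plain lists
    W = np.asarray(W, dtype=np.float32)
    n, c_i, h, w = x.shape
    c_o, _, k_h, k_w = W.shape
    x = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')
    h_o = (h + 2 * pad - k_h) // stride + 1
    w_o = (w + 2 * pad - k_w) // stride + 1

    # im2col: one column per output position, one row per (channel, ky, kx).
    cols = np.empty((n, c_i * k_h * k_w, h_o * w_o), dtype=np.float32)
    for idx in range(h_o * w_o):
        i, j = divmod(idx, w_o)
        patch = x[:, :, i * stride:i * stride + k_h,
                  j * stride:j * stride + k_w]
        cols[:, :, idx] = patch.reshape(n, -1)

    # GEMM: (c_o, c_i*k_h*k_w) @ (n, c_i*k_h*k_w, h_o*w_o) -> (n, c_o, h_o*w_o).
    y = W.reshape(c_o, -1) @ cols
    return y.reshape(n, c_o, h_o, w_o)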
Example #19
    def test_basis(self):
        n = 1
        c_i = 1
        h_i = 3
        w_i = 3
        x = np.arange(n * c_i * h_i * w_i).reshape(n, c_i, h_i,
                                                   w_i).astype(np.float32)
        c_o = 1
        h_k = 2
        w_k = 2
        W = np.full(c_o * c_i * h_k * w_k, 2).reshape(c_o, c_i, h_k,
                                                      w_k).astype(np.float32)

        stride = 1
        pad = 0

        expected = list(convolution_2d(x, W, stride=stride, pad=pad).data)
        actual = convolution_with_standard_library(x.tolist(),
                                                   W.tolist(),
                                                   stride=stride,
                                                   pad=pad)

        self.__assert_eq_arrays(actual, expected, n, c_o, h_i, w_i, h_k, w_k,
                                pad, stride)
Example #20
    def test_with_c_o_64_c_i_3_n_1_stride_1_and_padding_1(self):
        n = 1
        c_i = 3
        h_i = 224
        w_i = 224
        x = np.arange(n * c_i * h_i * w_i).reshape(n, c_i, h_i,
                                                   w_i).astype(np.float32)
        c_o = 64
        h_k = 3
        w_k = 3
        W = np.full(c_o * c_i * h_k * w_k, 2).reshape(c_o, c_i, h_k,
                                                      w_k).astype(np.float32)

        stride = 1
        pad = 1

        expected = convolution_2d(x, W, stride=stride, pad=pad).data.tolist()
        actual = convolution_with_standard_library(x.tolist(),
                                                   W.tolist(),
                                                   stride=stride,
                                                   pad=pad)

        self.__assert_eq_arrays(actual, expected, n, c_o, h_i, w_i, h_k, w_k,
                                pad, stride)
Example #21
    def test_with_stride_3(self):
        n = 1
        c_i = 1
        h_i = 10
        w_i = 10
        x = np.arange(n * c_i * h_i * w_i).reshape(n, c_i, h_i,
                                                   w_i).astype(np.float32)
        c_o = 1
        h_k = 2
        w_k = 2
        W = np.full(c_o * c_i * h_k * w_k, 2).reshape(c_o, c_i, h_k,
                                                      w_k).astype(np.float32)

        stride = 3
        pad = 0

        expected = convolution_2d(x, W, stride=stride, pad=pad).data.tolist()
        actual = convolution_with_im2col(x.tolist(),
                                         W.tolist(),
                                         stride=stride,
                                         pad=pad)

        self.__assert_eq_arrays(actual, expected, n, c_o, h_i, w_i, h_k, w_k,
                                pad, stride)
Example #22
    def test_with_c_o_3_c_i_3_n_3_stride_3_and_padding_5(self):
        n = 3
        c_i = 3
        h_i = 10
        w_i = 10
        x = np.arange(n * c_i * h_i * w_i).reshape(n, c_i, h_i,
                                                   w_i).astype(np.float32)
        c_o = 3
        h_k = 2
        w_k = 2
        W = np.full(c_o * c_i * h_k * w_k, 2).reshape(c_o, c_i, h_k,
                                                      w_k).astype(np.float32)

        stride = 3
        pad = 5

        expected = list(convolution_2d(x, W, stride=stride, pad=pad).data)
        actual = convolution_with_standard_library(x.tolist(),
                                                   W.tolist(),
                                                   stride=stride,
                                                   pad=pad)

        self.__assert_eq_arrays(actual, expected, n, c_o, h_i, w_i, h_k, w_k,
                                pad, stride)
Example #23
def dilated_convolution_2d(x, W, b=None, stride=1, pad=0, dilate=1,
                           cover_all=False):
    """Two-dimensional dilated convolution function.

    This is an implementation of two-dimensional dilated convolution
    in ConvNets.
    It takes three variables: the input image ``x``, the filter weight ``W``,
    and the bias vector ``b``.

    .. note::
       You can also perform dilated convolution by passing ``dilate``
       argument to :class:`chainer.functions.convolution_2d`.
       The functionality is the same.

    Notation: here is a notation for dimensionalities.

    - :math:`n` is the batch size.
    - :math:`c_I` and :math:`c_O` are the number of input and output
      channels, respectively.
    - :math:`h` and :math:`w` are the height and width of the input image,
      respectively.
    - :math:`k_H` and :math:`k_W` are the height and width of the filters,
      respectively.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable of shape :math:`(n, c_I, h, w)`.
        W (:class:`~chainer.Variable` or :ref:`ndarray`):
            Weight variable of shape :math:`(c_O, c_I, k_H, k_W)`.
        b (:class:`~chainer.Variable` or :ref:`ndarray`):
            Bias variable of length :math:`c_O` (optional).
        stride (int or pair of ints): Stride of filter applications.
            ``stride=s`` and ``stride=(s, s)`` are equivalent.
        pad (int or pair of ints): Spatial padding width for input arrays.
            ``pad=p`` and ``pad=(p, p)`` are equivalent.
        dilate (int or pair of ints): Dilation factor of filter applications.
            ``dilate=d`` and ``dilate=(d, d)`` are equivalent.
        cover_all (bool): If ``True``, all spatial locations are convoluted
            into some output pixels. It may make the output size larger.

    Returns:
        ~chainer.Variable: Output variable.

    The two-dimensional dilated convolution function computes correlations
    between the filters and patches of size :math:`(k_H, k_W)` in ``x``.
    Note that correlation here is equivalent to the inner product between
    expanded vectors.
    Patches are extracted at intervals of the dilation factor and at positions
    shifted by multiples of ``stride`` from the first position ``-pad`` for
    each spatial axis. The right-most (or bottom-most) patches do not run over
    the padded spatial size.

    Let :math:`(s_Y, s_X)` be the stride of filter application,
    :math:`(p_H, p_W)` the spatial padding size, and :math:`(d_Y, d_X)`
    the dilation factor of filter application. Then, the output size
    :math:`(h_O, w_O)` is determined by the following equations:

    .. math::

       h_O &= (h + 2p_H - k_H - (k_H - 1) * (d_Y - 1)) / s_Y + 1,\\\\
       w_O &= (w + 2p_W - k_W - (k_W - 1) * (d_X - 1)) / s_X + 1.

    If the bias vector is given, then it is added to all spatial locations of
    the output of convolution.

    """
    return convolution_2d.convolution_2d(x, W, b,
                                         stride, pad, cover_all, dilate=dilate)
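A minimal usage sketch of the wrapper above, cross-checking the output size against the formula in the docstring (the concrete shapes are illustrative and assume the function is importable in the current scope):

import numpy as np

x = np.zeros((1, 3, 10, 10), dtype=np.float32)   # (n, c_I, h, w)
W = np.zeros((8, 3, 3, 3), dtype=np.float32)     # (c_O, c_I, k_H, k_W)

# With k_H = 3, d_Y = 2, s_Y = 1 and p_H = 1:
#   h_O = (10 + 2*1 - 3 - (3 - 1)*(2 - 1)) / 1 + 1 = 8
y = dilated_convolution_2d(x, W, stride=1, pad=1, dilate=2)
assert y.shape == (1, 8, 8, 8)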
Example #24
    def forward_step(self, x):
        y = convolution_2d.convolution_2d(x, self.W, None, 1, 0)

        log_det = self.compute_log_determinant(x)
        return y, log_det
Example #25
 def forward(self, x):
   return convolution_2d(x, self.W, self.b, pad=1)
Example #26
def dilated_convolution_2d(x,
                           W,
                           b=None,
                           stride=1,
                           pad=0,
                           dilate=1,
                           cover_all=False):
    """Two-dimensional dilated convolution function.

    This is an implementation of two-dimensional dilated convolution
    in ConvNets.
    It takes three variables: the input image ``x``, the filter weight ``W``,
    and the bias vector ``b``.

    .. note::
       You can also perform dilated convolution by passing ``dilate``
       argument to :class:`chainer.functions.convolution_2d`.
       The functionality is the same.

    Notation: here is a notation for dimensionalities.

    - :math:`n` is the batch size.
    - :math:`c_I` and :math:`c_O` are the number of input and output
      channels, respectively.
    - :math:`h` and :math:`w` are the height and width of the input image,
      respectively.
    - :math:`k_H` and :math:`k_W` are the height and width of the filters,
      respectively.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable of shape :math:`(n, c_I, h, w)`.
        W (:class:`~chainer.Variable` or :ref:`ndarray`):
            Weight variable of shape :math:`(c_O, c_I, k_H, k_W)`.
        b (:class:`~chainer.Variable` or :ref:`ndarray`):
            Bias variable of length :math:`c_O` (optional).
        stride (int or pair of ints): Stride of filter applications.
            ``stride=s`` and ``stride=(s, s)`` are equivalent.
        pad (int or pair of ints): Spatial padding width for input arrays.
            ``pad=p`` and ``pad=(p, p)`` are equivalent.
        dilate (int or pair of ints): Dilation factor of filter applications.
            ``dilate=d`` and ``dilate=(d, d)`` are equivalent.
        cover_all (bool): If ``True``, all spatial locations are convoluted
            into some output pixels. It may make the output size larger.

    Returns:
        ~chainer.Variable: Output variable.

    The two-dimensional dilated convolution function computes correlations
    between the filters and patches of size :math:`(k_H, k_W)` in ``x``.
    Note that correlation here is equivalent to the inner product between
    expanded vectors.
    Patches are extracted at intervals of the dilation factor and at positions
    shifted by multiples of ``stride`` from the first position ``-pad`` for
    each spatial axis. The right-most (or bottom-most) patches do not run over
    the padded spatial size.

    Let :math:`(s_Y, s_X)` be the stride of filter application,
    :math:`(p_H, p_W)` the spatial padding size, and :math:`(d_Y, d_X)`
    the dilation factor of filter application. Then, the output size
    :math:`(h_O, w_O)` is determined by the following equations:

    .. math::

       h_O &= (h + 2p_H - k_H - (k_H - 1) * (d_Y - 1)) / s_Y + 1,\\\\
       w_O &= (w + 2p_W - k_W - (k_W - 1) * (d_X - 1)) / s_X + 1.

    If the bias vector is given, then it is added to all spatial locations of
    the output of convolution.

    """
    return convolution_2d.convolution_2d(x,
                                         W,
                                         b,
                                         stride,
                                         pad,
                                         cover_all,
                                         dilate=dilate)
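The output-size formula in the docstring above can also be written as a small helper; a sketch (the function name is illustrative):

def dilated_conv_output_size(size, k, stride, pad, dilate):
    # h_O = (h + 2*p_H - k_H - (k_H - 1) * (d_Y - 1)) // s_Y + 1, per the
    # docstring above (floor division, since cover_all=False).
    return (size + 2 * pad - k - (k - 1) * (dilate - 1)) // stride + 1

# e.g. a 224-pixel input with a 3x3 kernel, stride 1, pad 2 and dilation 2
# keeps its spatial size: (224 + 4 - 3 - 2) // 1 + 1 = 224.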