    def check_type_forward(self, in_types):
        n_in = in_types.size().eval()
        if n_in != 3 and n_in != 5:
            raise type_check.InvalidType(
                '%s or %s' % (in_types.size() == 3, in_types.size() == 5),
                '%s == %s' % (in_types.size(), n_in))

        x_type, gamma_type, beta_type = in_types[:3]
        type_check.expect(
            x_type.dtype == numpy.float32,
            x_type.ndim >= gamma_type.ndim + 1,
            # TODO(beam2d): Check shape
            gamma_type.dtype == numpy.float32,
            beta_type.dtype == numpy.float32,
            gamma_type.shape == beta_type.shape,
        )

        if len(in_types) == 5:
            mean_type, var_type = in_types[3:]
            type_check.expect(
                mean_type.dtype == numpy.float32,
                mean_type.shape == gamma_type.shape,
                var_type.dtype == numpy.float32,
                var_type.shape == gamma_type.shape,
            )
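The conditions passed to ``type_check.expect`` above are lazy expressions, not plain booleans; ``expect`` evaluates them and raises ``type_check.InvalidType`` with a readable message when one fails. Below is a minimal sketch (assuming Chainer's ``chainer.utils.type_check`` and hypothetical batch-norm-style shapes) of exercising the same kind of check on concrete arrays:

import numpy
from chainer.utils import type_check

# Hypothetical inputs: x is (N, C, H, W); gamma and beta are (C,).
x = numpy.zeros((2, 3, 4, 4), dtype=numpy.float32)
gamma = numpy.ones((3,), dtype=numpy.float32)
beta = numpy.zeros((3,), dtype=numpy.float32)

# get_types wraps the arrays in lazy type expressions (the same call that
# _check_data_type_forward uses in a later example).
in_types = type_check.get_types((x, gamma, beta), 'in_types', False)
type_check.expect(
    in_types[0].dtype == numpy.float32,
    in_types[0].ndim >= in_types[1].ndim + 1,
    in_types[1].shape == in_types[2].shape,
)  # passes; a violated condition would raise type_check.InvalidType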
Example #2
    def check_type_forward(self, in_types):
        n_in = type_check.eval(in_types.size())
        if n_in != 3 and n_in != 5:
            raise type_check.InvalidType(
                '%s or %s' % (in_types.size() == 3, in_types.size() == 5),
                '%s == %s' % (in_types.size(), n_in))
        x_type, gamma_type, beta_type = in_types[:3]
        M = type_check.eval(gamma_type.ndim)
        type_check.expect(
            x_type.dtype.kind == 'f',
            x_type.ndim >= gamma_type.ndim + 1,
            x_type.shape[1:1 + M] == gamma_type.shape,
            gamma_type.dtype == x_type.dtype,
            beta_type.dtype == x_type.dtype,
            gamma_type.shape == beta_type.shape,
        )
        if len(in_types) == 5:
            mean_type, var_type = in_types[3:]
            type_check.expect(
                mean_type.dtype == x_type.dtype,
                mean_type.shape == gamma_type.shape,
                var_type.dtype == x_type.dtype,
                var_type.shape == gamma_type.shape,
            )
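For intuition, the condition ``x_type.shape[1:1 + M] == gamma_type.shape`` requires that the axes of ``x`` immediately following the batch axis match ``gamma`` exactly. A plain-NumPy sketch with hypothetical shapes:

import numpy

x = numpy.zeros((8, 16, 32, 32), dtype=numpy.float32)  # (N, C, H, W)
gamma = numpy.ones((16,), dtype=numpy.float32)          # (C,)
M = gamma.ndim                                          # 1
# Axes 1 .. 1 + M of x must equal gamma's shape exactly.
assert x.shape[1:1 + M] == gamma.shape                  # (16,) == (16,)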
Example #3
    def _check_data_type_forward(self, in_data):
        in_type = type_check.get_types(in_data, 'in_types', False)
        try:
            self.check_type_forward(in_type)
        except type_check.InvalidType as e:
            msg = """
Invalid operation is performed in: {0} (Forward)

{1}""".format(self.label, str(e))
            raise type_check.InvalidType(e.expect, e.actual, msg=msg)
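``InvalidType`` keeps the original ``expect``/``actual`` pair, so the wrapper above only prepends the owning function's label to the message. A hedged sketch (assuming ``chainer.utils.type_check``; the exact message strings may differ) of what the exception carries:

import numpy
from chainer.utils import type_check

in_types = type_check.get_types((numpy.zeros(3, numpy.float32),), 'in_types', False)
try:
    type_check.expect(in_types[0].ndim == 2)  # fails: the array is 1-dimensional
except type_check.InvalidType as e:
    print(e.expect)  # the violated condition, e.g. in_types[0].ndim == 2
    print(e.actual)  # what was actually observed, e.g. in_types[0].ndim == 1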
Example #4
    def check_type_forward(self, in_types):
        type_check.expect(in_types.size() > 0)

        shapes = [t.eval().shape for t in in_types]
        r_shapes = [s[::-1] for s in shapes]
        r_filled = six.moves.zip_longest(*r_shapes, fillvalue=1)
        for ss in r_filled:
            d = max(ss)
            if not all(s == d or s == 1 for s in ss):
                expect = 'each dimension has the same size or is 1'
                actual = 'shapes: ' + ', '.join(map(str, shapes))
                raise type_check.InvalidType(expect, actual)
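The loop above implements the usual broadcasting rule: aligned from the trailing dimension, every size must either equal the largest size at that position or be 1. The same rule applied to plain tuples (hypothetical shapes):

import six

shapes = [(3, 1, 5), (4, 5), (5,)]
r_shapes = [s[::-1] for s in shapes]          # compare from the trailing axis
for ss in six.moves.zip_longest(*r_shapes, fillvalue=1):
    d = max(ss)
    assert all(s == d or s == 1 for s in ss)  # the three shapes broadcast to (3, 4, 5)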
Example #5
    def check_type_forward(self, in_types):
        n_in = type_check.eval(in_types.size())
        if n_in != 3:
            raise type_check.InvalidType(
                '%s == %s' % (in_types.size(), 3),
                '%s == %s' % (in_types.size(), n_in))
        x_type, gamma_type, beta_type = in_types[:3]
        M = type_check.eval(gamma_type.ndim)
        type_check.expect(
            x_type.dtype.kind == 'f',
            x_type.ndim >= gamma_type.ndim + 1,
            x_type.shape[1:1 + M] == gamma_type.shape,
            gamma_type.dtype == x_type.dtype,
            beta_type.dtype == x_type.dtype,
            gamma_type.shape == beta_type.shape,
        )
Example #6
    def check_type_forward(self, in_types):
        type_check.expect(in_types.size() == 1)

        ndim = type_check.Variable(len(self._shape), 'len(shape)')
        type_check.expect(in_types[0].ndim <= ndim)

        shape = in_types[0].shape.eval()
        # check the shape in inverse order
        for i in six.moves.range(-1, -len(shape) - 1, -1):
            if shape[i] == self._shape[i] or shape[i] == 1:
                continue
            expect = 'in_type[0].shape[%d] == %d' % (i, self._shape[i])
            if self._shape[i] != 1:
                expect += ' or in_type[0].shape[%d] == 1' % i
            actual = 'in_type[0].shape: %s' % str(shape)
            raise type_check.InvalidType(expect, actual)
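In effect this verifies that the input can be broadcast to the target ``self._shape``: compared from the last axis, each input size must equal the corresponding target size or be 1. A quick illustration with hypothetical shapes:

target = (4, 3, 5)  # stands in for self._shape
shape = (3, 1)      # hypothetical input shape, ndim <= len(target)
for i in range(-1, -len(shape) - 1, -1):
    assert shape[i] == target[i] or shape[i] == 1
# (3, 1) broadcasts to (4, 3, 5): axis -1 is 1 and axis -2 matches 3.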
Example #7
def maxout(x, pool_size, axis=1):
    """Maxout activation function.

    It accepts an input tensor ``x``, reshapes the ``axis`` dimension
    (say its size is ``M * pool_size``) into two dimensions
    ``(M, pool_size)``, and takes the maximum along the ``axis`` dimension.
    The output of this function is the same as ``x`` except that the ``axis``
    dimension is transformed from ``M * pool_size`` to ``M``.

    Typically, ``x`` is the output of a linear layer or a convolution layer.
    The following is an example where we use :func:`maxout` in combination
    with a Linear link.

    >>> import numpy, chainer, chainer.links as L
    >>> in_size, out_size, pool_size = 100, 100, 100
    >>> l = L.Linear(in_size, out_size * pool_size)
    >>> x = chainer.Variable(numpy.zeros((1, in_size), 'f'))  # prepare data
    >>> x = l(x)
    >>> y = maxout(x, pool_size)

    Args:
        x (~chainer.Variable): Input variable. Its first dimension is assumed
            to be the *minibatch dimension*. The other dimensions are treated
            as one concatenated dimension.
        pool_size (int): The size used for downsampling of the pooling layer.
        axis (int): The ``axis`` dimension to be reshaped. Its size should be
            ``M * pool_size``.

    Returns:
        ~chainer.Variable: Output variable.

    .. seealso:: :class:`~chainer.links.Maxout`
    """

    if pool_size <= 0:
        raise ValueError('pool_size must be a positive integer.')

    x_shape = x.data.shape
    if x_shape[axis] % pool_size != 0:
        expect = 'x.data.shape[axis] % pool_size == 0'
        actual = 'x.data.shape[axis]={}, pool_size={}'.format(
            x_shape[axis], pool_size)
        msg = 'axis dimension must be divisible by pool_size'
        raise type_check.InvalidType(expect, actual, msg)

    shape = (x_shape[:axis] + (x_shape[axis] // pool_size, pool_size) +
             x_shape[axis + 1:])
    x = reshape.reshape(x, shape)
    return minmax.max(x, axis=axis + 1)
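The body boils down to a reshape followed by a max reduction. A plain-NumPy sketch of the same computation (hypothetical sizes, ``axis=1``):

import numpy

pool_size = 4
x = numpy.arange(12, dtype=numpy.float32).reshape(1, 12)  # M * pool_size = 12
y = x.reshape(1, 12 // pool_size, pool_size).max(axis=2)  # shape (1, 3)
# y == [[3., 7., 11.]]: the maximum of each group of pool_size values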
Example #8
    def check_type_forward(self, in_types):
        n_in = in_types.size().eval()
        if n_in != 2 and n_in != 4:
            raise type_check.InvalidType(
                "%s or %s" % (in_types.size() == 2, in_types.size() == 4),
                "%s == %s" % (in_types.size(), n_in))

        x_type, gamma_type = in_types[:2]
        type_check.expect(
            x_type.dtype == np.float32,
            x_type.ndim >= gamma_type.ndim + 1,
            gamma_type.dtype == np.float32,
        )

        if len(in_types) == 4:
            mean_type, var_type = in_types[2:]
            type_check.expect(
                mean_type.dtype == np.float32,
                mean_type.shape == gamma_type.shape,
                var_type.dtype == np.float32,
                var_type.shape == gamma_type.shape,
            )
Example #9
    def check_type_forward(self, in_types):
        n_in = type_check.eval(in_types.size())
        if n_in != 3 and n_in != 6:
            raise type_check.InvalidType(
                '{0} or {1}'.format(in_types.size() == 3,
                                    in_types.size() == 6),
                '{0} == {1}'.format(in_types.size(), n_in))

        e1_type, e2_type, W_type = in_types[:3]
        type_check_prod = type_check.make_variable(numpy.prod, 'prod')
        type_check.expect(
            e1_type.dtype == numpy.float32,
            e1_type.ndim >= 2,
            e2_type.dtype == numpy.float32,
            e2_type.ndim >= 2,
            e1_type.shape[0] == e2_type.shape[0],
            W_type.dtype == numpy.float32,
            W_type.ndim == 3,
            type_check_prod(e1_type.shape[1:]) == W_type.shape[0],
            type_check_prod(e2_type.shape[1:]) == W_type.shape[1],
        )

        if n_in == 6:
            out_size = W_type.shape[2]
            V1_type, V2_type, b_type = in_types[3:]
            type_check.expect(
                V1_type.dtype == numpy.float32,
                V1_type.ndim == 2,
                V1_type.shape[0] == W_type.shape[0],
                V1_type.shape[1] == out_size,
                V2_type.dtype == numpy.float32,
                V2_type.ndim == 2,
                V2_type.shape[0] == W_type.shape[1],
                V2_type.shape[1] == out_size,
                b_type.dtype == numpy.float32,
                b_type.ndim == 1,
                b_type.shape[0] == out_size,
            )
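``type_check.make_variable`` wraps ``numpy.prod`` so it can appear inside the lazy expressions; the constraint says the flattened non-batch size of each input must match the corresponding axis of ``W``. In plain NumPy, with hypothetical shapes:

import numpy

e1 = numpy.zeros((8, 3, 4), dtype=numpy.float32)  # flattened feature size 12
e2 = numpy.zeros((8, 6), dtype=numpy.float32)     # flattened feature size 6
W = numpy.zeros((12, 6, 5), dtype=numpy.float32)  # (prod(e1[1:]), prod(e2[1:]), out_size)
assert numpy.prod(e1.shape[1:]) == W.shape[0]     # 3 * 4 == 12
assert numpy.prod(e2.shape[1:]) == W.shape[1]     # 6 == 6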
Example #10
File: maxout.py  Project: km-t/dcpython
def maxout(x, pool_size, axis=1):
    """Maxout activation function.

    It accepts an input tensor ``x``, reshapes the ``axis`` dimension
    (say its size is ``M * pool_size``) into two dimensions
    ``(M, pool_size)``, and takes the maximum along the ``axis`` dimension.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable. An :math:`n`-dimensional (:math:`n \\ge` ``axis``)
            float array. In general, its first dimension is assumed to be the
            *minibatch dimension*. The other dimensions are treated as one
            concatenated dimension.
        pool_size (int):
            The size used for downsampling of the pooling layer.
        axis (int):
            The ``axis`` dimension to be reshaped. The size of ``axis``
            dimension should be ``M * pool_size``.

    Returns:
        ~chainer.Variable:
            Output variable. The shape of the output is the same as ``x``
            except that the ``axis`` dimension is transformed from
            ``M * pool_size`` to ``M``.

    .. seealso:: :class:`~chainer.links.Maxout`

    .. admonition:: Example

        Typically, ``x`` is the output of a linear layer or a convolution
        layer. The following is the example where we use :func:`maxout` in
        combination with a Linear link.

        >>> in_size, out_size, pool_size = 10, 10, 10
        >>> bias = np.arange(out_size * pool_size).astype(np.float32)
        >>> l = L.Linear(in_size, out_size * pool_size, initial_bias=bias)
        >>> x = np.zeros((1, in_size), np.float32)  # prepare data
        >>> x = l(x)
        >>> y = F.maxout(x, pool_size)
        >>> x.shape
        (1, 100)
        >>> y.shape
        (1, 10)
        >>> x.reshape((out_size, pool_size)).data
        array([[ 0.,  1.,  2.,  3.,  4.,  5.,  6.,  7.,  8.,  9.],
               [10., 11., 12., 13., 14., 15., 16., 17., 18., 19.],
               [20., 21., 22., 23., 24., 25., 26., 27., 28., 29.],
               [30., 31., 32., 33., 34., 35., 36., 37., 38., 39.],
               [40., 41., 42., 43., 44., 45., 46., 47., 48., 49.],
               [50., 51., 52., 53., 54., 55., 56., 57., 58., 59.],
               [60., 61., 62., 63., 64., 65., 66., 67., 68., 69.],
               [70., 71., 72., 73., 74., 75., 76., 77., 78., 79.],
               [80., 81., 82., 83., 84., 85., 86., 87., 88., 89.],
               [90., 91., 92., 93., 94., 95., 96., 97., 98., 99.]], \
dtype=float32)
        >>> y.data
        array([[ 9., 19., 29., 39., 49., 59., 69., 79., 89., 99.]], \
dtype=float32)

    """

    if pool_size <= 0:
        raise ValueError('pool_size must be a positive integer.')

    x_shape = x.shape
    if x_shape[axis] % pool_size != 0:
        expect = 'x.shape[axis] % pool_size == 0'
        actual = 'x.shape[axis]={}, pool_size={}'.format(
            x_shape[axis], pool_size)
        msg = 'axis dimension must be divisible by pool_size'
        raise type_check.InvalidType(expect, actual, msg)

    shape = (x_shape[:axis] + (x_shape[axis] // pool_size, pool_size) +
             x_shape[axis + 1:])
    x = reshape.reshape(x, shape)
    return minmax.max(x, axis=axis + 1)
Example #11
    def test_pickle(self):
        exc = T.InvalidType('foo', 'bar', 'baz')
        new = pickle.loads(pickle.dumps(exc))
        self.assertEqual(exc.args, new.args)
        self.assertEqual(exc.expect, new.expect)
        self.assertEqual(exc.actual, new.actual)