Example #1
    def forward(self, x):
        # TODO: cosim
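        # Take the iDeep-accelerated path when every input array is iDeep-compatible.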
        if ia.all_ready(x):
            return self.forward_ia(x)

        if hasattr(self, 'mask'):
            y = x[0] * self.mask
        else:
            scale = x[0].dtype.type(1. / (1 - self.dropout_ratio))
            xp = cuda.get_array_module(*x)
            if xp == numpy:
                flag = xp.random.rand(*x[0].shape) >= self.dropout_ratio
                self.mask = scale * flag
                y = x[0] * self.mask
            else:
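                # GPU path: draw the random numbers, then build the mask and
                # apply it in a single fused elementwise kernel.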
                rand = xp.random.rand(*x[0].shape, dtype=numpy.float32)
                self.mask, y = cuda.elementwise(
                    'T x, R r, T scale, T ratio',
                    'T mask, T y',
                    '''
                    mask = (r >= ratio) * scale;
                    y = x * mask;
                    ''',
                    'dropout_fwd',
                )(x[0], rand, scale, self.dropout_ratio)
        return y,
Example #2
    def init_state(self, param):
        xp = cuda.get_array_module(param.data)
        with cuda.get_device_from_array(param.data):
            self.state['v'] = xp.zeros_like(param.data)
        if ia.all_ready((self.state['v'], )):
            self.state['v'] = ia.array(self.state['v'],
                                       itype=ia.ideep4py.wgt_array)
Example #3
    def forward(self, inputs):
        x = inputs[0]
        W = inputs[1]
        if (ia.all_ready(inputs)):
            return self.forward_ia(inputs)

        if not type_check.same_types(*inputs):
            raise ValueError('numpy and cupy must not be used together\n'
                             'type(W): {0}, type(x): {1}'.format(
                                 type(W), type(x)))

        # NumPy raises an error when the array is not contiguous.
        # See: https://github.com/chainer/chainer/issues/2744
        # TODO(niboshi): Remove this code when NumPy is fixed.
        if (isinstance(x, numpy.ndarray)
                and not (x.flags.c_contiguous or x.flags.f_contiguous)
                and 1 in x.shape):
            x = numpy.ascontiguousarray(x)

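        # y = x W^T, cast to the input dtype; the optional bias is added below.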
        y = x.dot(W.T).astype(x.dtype, copy=False)
        if len(inputs) == 3:
            b = inputs[2]
            y += b
        self.retain_inputs((0, 1))  # b is not retained
        return y,
Example #4
    def backward(self, indexes, gy):
        y = self.get_retained_outputs()[0]
        # ReLUGrad3 needs the retained input x (iDeep and cuDNN paths), while
        # ReLUGrad2 computes the gradient from the retained output y alone.
        if (ia.all_ready(gy)) or \
                (chainer.should_use_cudnn('==always') and self._use_cudnn):
            x = self.get_retained_inputs()[0]
            return ReLUGrad3(x, y).apply((gy[0], ))
        else:
            return ReLUGrad2(y).apply((gy[0], ))
Example #5
    def forward(self, inputs):
        # currently, only support 4 dims
        if ia.all_ready((inputs), (4,)):
            return self.forward_ia(inputs)

        x, = inputs
        if isinstance(self.indices_or_sections, collections.Iterable):
            cdimx = x.shape[self.axis]
            ind = list(self.indices_or_sections)
            ind.append(cdimx)
        self._xp = cuda.get_array_module(x)
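        # Split along self.axis and record the output shapes for the backward pass.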
        ret = tuple(self._xp.split(x, self.indices_or_sections, self.axis))
        self._shapes = [r.shape for r in ret]
        return ret
Example #6
    def backward_cpu(self, x, gy):
        if ia.all_ready(x, (4,)):
            return self.backward_ia(x, gy)
        else:
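            # LRN backward:
            #   gx = gy * scale - 2 * alpha * beta * x * sum_window(y * gy / unit_scale)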
            half_n = self.n // 2
            summand = self.y * gy[0] / self.unit_scale
            sum_part = summand.copy()
            for i in six.moves.range(1, half_n + 1):
                sum_part[:, i:] += summand[:, :-i]
                sum_part[:, :-i] += summand[:, i:]

            gx = gy[0] * self.scale - 2 * \
                self.alpha * self.beta * x[0] * sum_part
            return gx,
Example #7
    def __init__(self, x, gamma):
        is_gamma_1d = gamma.ndim == 1
        # cuDNN only supports these tensor dimensions because they are
        # the most commonly used. If there is a need to support other
        # dimensions with cuDNN, we could consider reshaping the input
        # into a 2-dim array with channels as second dim and m=<product
        # of all dimensions except the 2nd dimension> as the first
        # dimension.
        self.is_for_conv2d = x.ndim == 4 and is_gamma_1d
        self.is_for_linear = x.ndim == 2 and is_gamma_1d
        self.cudnn_dim_ok = self.is_for_conv2d or self.is_for_linear
        # self.cudnn_dtype_ok = x.dtype != numpy.float16
        self.cudnn_dtype_ok = self.is_for_conv2d or (x.dtype != numpy.float16)
        self.ideep_ok = ia.all_ready((x, )) and is_gamma_1d
Example #8
    def __call__(self, x):
        """Applies the linear layer.

        Args:
            x (~chainer.Variable): Batch of input vectors.

        Returns:
            ~chainer.Variable: Output of the linear layer.

        """
        if self.W.data is None:
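            # Infer the per-sample input size from the first batch, initialise W,
            # then move the link to iDeep memory when supported.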
            self._initialize_params(x.size // x.shape[0])
            if ia.all_ready((self.W, )):
                self.to_ia()
        return linear.linear(x, self.W, self.b)
Example #9
    def forward_cpu(self, x):
        # pdb.set_trace()
        if ia.all_ready(x, (4,)):
            return self.forward_ia(x)
        else:
            # LRN forward: y = x * (k + alpha * sum of x^2 over the
            # +/- n/2 channel window) ** -beta.
            half_n = self.n // 2
            x2 = numpy.square(x[0])
            sum_part = x2.copy()
            for i in six.moves.range(1, half_n + 1):
                sum_part[:, i:] += x2[:, :-i]
                sum_part[:, :-i] += x2[:, i:]
            self.unit_scale = self.k + self.alpha * sum_part
            self.scale = self.unit_scale ** -self.beta
            self.y = x[0] * self.scale
            return self.y,
Example #10
    def __call__(self, x):
        """Applies the convolution layer.

        Args:
            x (~chainer.Variable): Input image.

        Returns:
            ~chainer.Variable: Output of the convolution.

        """
        if self.W.data is None:
            self._initialize_params(x.shape[1])
            if ia.all_ready((self.W, )):
                self.to_ia()
        return dilated_convolution_2d.dilated_convolution_2d(
            x, self.W, self.b, self.stride, self.pad, self.dilate)
Example #11
    def forward(self, inputs):
        self.retain_inputs((0, 1))
        if ((ia.all_ready(inputs)) and self.W_dtype == numpy.dtype('float32')):
            return self.forward_ia(inputs)
        x, gy = inputs
        if not type_check.same_types(*inputs):
            raise ValueError('numpy and cupy must not be used together\n'
                             'type(x): {0}, type(gy): {1}'.format(
                                 type(x), type(gy)))
        if (isinstance(gy, numpy.ndarray)
                and not (gy.flags.c_contiguous or gy.flags.f_contiguous)
                and 1 in gy.shape):
            gy = numpy.ascontiguousarray(gy)

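        # Weight gradient of the linear function: gW = gy^T x, cast to the
        # stored weight dtype.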
        gW = gy.T.dot(x).astype(self.W_dtype, copy=False)
        return gW,
Example #12
    def forward(self, inputs):
        # TODO: cosim
        if ia.all_ready(inputs):
            return self.forward_ia(inputs)
        y = inputs[0] * self.mask
        return y,
Example #13
    def forward(self, xs):
        if ia.all_ready(xs, (4,)):  # only support 4 dims now
            return self.forward_ia(xs)
        xp = cuda.get_array_module(*xs)
        return xp.concatenate(xs, self.axis),
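All of the snippets above share one dispatch idiom: probe the inputs with ia.all_ready (sometimes restricted to 4-D arrays) and branch into a *_ia method when the iDeep path applies, otherwise fall back to plain NumPy/CuPy code. The sketch below condenses that idiom; it assumes the ia module is importable from chainer as in these snippets, and scale_by_two / scale_by_two_ia are hypothetical names, not part of Chainer.

from chainer import cuda
from chainer import ia


def scale_by_two(inputs):
    # Take the iDeep-accelerated path when every input is iDeep-compatible;
    # some functions restrict this further, e.g. ia.all_ready(inputs, (4,)).
    if ia.all_ready(inputs):
        return scale_by_two_ia(inputs)
    # Generic fallback that works for both NumPy and CuPy arrays.
    xp = cuda.get_array_module(*inputs)
    return xp.multiply(inputs[0], 2),


def scale_by_two_ia(inputs):
    # Placeholder for an iDeep-specific implementation.
    return inputs[0] * 2,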