Example #1
0
def copyto(dst, src):
    """Copy the elements of ``src`` into ``dst``, across devices if needed.

    The destination array decides the transfer direction: a NumPy array
    pulls the source to the CPU, an iDeep array likewise, and a CuPy array
    pushes the source to the GPU holding ``dst``.

    Args:
        dst (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):
            Destination array.
        src (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):
            Source array.

    """
    if isinstance(dst, numpy.ndarray):
        numpy.copyto(dst, numpy.asarray(cuda.to_cpu(src)))
        return
    if isinstance(dst, intel64.mdarray):
        intel64.ideep.basic_copyto(dst, cuda.to_cpu(src))
        return
    if not isinstance(dst, cuda.ndarray):
        raise TypeError('cannot copy to non-array object of type {}'.format(
            type(dst)))

    # From here on, dst lives on a GPU.
    if isinstance(src, chainer.get_cpu_array_types()):
        host = numpy.asarray(src)
        if dst.flags.c_contiguous or dst.flags.f_contiguous:
            # Contiguous destination buffer: upload into it directly.
            dst.set(host)
        else:
            cuda.cupy.copyto(dst, cuda.to_gpu(host, device=dst.device))
    elif isinstance(src, cuda.ndarray):
        cuda.cupy.copyto(dst, src)
    else:
        raise TypeError('cannot copy from non-array object of type {}'.format(
            type(src)))
Example #2
0
def copyto(dst, src):
    """Copy the elements of one ndarray into another, possibly cross-device.

    Dispatches on the type of ``dst``: NumPy and iDeep destinations receive
    a host copy of ``src``; CuPy destinations receive a device copy.

    Args:
        dst (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):
            Destination array.
        src (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):
            Source array.

    """
    if isinstance(dst, numpy.ndarray):
        numpy.copyto(dst, _cpu._to_cpu(src))
    elif isinstance(dst, intel64.mdarray):
        intel64.ideep.basic_copyto(dst, _cpu._to_cpu(src))
    elif isinstance(dst, cuda.ndarray):
        if isinstance(src, chainer.get_cpu_array_types()):
            host = numpy.asarray(src)
            contiguous = dst.flags.c_contiguous or dst.flags.f_contiguous
            if contiguous:
                # A contiguous GPU buffer can be filled with a direct upload.
                dst.set(host)
            else:
                cuda.cupy.copyto(dst, cuda.to_gpu(host, device=dst.device))
        elif isinstance(src, cuda.ndarray):
            cuda.cupy.copyto(dst, src)
        else:
            raise TypeError('cannot copy from non-array object of type {}'
                            .format(type(src)))
    else:
        raise TypeError('cannot copy to non-array object of type {}'.format(
            type(dst)))
Example #3
0
    def forward(self, inputs):
        """Run one LSTM step and return ``(c_next, h)``.

        ``inputs`` is ``(c_prev, x)`` where ``x`` packs the four gate
        pre-activations (a, i, f, o).  Only the first ``len(x)`` rows of the
        cell state are updated; the remaining rows are carried over.
        """
        self.retain_inputs((0, 1))
        c_prev, x = inputs
        a, i, f, o = _extract_gates(x)
        batch = len(x)

        if not isinstance(x, chainer.get_cpu_array_types()):
            # GPU path: gate activations and the cell update are fused into
            # a single elementwise kernel.
            c_next = cuda.cupy.empty_like(c_prev)
            h = cuda.cupy.empty_like(c_next[:batch])
            cuda.elementwise(
                'T c_prev, T a, T i_, T f, T o', 'T c, T h',
                '''
                    COMMON_ROUTINE;
                    c = aa * ai + af * c_prev;
                    h = ao * tanh(c);
                ''',
                'lstm_fwd', preamble=_preamble)(
                    c_prev[:batch], a, i, f, o, c_next[:batch], h)
        else:
            # CPU path, via iDeep when available.
            if intel64.should_use_ideep('>=auto'):
                xp = intel64.ideep.get_array_module(x)
            else:
                xp = numpy
            a = xp.tanh(a)
            i = _sigmoid(i, xp)
            f = _sigmoid(f, xp)
            o = _sigmoid(o, xp)

            c_next = numpy.empty_like(c_prev)
            c_next[:batch] = a * i + f * c_prev[:batch]
            h = o * xp.tanh(c_next[:batch])

        # Rows beyond the current batch keep their previous cell state.
        c_next[batch:] = c_prev[batch:]
        self.retain_outputs((0,))
        return c_next, h
Example #4
0
    def forward(self, inputs):
        """Compute a single LSTM forward step.

        Takes ``(c_prev, x)``, where ``x`` holds the packed gate
        pre-activations, and returns the new cell state and hidden output
        ``(c_next, h)``.  Cell rows past ``len(x)`` are copied through
        unchanged.
        """
        self.retain_inputs((0, 1))
        c_prev, x = inputs
        a, i, f, o = _extract_gates(x)
        batch = len(x)

        if isinstance(x, chainer.get_cpu_array_types()):
            # Pick iDeep or plain NumPy for the CPU computation.
            xp = (intel64.ideep.get_array_module(x)
                  if intel64.should_use_ideep('>=auto') else numpy)
            a = xp.tanh(a)
            i = _sigmoid(i, xp)
            f = _sigmoid(f, xp)
            o = _sigmoid(o, xp)

            c_next = numpy.empty_like(c_prev)
            c_next[:batch] = a * i + f * c_prev[:batch]
            h = o * xp.tanh(c_next[:batch])
        else:
            # GPU path: one fused kernel computes both c and h.
            c_next = cuda.cupy.empty_like(c_prev)
            h = cuda.cupy.empty_like(c_next[:batch])
            cuda.elementwise(
                'T c_prev, T a, T i_, T f, T o', 'T c, T h',
                '''
                    COMMON_ROUTINE;
                    c = aa * ai + af * c_prev;
                    h = ao * tanh(c);
                ''',
                'lstm_fwd', preamble=_preamble)(
                    c_prev[:batch], a, i, f, o, c_next[:batch], h)

        # Untouched rows carry the previous cell state forward.
        c_next[batch:] = c_prev[batch:]
        self.retain_outputs((0,))
        return c_next, h
Example #5
0
def _array_to_cpu(array, stream):
    if array is None:
        return None
    if isinstance(array, ndarray):
        check_cuda_available()
        with get_device_from_array(array):
            return array.get(stream)
    elif isinstance(array, (numpy.number, numpy.bool_)):
        return numpy.asarray(array)
    elif isinstance(array, chainer.get_cpu_array_types()):
        return array
    else:
        raise TypeError(
            'The array sent to cpu must be numpy.ndarray or cupy.ndarray, '
            'or a NumPy scalar.'
            '\nActual type: {0}.'.format(type(array)))
Example #6
0
File: cuda.py Project: tkng/chainer
def _array_to_cpu(array, stream):
    if array is None:
        return None
    if isinstance(array, ndarray):
        check_cuda_available()
        with get_device_from_array(array):
            return array.get(stream)
    elif isinstance(array, (numpy.number, numpy.bool_)):
        return numpy.asarray(array)
    elif isinstance(array, chainer.get_cpu_array_types()):
        return array
    else:
        raise TypeError(
            'The array sent to cpu must be numpy.ndarray or cupy.ndarray, '
            'or a NumPy scalar.'
            '\nActual type: {0}.'.format(type(array)))
Example #7
0
    def forward(self, inputs):
        """TreeLSTM forward: merge ``n_ary`` child cell states into one.

        ``inputs`` is ``(c_1, ..., c_N, x)`` where ``x`` packs ``3 + N``
        gate pre-activations (a, i, o, f_1..f_N).  Returns ``(c, h)``.
        """
        cs, x = inputs[:-1], inputs[-1]
        n_ary = len(cs)
        gates = list(_extract_gates(x, 3 + n_ary))
        a, i, o = gates[:3]
        fs = gates[3:]

        if isinstance(x, chainer.get_cpu_array_types()):
            self.a = numpy.tanh(a)
            self.i = _sigmoid(i)
            self.o = _sigmoid(o)
            self.fs = [_sigmoid(f) for f in fs]
            # c = a*i + sum_k f_k * c_k
            self.c = self.a * self.i + sum(f * c for f, c in zip(self.fs, cs))
            h = self.o * numpy.tanh(self.c)
        else:
            # Build the kernel pieces for a variable number of children.
            idx = list(six.moves.range(1, n_ary + 1))
            preamble = _preamble + ' '.join(
                'T af{} = sigmoid(f{});'.format(k, k) for k in idx)
            cells_str = ', '.join('T c{}'.format(k) for k in idx)
            fgates_str = ', '.join('T f{}'.format(k) for k in idx)
            fc_calc_str = ' + '.join('af{} * c{}'.format(k, k) for k in idx)
            self.c, h = cuda.elementwise(
                'T a, T i_, T o, {}, {}'.format(cells_str, fgates_str),
                'T c, T h',
                '''
                    COMMON_ROUTINE;
                    c = aa * ai + {};
                    h = ao * tanh(c);
                '''.format(fc_calc_str),
                'treelstm_fwd', preamble=preamble)(
                    a, i, o, *(list(cs) + fs))

        return self.c, h
Example #8
0
    def forward(self, inputs):
        """Run the TreeLSTM cell over ``n_ary`` child states.

        ``inputs`` ends with the packed gate array ``x`` (a, i, o and one
        forget gate per child); everything before it is the child cell
        states.  Returns the combined ``(c, h)``.
        """
        cs, x = inputs[:-1], inputs[-1]
        n_ary = len(cs)
        gates = list(_extract_gates(x, 3 + n_ary))
        a, i, o = gates[:3]
        fs = gates[3:]

        if isinstance(x, chainer.get_cpu_array_types()):
            self.a = numpy.tanh(a)
            self.i = _sigmoid(i)
            self.o = _sigmoid(o)
            self.fs = [_sigmoid(f) for f in fs]

            self.c = self.a * self.i + sum(f * c for f, c in zip(self.fs, cs))
            h = self.o * numpy.tanh(self.c)
        else:
            # Generate per-child kernel fragments, then launch one kernel.
            suffixes = list(six.moves.range(1, n_ary + 1))
            preamble = _preamble + ' '.join(
                ['T af{} = sigmoid(f{});'.format(j, j) for j in suffixes])
            cell_args = ', '.join(['T c{}'.format(j) for j in suffixes])
            fgate_args = ', '.join(['T f{}'.format(j) for j in suffixes])
            weighted_sum = ' + '.join(
                ['af{} * c{}'.format(j, j) for j in suffixes])
            in_params = 'T a, T i_, T o, {}, {}'.format(cell_args, fgate_args)
            body = '''
                    COMMON_ROUTINE;
                    c = aa * ai + {};
                    h = ao * tanh(c);
                '''.format(weighted_sum)
            kernel = cuda.elementwise(
                in_params, 'T c, T h', body, 'treelstm_fwd',
                preamble=preamble)
            self.c, h = kernel(a, i, o, *(list(cs) + fs))

        return self.c, h