Example #1
def _create_batch_norm_ndarray_args(
        xp, device, x_shape, gamma_shape, beta_shape, mean_shape, var_shape,
        float_dtype):
    x = array_utils.create_dummy_ndarray(xp, x_shape, float_dtype)

    # Non-contiguous gamma and beta are not supported by CUDA.
    # TODO(hvy): Support non-contiguous gamma and beta with CUDA. Create a
    # contiguous copy in the cuDNN wrapper.
    pad_gamma_beta = device.backend.name != 'cuda'
    gamma = array_utils.create_dummy_ndarray(
        xp, gamma_shape, float_dtype, padding=pad_gamma_beta)
    beta = array_utils.create_dummy_ndarray(
        xp, beta_shape, float_dtype, padding=pad_gamma_beta)

    # Non-contiguous running values, which are updated in-place, are not
    # supported by CUDA, so we only pad for other devices.
    pad_running = device.backend.name != 'cuda'
    mean = array_utils.create_dummy_ndarray(
        xp, mean_shape, float_dtype, padding=pad_running)
    var = array_utils.create_dummy_ndarray(
        xp, var_shape, float_dtype, padding=pad_running, start=0)

    # TODO(imanishi): Remove them after supporting random test
    x /= x.size
    gamma /= gamma.size
    beta /= beta.size
    mean /= mean.size
    var /= var.size

    return x, gamma, beta, mean, var
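
The tuple built above is meant to be passed straight to the batch normalization routine under test. Below is a minimal usage sketch; the call to chainerx.batch_norm and the use of chainerx.get_default_device() are assumptions about the surrounding test code, not lines taken from it.

# Hedged usage sketch (assumed caller, not part of the original test module).
device = chainerx.get_default_device()
x, gamma, beta, mean, var = _create_batch_norm_ndarray_args(
    chainerx, device, (2, 3, 4, 4), (3,), (3,), (3,), (3,), 'float32')
# The running mean/var arrays are expected to be updated in place by the call.
y = chainerx.batch_norm(x, gamma, beta, mean, var)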
Example #2
def test_linear(device, x_shape, w_shape, b_shape, n_batch_axes, dtype):
    # TODO(imanishi): Remove the skip after supporting non-float dot on CUDA
    if device.name == 'cuda:0' and numpy.dtype(dtype).kind != 'f':
        return chainerx.testing.ignore()
    x = array_utils.create_dummy_ndarray(numpy, x_shape, dtype)
    w = array_utils.create_dummy_ndarray(numpy, w_shape, dtype)
    b = (None if b_shape in (None, Unspecified)
         else array_utils.create_dummy_ndarray(numpy, b_shape, dtype))

    # Calculate chainerx_out
    chainerx_x = chainerx.array(x)
    chainerx_w = chainerx.array(w)
    chainerx_b = chainerx.array(b) if b is not None else None
    if b_shape is Unspecified:
        chainerx_out = chainerx.linear(chainerx_x, chainerx_w)
    elif n_batch_axes is Unspecified:
        chainerx_out = chainerx.linear(chainerx_x, chainerx_w, chainerx_b)
    else:
        chainerx_out = chainerx.linear(chainerx_x, chainerx_w, chainerx_b,
                                       n_batch_axes)

    # Calculate numpy_out
    if n_batch_axes is Unspecified:
        n_batch_axes = 1
    out_shape = x_shape[:n_batch_axes] + (w_shape[0],)
    x = x.reshape(numpy.prod(x_shape[:n_batch_axes]),
                  numpy.prod(x_shape[n_batch_axes:]))
    numpy_out = x.dot(w.T).reshape(out_shape)
    if b is not None:
        numpy_out += b

    chainerx.testing.assert_array_equal(chainerx_out, numpy_out)
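
To make the reference computation concrete, here is a small hand-checked shape example (illustrative values only, not part of the test parametrization):

# With n_batch_axes=2 the leading two axes of x are batch dimensions and the
# remaining axes are flattened into features before the dot product.
# x: (2, 3, 4) -> reshaped to (2 * 3, 4) = (6, 4)
# w: (5, 4)    -> w.T is (4, 5), so x.dot(w.T) has shape (6, 5)
# out_shape = x_shape[:2] + (w_shape[0],) = (2, 3, 5)
x = numpy.zeros((2, 3, 4), dtype='float32')
w = numpy.zeros((5, 4), dtype='float32')
out = x.reshape(6, 4).dot(w.T).reshape(2, 3, 5)
assert out.shape == (2, 3, 5)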
Example #3
def test_add(xp, device, shape, dtype, is_module):
    lhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=1)
    rhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=2)
    if is_module:
        return xp.add(lhs, rhs)
    else:
        return lhs + rhs
Example #4
def test_sub(xp, device, shape, numeric_dtype, is_module):
    lhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype, pattern=1)
    rhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype, pattern=2)
    if is_module:
        return xp.subtract(lhs, rhs)
    else:
        return lhs - rhs
Example #5
def test_asarray_from_numpy_array_with_zero_copy():
    obj = array_utils.create_dummy_ndarray(
        numpy, (2, 3), 'float32', padding=True)
    obj_refcount_before = sys.getrefcount(obj)

    a = chainerx.asarray(obj, dtype='float32')

    assert sys.getrefcount(obj) == obj_refcount_before + 1
    chainerx.testing.assert_array_equal_ex(obj, a)

    # test buffer is shared (zero copy)
    a += a
    chainerx.testing.assert_array_equal_ex(obj, a)

    # test possibly freed memory
    obj_copy = obj.copy()
    del obj
    chainerx.testing.assert_array_equal_ex(obj_copy, a, strides_check=False)

    # test possibly freed memory (the other way)
    obj = array_utils.create_dummy_ndarray(
        numpy, (2, 3), 'float32', padding=True)
    a = chainerx.asarray(obj, dtype='float32')
    a_copy = a.copy()
    del a
    chainerx.testing.assert_array_equal_ex(a_copy, obj, strides_check=False)
Example #6
def test_dummy_ndarray_padding(xp, shape, dtype, padding, expected_strides):
    if padding is None:
        a = array_utils.create_dummy_ndarray(xp, shape, dtype)
    else:
        a = array_utils.create_dummy_ndarray(xp, shape, dtype, padding=padding)
    assert isinstance(a, xp.ndarray)
    assert a.shape == shape
    assert a.dtype == xp.dtype(dtype)
    assert a.strides == expected_strides
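
array_utils.create_dummy_ndarray itself is not reproduced on this page. The sketch below is only an assumption about the behaviour these examples rely on: deterministic values, a pattern/start offset so two arrays in one test differ, and an optional padding mode that yields a non-contiguous view. The real helper in the ChainerX test suite may be implemented differently.

def create_dummy_ndarray_sketch(shape, dtype, padding=True, start=None,
                                pattern=1):
    # NumPy-only sketch of the assumed behaviour; not the actual helper.
    size = int(numpy.prod(shape, dtype=int))
    if start is None:
        start = pattern
    a = numpy.arange(start, start + size, dtype=dtype).reshape(shape)
    if not padding:
        return a
    # Carve a strided view out of a larger buffer so the result is
    # non-contiguous, mimicking the padding=True case.
    padded = numpy.zeros([2 * s + 1 for s in shape], dtype=dtype)
    view = padded[tuple(slice(0, 2 * s, 2) for s in shape)]
    view[...] = a
    return view

# Usage of the sketch: padded arrays come back non-contiguous.
a = create_dummy_ndarray_sketch((2, 3), 'float32', padding=True)
assert not a.flags.c_contiguous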
Example #7
def _create_conv_transpose_args(
        xp, device, x_shape, w_shape, b_shape, stride, pad, outsize,
        float_dtype):
    x = array_utils.create_dummy_ndarray(xp, x_shape, float_dtype)
    w = array_utils.create_dummy_ndarray(xp, w_shape, float_dtype)
    if b_shape is None:
        b = None
    else:
        b = array_utils.create_dummy_ndarray(xp, b_shape, float_dtype)
    return x, w, b, stride, pad, outsize
Example #8
def test_dot_invalid(is_module, xp, device, a_shape, b_shape, dtype):
    # TODO(beam2d): Remove the skip after supporting non-float dot on CUDA
    if device.name == 'cuda:0' and numpy.dtype(dtype).kind != 'f':
        return chainerx.testing.ignore()
    a = array_utils.create_dummy_ndarray(xp, a_shape, dtype)
    b = array_utils.create_dummy_ndarray(xp, b_shape, dtype)
    if is_module:
        return xp.dot(a, b)
    else:
        return a.dot(b)
Example #9
def test_as_grad_stopped_view(shape, float_dtype):
    dtype = float_dtype

    # Stop gradients on all graphs
    with chainerx.backprop_scope('bp1') as bp1, \
            chainerx.backprop_scope('bp2') as bp2, \
            chainerx.backprop_scope('bp3') as bp3:

        a = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
        a.require_grad(bp1)
        a.require_grad(bp2)
        assert a.is_grad_required(bp1)
        assert a.is_grad_required(bp2)
        assert a.is_backprop_required(bp1)
        assert a.is_backprop_required(bp2)
        b = a.as_grad_stopped(copy=False)

        chainerx.testing.assert_array_equal_ex(a, b)
        assert b.device is a.device
        assert not b.is_grad_required(bp1)
        assert not b.is_grad_required(bp2)
        assert not b.is_backprop_required(bp1)
        assert not b.is_backprop_required(bp2)

        assert a.is_backprop_required(bp1)
        assert a.is_backprop_required(bp2)

        # Stop gradients on some graphs
        a = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
        a.require_grad(bp1)
        a.require_grad(bp2)
        a.require_grad(bp3)
        assert a.is_grad_required(bp1)
        assert a.is_grad_required(bp2)
        assert a.is_grad_required(bp3)
        assert a.is_backprop_required(bp1)
        assert a.is_backprop_required(bp2)
        assert a.is_backprop_required(bp3)
        b = a.as_grad_stopped([bp1, bp2], copy=False)

        chainerx.testing.assert_array_equal_ex(a, b)
        assert b.device is a.device
        assert not b.is_grad_required(bp1)
        assert not b.is_grad_required(bp2)
        assert not b.is_grad_required(bp3)
        assert not b.is_backprop_required(bp1)
        assert not b.is_backprop_required(bp2)
        assert b.is_backprop_required(bp3)

        assert a.is_grad_required(bp1)
        assert a.is_grad_required(bp2)
        assert a.is_grad_required(bp3)
        assert a.is_backprop_required(bp1)
        assert a.is_backprop_required(bp2)
        assert a.is_backprop_required(bp3)
Example #10
def test_cmp_invalid(cmp_op, chx_cmp, a_shape, b_shape):
    def check(x, y):
        with pytest.raises(chainerx.DimensionError):
            cmp_op(x, y)

        with pytest.raises(chainerx.DimensionError):
            chx_cmp(x, y)

    a = array_utils.create_dummy_ndarray(chainerx, a_shape, 'float32')
    b = array_utils.create_dummy_ndarray(chainerx, b_shape, 'float32')
    check(a, b)
    check(b, a)
Example #11
def test_cmp_invalid_dtypes(cmp_op, chx_cmp, numeric_dtype):
    def check(x, y):
        with pytest.raises(chainerx.DtypeError):
            cmp_op(x, y)

        with pytest.raises(chainerx.DtypeError):
            chx_cmp(x, y)

    a = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'bool_')
    b = array_utils.create_dummy_ndarray(chainerx, (2, 3), numeric_dtype)
    check(a, b)
    check(b, a)
Example #12
def _create_conv_args(
        xp, device, x_shape, w_shape, b_shape, stride, pad, cover_all,
        float_dtype):
    x = array_utils.create_dummy_ndarray(xp, x_shape, float_dtype)
    w = array_utils.create_dummy_ndarray(xp, w_shape, float_dtype)
    if b_shape is None:
        b = None
    else:
        b = array_utils.create_dummy_ndarray(xp, b_shape, float_dtype)
    if device.backend.name == 'cuda':  # cover_all is not supported by CUDA.
        cover_all = False
    return x, w, b, stride, pad, cover_all
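
Presumably the tuple is unpacked directly into the convolution call under test. The following sketch assumes the chainerx.conv argument order (x, w, b, stride, pad, cover_all) and a default device; it is not taken from the original module.

# Hedged usage sketch (assumed caller, not part of the original test module).
device = chainerx.get_default_device()
x, w, b, stride, pad, cover_all = _create_conv_args(
    chainerx, device, (1, 3, 10, 10), (5, 3, 3, 3), (5,), 1, 1, False,
    'float32')
y = chainerx.conv(x, w, b, stride, pad, cover_all)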
Example #13
def test_conv_transpose_invalid(
        device, x_shape, w_shape, b_shape, stride, pad, outsize, float_dtype):
    dtype = float_dtype
    x = array_utils.create_dummy_ndarray(chainerx, x_shape, dtype)
    w = array_utils.create_dummy_ndarray(chainerx, w_shape, dtype)
    if b_shape is None:
        b = None
    else:
        b = array_utils.create_dummy_ndarray(chainerx, b_shape, float_dtype)

    with pytest.raises(chainerx.DimensionError):
        chainerx.conv_transpose(x, w, b, stride, pad, outsize)
Example #14
def test_log_softmax(xp, device, a_shape, axis, float_dtype):
    a = array_utils.create_dummy_ndarray(xp, a_shape, float_dtype)
    if xp is numpy:
        # Default is the second axis
        axis = axis if axis is not None else 1
        return a - xp.log(xp.sum(xp.exp(a), axis=axis, keepdims=True))
    return xp.log_softmax(a, axis=axis)
Example #15
def test_iadd_scalar(xp, scalar, device, shape, dtype):
    lhs = array_utils.create_dummy_ndarray(xp, shape, dtype)
    rhs = scalar
    if xp is numpy:
        rhs = numpy.dtype(dtype).type(rhs)
    lhs += rhs
    return lhs
Example #16
def test_frombuffer_with_device(device):
    obj = array_utils.create_dummy_ndarray(
        numpy, (2, 3), 'int32', padding=False)
    a = chainerx.frombuffer(obj, obj.dtype, device=device)
    b = chainerx.frombuffer(obj, obj.dtype)
    chainerx.testing.assert_array_equal_ex(a, b)
    array_utils.check_device(a, device)
Example #17
def test_ascontiguousarray_from_numpy_array(xp, shape, dtype, padding):
    obj = array_utils.create_dummy_ndarray(
        numpy, shape, dtype, padding=padding)
    a = xp.ascontiguousarray(obj)
    if xp is chainerx:
        assert a.is_contiguous
    return a
Example #18
def test_rollaxis_invalid(in_shape, axis, start):
    a = array_utils.create_dummy_ndarray(chainerx, in_shape, 'float32')
    with pytest.raises(chainerx.DimensionError):
        if start is _unspecified:
            chainerx.rollaxis(a, axis)
        else:
            chainerx.rollaxis(a, axis, start)
Example #19
def test_sigmoid(xp, device, shape, float_dtype):
    # TODO(imanishi): Dtype promotion is not supported yet.
    x = array_utils.create_dummy_ndarray(xp, shape, float_dtype)
    if xp is numpy:
        return numpy.reciprocal(1 + numpy.exp(-x))
    else:
        return chainerx.sigmoid(x)
Example #20
def test_itruediv(xp, device, shape, float_dtype):
    # TODO(niboshi): Remove padding=False
    lhs = array_utils.create_dummy_ndarray(
        xp, shape, float_dtype, padding=False)
    rhs = xp.arange(1, lhs.size + 1, dtype=float_dtype).reshape(shape)
    lhs /= rhs
    return lhs
Example #21
def test_itruediv_scalar(xp, scalar, device, shape, float_dtype):
    # TODO(niboshi): Remove padding=False
    lhs = array_utils.create_dummy_ndarray(
        xp, shape, float_dtype, padding=False)
    rhs = scalar
    lhs /= rhs
    return lhs
Example #22
def _create_average_pool_args(
        xp, device, x_shape, ksize, stride, pad, pad_mode, float_dtype):
    x = array_utils.create_dummy_ndarray(xp, x_shape, float_dtype)
    ret_args = dict(x=x, ksize=ksize)
    if stride is not None:
        ret_args['stride'] = stride
    if pad is not None:
        ret_args['pad'] = pad

    if pad_mode is None:
        # chainerx defaults to 'ignore', which is equivalent to
        # pad_value=None in chainer.
        if xp is not chainerx:
            ret_args['pad_value'] = None
    else:
        if xp is chainerx:
            ret_args['pad_mode'] = pad_mode
        else:
            if pad_mode == 'zero':
                ret_args['pad_value'] = 0
            elif pad_mode == 'ignore':
                ret_args['pad_value'] = None
            else:
                assert False  # should never reach

    return ret_args
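
The returned dict is presumably splatted as keyword arguments into whichever pooling routine the test targets; the parameter names pad_mode (ChainerX) and pad_value (Chainer) come from the branches above, while the function name chainerx.average_pool used in this sketch is an assumption.

# Hedged usage sketch (assumed caller, not part of the original test module).
ret_args = _create_average_pool_args(
    chainerx, chainerx.get_default_device(), (1, 3, 8, 8), 2, None, None,
    'ignore', 'float32')
y = chainerx.average_pool(**ret_args)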
Example #23
def test_sub_scalar(scalar, device, shape, dtype):
    if dtype == 'bool_':
        # Boolean subtract is deprecated.
        return chainerx.testing.ignore()
    x_np = array_utils.create_dummy_ndarray(numpy, shape, dtype)
    # Implicit casting in NumPy's subtract depends on the 'casting' argument,
    # which is not yet supported (ChainerX always casts).
    # Therefore, we explicitly cast the scalar to the dtype of the ndarray
    # before the subtraction for NumPy.
    expected = x_np - numpy.dtype(dtype).type(scalar)
    expected_rev = numpy.dtype(dtype).type(scalar) - x_np

    x = chainerx.array(x_np)
    scalar_chx = chainerx.Scalar(scalar, dtype)
    chainerx.testing.assert_array_equal_ex(x - scalar, expected)
    chainerx.testing.assert_array_equal_ex(x - scalar_chx, expected)
    chainerx.testing.assert_array_equal_ex(scalar - x, expected_rev)
    chainerx.testing.assert_array_equal_ex(scalar_chx - x, expected_rev)
    chainerx.testing.assert_array_equal_ex(
        chainerx.subtract(x, scalar), expected)
    chainerx.testing.assert_array_equal_ex(
        chainerx.subtract(x, scalar_chx), expected)
    chainerx.testing.assert_array_equal_ex(
        chainerx.subtract(scalar, x), expected_rev)
    chainerx.testing.assert_array_equal_ex(
        chainerx.subtract(scalar_chx, x), expected_rev)
Example #24
def test_ascontiguousarray_with_device(device, shape, padding, dtype):
    obj = array_utils.create_dummy_ndarray(
        chainerx, shape, dtype, padding=padding)
    a = chainerx.ascontiguousarray(obj, device=device)
    b = chainerx.ascontiguousarray(obj)
    array_utils.check_device(a, device)
    assert a.is_contiguous
    chainerx.testing.assert_array_equal_ex(a, b)
Example #25
def test_relu(xp, device, shape, dtype):
    if dtype == 'bool_':
        return chainerx.testing.ignore()
    x = array_utils.create_dummy_ndarray(xp, shape, dtype)
    if xp is numpy:
        return numpy.maximum(0, x)
    else:
        return chainerx.relu(x)
Example #26
def test_negative(xp, device, shape, dtype, is_module):
    if dtype == 'bool_':  # Checked in test_invalid_bool_neg
        return chainerx.testing.ignore()
    x = array_utils.create_dummy_ndarray(xp, shape, dtype)
    if is_module:
        return xp.negative(x)
    else:
        return -x
Example #27
def test_truediv(xp, device, shape, numeric_dtype, is_module):
    lhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype)
    rhs = xp.arange(1, lhs.size + 1, dtype=numeric_dtype).reshape(shape)
    # TODO(beam2d): Remove astype after supporting correct dtype promotion.
    if is_module:
        return xp.divide(lhs, rhs).astype(numeric_dtype)
    else:
        return (lhs / rhs).astype(numeric_dtype)
Example #28
def test_ascontiguousarray_with_dtype(xp, device, shape, padding, dtype_spec):
    obj = array_utils.create_dummy_ndarray(xp, shape, 'int32', padding=padding)
    if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
        dtype_spec = dtype_spec.name
    a = xp.ascontiguousarray(obj, dtype=dtype_spec)
    if xp is chainerx:
        assert a.is_contiguous
    return a
Example #29
def test_take_list_indices(is_module, xp, shape, indices, axis, device):
    a = array_utils.create_dummy_ndarray(xp, shape, 'float32')

    assert isinstance(indices, list)

    if is_module:
        return xp.take(a, indices, axis)
    else:
        return a.take(indices, axis)
Example #30
def test_asanyarray_from_numpy_subclass_array():
    class Subclass(numpy.ndarray):
        pass
    obj = array_utils.create_dummy_ndarray(
        numpy, (2, 3), 'int32').view(Subclass)
    a = chainerx.asanyarray(obj, dtype='float32')
    e = chainerx.array(obj, dtype='float32', copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device
Example #31
def test_logsumexp(xp, device, a_shape, axis, float_dtype, keepdims):
    a = array_utils.create_dummy_ndarray(xp, a_shape, float_dtype)
    if xp is numpy:
        return xp.log(xp.sum(xp.exp(a), axis=axis, keepdims=keepdims))
    return xp.logsumexp(a, axis=axis, keepdims=keepdims)
Example #32
def test_frombuffer_from_numpy_array_with_cuda(device):
    obj = array_utils.create_dummy_ndarray(numpy, (2, 3), 'int32')
    with pytest.raises(chainerx.ChainerxError):
        chainerx.frombuffer(obj, obj.dtype)
Example #33
def generate_inputs(self):
    a = array_utils.create_dummy_ndarray(numpy, self.shape, self.dtype)
    return a,
Example #34
def _array_from_numpy_array_with_dtype(xp, shape, src_dtype, dst_dtype_spec):
    if xp is numpy and isinstance(dst_dtype_spec, chainerx.dtype):
        dst_dtype_spec = dst_dtype_spec.name
    t = array_utils.create_dummy_ndarray(numpy, shape, src_dtype)
    return xp.array(t, dtype=dst_dtype_spec)
Example #35
def test_frombuffer_from_numpy_array_with_offset_count(xp, count, offset):
    obj = array_utils.create_dummy_ndarray(numpy, (3, ), 'int32')
    return xp.frombuffer(obj, obj.dtype, count=count, offset=offset)
Example #36
def test_sum_invalid(is_module, xp, shape, axis, keepdims, dtype):
    a = array_utils.create_dummy_ndarray(xp, shape, dtype)
    if is_module:
        xp.sum(a, axis=axis, keepdims=keepdims)
    else:
        a.sum(axis=axis, keepdims=keepdims)
Example #37
def test_asanyarray_from_numpy_array():
    obj = array_utils.create_dummy_ndarray(numpy, (2, 3), 'int32')
    a = chainerx.asanyarray(obj, dtype='float32')
    e = chainerx.array(obj, dtype='float32', copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device
Example #38
def generate_inputs(self):
    in_shape = self.in_shape
    dtype = self.dtype
    a = array_utils.create_dummy_ndarray(numpy, in_shape, dtype)
    return a,
Example #39
def test_flipud_invalid(xp, shape):
    a = array_utils.create_dummy_ndarray(xp, shape, 'float32')
    return xp.flipud(a)
Example #40
def check(a_shape, b_shape):
    a = array_utils.create_dummy_ndarray(chainerx, a_shape, 'float32')
    with pytest.raises(chainerx.DimensionError):
        a.reshape(b_shape)
Example #41
def test_copyto_invalid_casting():
    a = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'float32')
    b = array_utils.create_dummy_ndarray(chainerx, (3,), 'float32')
    with pytest.raises(ValueError):
        chainerx.copyto(a, b, casting='some_invalid_casting')
Example #42
def test_reshape_invalid_cannot_infer(shape1, shape2):
    a = array_utils.create_dummy_ndarray(chainerx, shape1, 'float32')
    with pytest.raises(chainerx.DimensionError):
        a.reshape(shape2)
Example #43
def generate_inputs(self):
    a = array_utils.create_dummy_ndarray(numpy, self.shape, 'float32')
    return a,
Example #44
def test_hsplit_invalid(xp, shape, indices_or_sections):
    a = array_utils.create_dummy_ndarray(xp, shape, 'float32')
    return xp.hsplit(a, indices_or_sections)
Example #45
def test_iadd(xp, device, shape, dtype):
    lhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=1)
    rhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=2)
    lhs += rhs
    return lhs
Example #46
def test_flip_invalid(xp, shape, axis):
    a = array_utils.create_dummy_ndarray(xp, shape, 'float32')
    return xp.flip(a, axis)
Example #47
def test_maximum_with_scalar(xp, device, shape, value, signed_dtype):
    a = array_utils.create_dummy_ndarray(xp, shape, signed_dtype)
    return xp.maximum(a, value)
Example #48
def test_expand_dims_invalid(xp, shape, axis):
    with warnings.catch_warnings():
        warnings.simplefilter('error', DeprecationWarning)
        a = array_utils.create_dummy_ndarray(xp, shape, 'float32')
        return xp.expand_dims(a, axis)
Example #49
def test_isub(xp, device, shape, numeric_dtype):
    lhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype, pattern=1)
    rhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype, pattern=2)
    lhs -= rhs
    return lhs
Example #50
def test_transpose_invalid_axes(shape, axes):
    a = array_utils.create_dummy_ndarray(chainerx, shape, 'float32')
    with pytest.raises(chainerx.DimensionError):
        chainerx.transpose(a, axes)
    with pytest.raises(chainerx.DimensionError):
        a.transpose(axes)
Example #51
def test_array_from_numpy_array_with_device(shape, device):
    orig = array_utils.create_dummy_ndarray(numpy, (2, ), 'float32')
    a = chainerx.array(orig, device=device)
    b = chainerx.array(orig)
    chainerx.testing.assert_array_equal_ex(a, b)
    array_utils.check_device(a, device)
Example #52
def test_repeat_invalid(xp, shape, repeats, axis):
    a = array_utils.create_dummy_ndarray(xp, shape, 'float32')
    return xp.repeat(a, repeats, axis)
Example #53
def test_copy(xp, shape, dtype, device, is_module):
    a = array_utils.create_dummy_ndarray(xp, shape, dtype)
    if is_module:
        return xp.copy(a)
    else:
        return a.copy()
Example #54
def test_log_softmax_invalid(device, a_shape, axis, float_dtype):
    a = array_utils.create_dummy_ndarray(chainerx, a_shape, float_dtype)
    with pytest.raises(chainerx.DimensionError):
        return chainerx.log_softmax(a, axis=axis)
Example #55
def test_frombuffer_from_numpy_array_with_noncontiguous(xp):
    obj = array_utils.create_dummy_ndarray(numpy, (2, 3),
                                           'int32',
                                           padding=True)
    return xp.frombuffer(obj, obj.dtype)
Example #56
def test_logsumexp_invalid(device, a_shape, axis, float_dtype, keepdims):
    a = array_utils.create_dummy_ndarray(chainerx, a_shape, float_dtype)
    with pytest.raises(chainerx.DimensionError):
        chainerx.logsumexp(a, axis=axis, keepdims=keepdims)
Example #57
def test_swap_invalid(xp, shape, axis1, axis2):
    a = array_utils.create_dummy_ndarray(xp, shape, 'float32')
    return xp.swapaxes(a, axis1, axis2)
Example #58
def test_where_invalid_shapes(xp, cond_shape, x_shape, y_shape):
    x = array_utils.create_dummy_ndarray(xp, x_shape, 'float32')
    y = array_utils.create_dummy_ndarray(xp, y_shape, 'float32')
    c = array_utils.create_dummy_ndarray(xp, cond_shape, 'float32')
    return xp.where(c, x, y)
Example #59
def test_to_numpy_non_contiguous(shape, dtype, device, copy):
    a_chx = array_utils.create_dummy_ndarray(chainerx, shape, dtype).T
    a_np = chainerx.to_numpy(a_chx, copy)
    _check_to_numpy(a_np, a_chx, device, copy)
Example #60
def test_as_grad_stopped_copy(shape, float_dtype):
    dtype = float_dtype

    def check(array_a, array_b):
        chainerx.testing.assert_array_equal_ex(array_a,
                                               array_b,
                                               strides_check=False)

        assert array_b.is_contiguous

        # Check memory addresses only if >0 bytes are allocated
        if array_a.size > 0:
            assert (array_a._debug_data_memory_address !=
                    array_b._debug_data_memory_address)

    # Stop gradients on all graphs
    with chainerx.backprop_scope('bp1') as bp1, \
            chainerx.backprop_scope('bp2') as bp2, \
            chainerx.backprop_scope('bp3') as bp3:

        a = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
        a.require_grad(bp1)
        a.require_grad(bp2)
        assert a.is_grad_required(bp1)
        assert a.is_grad_required(bp2)
        assert a.is_backprop_required(bp1)
        assert a.is_backprop_required(bp2)
        b = a.as_grad_stopped(copy=True)

        check(a, b)
        assert not b.is_grad_required(bp1)
        assert not b.is_grad_required(bp2)
        assert not b.is_backprop_required(bp1)
        assert not b.is_backprop_required(bp2)

        assert a.is_grad_required(bp1)
        assert a.is_grad_required(bp2)
        assert a.is_backprop_required(bp1)
        assert a.is_backprop_required(bp2)

        # Stop gradients on some graphs
        a = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
        a.require_grad(bp1)
        a.require_grad(bp2)
        a.require_grad(bp3)
        assert a.is_grad_required(bp1)
        assert a.is_grad_required(bp2)
        assert a.is_grad_required(bp3)
        assert a.is_backprop_required(bp1)
        assert a.is_backprop_required(bp2)
        assert a.is_backprop_required(bp3)
        b = a.as_grad_stopped([bp1, bp2], copy=True)

        check(a, b)
        assert not b.is_grad_required(bp1)
        assert not b.is_grad_required(bp2)
        assert not b.is_grad_required(bp3)
        assert not b.is_backprop_required(bp1)
        assert not b.is_backprop_required(bp2)
        assert b.is_backprop_required(bp3)

        assert a.is_grad_required(bp1)
        assert a.is_grad_required(bp2)
        assert a.is_grad_required(bp3)
        assert a.is_backprop_required(bp1)
        assert a.is_backprop_required(bp2)
        assert a.is_backprop_required(bp3)