Example #1
    def test_double_backward_negative_multi_axis_invert_chainerx(self):
        gy = numpy.ones_like(self.x.sum(axis=(-2, 0))) * self.gy
        self.check_double_backward(
            chainerx.array(self.x),
            chainerx.array(gy),
            chainerx.array(self.ggx),
            axis=(-2, 0))
Example #2
    def check_double_backward_chainerx(self, op):
        # TODO(sonots): Support float16
        if self.dtype == numpy.float16:
            raise unittest.SkipTest('ChainerX does not support float16')

        self.check_double_backward(op, chainerx.array(
            self.x), chainerx.array(self.gy), chainerx.array(self.ggy))
Example #3
    def test_backward_chainerx(self):
        # TODO(sonots): Support float16
        if self.dtype == numpy.float16:
            raise unittest.SkipTest('ChainerX does not support float16')

        self.check_backward(
            chainerx.array(self.data), chainerx.array(self.grad))
Example #4
def test_array_grad_without_backprop_id(backprop_args):
    array = chainerx.array([1., 1., 1.], chainerx.float32)
    grad = chainerx.array([0.5, 0.5, 0.5], chainerx.float32)

    with pytest.raises(chainerx.ChainerxError):
        array.get_grad(*backprop_args)
    with pytest.raises(chainerx.ChainerxError):
        array.set_grad(grad, *backprop_args)
    with pytest.raises(chainerx.ChainerxError):
        array.cleargrad(*backprop_args)

    # Gradient methods
    array.require_grad().set_grad(grad, *backprop_args)
    assert array.get_grad(*backprop_args) is not None
    assert array.get_grad(
        *backprop_args)._debug_flat_data == grad._debug_flat_data

    array.cleargrad(*backprop_args)  # clear
    assert array.get_grad(*backprop_args) is None

    array.set_grad(grad, *backprop_args)
    assert array.get_grad(*backprop_args) is not None
    assert array.get_grad(
        *backprop_args)._debug_flat_data == grad._debug_flat_data

    array.set_grad(None, *backprop_args)  # clear
    assert array.get_grad(*backprop_args) is None

    # Gradient attributes
    array.grad = grad
    assert array.get_grad(*backprop_args) is not None
    assert array.get_grad(*backprop_args) is array.grad

    array.grad = None  # clear
    assert array.get_grad(*backprop_args) is None
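Note: the backprop_args parameter above comes from a pytest parametrization that is not shown. A minimal sketch of the assumed fixture, exercising both the implicit default graph and an explicit None:

@pytest.fixture(params=[(), (None,)])
def backprop_args(request):
    # () calls e.g. array.get_grad() with no arguments, while (None,)
    # passes the default backprop ID explicitly; both must behave the same.
    return request.param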
Example #5
def test_assert_allclose(shape, transpose, dtype1, dtype2):
    atol = 1e-3 if numpy.dtype('float16') in [dtype1, dtype2] else 1e-5

    np_a = numpy.arange(2, 2 + numpy.prod(shape)).astype(dtype1).reshape(shape)
    if transpose:
        np_b = numpy.empty(np_a.T.shape, dtype=dtype2).T
        np_b[:] = np_a
    else:
        np_b = numpy.arange(2, 2 + numpy.prod(shape)
                            ).astype(dtype2).reshape(shape)

    # Add a small perturbation only if the dtype is inexact (float or complex)
    if np_a.dtype.kind in ('f', 'c'):
        np_a += atol * 1e-1
    if np_b.dtype.kind in ('f', 'c'):
        np_b -= atol * 1e-1

    chx_a = chainerx.array(np_a)
    chx_b = chainerx.array(np_b)

    # Test precondition checks
    assert np_a.shape == np_b.shape
    if transpose:
        assert np_a.strides != np_b.strides, 'transpose=True is meaningless'

    # Test checks
    chainerx.testing.assert_allclose(np_a, np_a, atol=atol)  # np-np (same obj)
    chainerx.testing.assert_allclose(
        chx_a, chx_a, atol=atol)  # chx-chx (same obj)
    chainerx.testing.assert_allclose(
        np_a, np_b, atol=atol)  # np-np (diff. obj)
    chainerx.testing.assert_allclose(
        chx_a, chx_b, atol=atol)  # chx-chx (diff. obj)
    chainerx.testing.assert_allclose(np_a, chx_b, atol=atol)  # np-chx
    chainerx.testing.assert_allclose(chx_a, np_b, atol=atol)  # chx-np
Example #6
    def test_double_backward_multi_axis_chainerx(self):
        gy = numpy.ones_like(self.x.sum(axis=(0, 1))) * self.gy
        self.check_double_backward(
            chainerx.array(self.x),
            chainerx.array(gy),
            chainerx.array(self.ggx),
            axis=(0, 1))
Example #7
def test_linear(device, x_shape, w_shape, b_shape, n_batch_axes, dtype):
    # TODO(imanishi): Remove the skip after supporting non-float dot on CUDA
    if device.name == 'cuda:0' and numpy.dtype(dtype).kind != 'f':
        return chainerx.testing.ignore()
    x = array_utils.create_dummy_ndarray(numpy, x_shape, dtype)
    w = array_utils.create_dummy_ndarray(numpy, w_shape, dtype)
    b = (None if b_shape in (None, Unspecified)
         else array_utils.create_dummy_ndarray(numpy, b_shape, dtype))

    # Calculate chainerx_out
    chainerx_x = chainerx.array(x)
    chainerx_w = chainerx.array(w)
    chainerx_b = chainerx.array(b) if b is not None else None
    if b_shape is Unspecified:
        chainerx_out = chainerx.linear(chainerx_x, chainerx_w)
    elif n_batch_axes is Unspecified:
        chainerx_out = chainerx.linear(chainerx_x, chainerx_w, chainerx_b)
    else:
        chainerx_out = chainerx.linear(chainerx_x, chainerx_w, chainerx_b,
                                       n_batch_axes)

    # Calculate numpy_out
    if n_batch_axes is Unspecified:
        n_batch_axes = 1
    out_shape = x_shape[:n_batch_axes] + (w_shape[0],)
    x = x.reshape(numpy.prod(x_shape[:n_batch_axes]),
                  numpy.prod(x_shape[n_batch_axes:]))
    numpy_out = x.dot(w.T).reshape(out_shape)
    if b is not None:
        numpy_out += b

    chainerx.testing.assert_array_equal(chainerx_out, numpy_out)
Example #8
def test_array_require_grad_multiple_graphs_forward():
    x1 = chainerx.array([1, 1, 1], chainerx.float32)
    x2 = chainerx.array([1, 1, 1], chainerx.float32)

    with chainerx.backprop_scope('bp1') as bp1, \
            chainerx.backprop_scope('bp2') as bp2, \
            chainerx.backprop_scope('bp3') as bp3:

        x1.require_grad(bp1)
        x2.require_grad(bp2)

        assert x1.is_grad_required(bp1)
        assert x2.is_grad_required(bp2)
        assert x1.is_backprop_required(bp1)
        assert x2.is_backprop_required(bp2)

        assert not x1.is_grad_required(bp2)
        assert not x2.is_grad_required(bp1)
        assert not x1.is_backprop_required(bp2)
        assert not x2.is_backprop_required(bp1)

        y = x1 * x2

        assert not y.is_grad_required(bp1)
        assert not y.is_grad_required(bp2)
        assert y.is_backprop_required(bp1)
        assert y.is_backprop_required(bp2)

        # No unspecified graphs are generated
        assert not y.is_backprop_required(None)
        assert not y.is_backprop_required(bp3)
Example #9
    def test_matmul_backward_chainerx(self):
        # TODO(sonots): Support float16
        if numpy.float16 in [self.x1_dtype, self.x2_dtype]:
            raise unittest.SkipTest('ChainerX does not support float16')

        self.check_backward(
            chainerx.array(self.x1), chainerx.array(self.x2),
            chainerx.array(self.gy), atol=1e-2, rtol=1e-2)
Example #10
    def test_double_backward_negative_axis_chainerx(self):
        for i in range(self.x.ndim):
            gy = numpy.ones_like(self.x.sum(axis=-1)) * self.gy
            self.check_double_backward(
                chainerx.array(self.x),
                chainerx.array(gy),
                chainerx.array(self.ggx),
                axis=-1)
Example #11
def _check_backward_binary(fprop):
    chainerx.check_backward(
        fprop,
        (chainerx.array([1, -2, 1], chainerx.float32).require_grad(),
         chainerx.array([0, 1, 2], chainerx.float32).require_grad()),
        (chainerx.array([1, -2, 3], chainerx.float32),),
        (chainerx.full((3,), 1e-3, chainerx.float32),
         chainerx.full((3,), 1e-3, chainerx.float32)),
    )
Example #12
def test_array_deepcopy(device):
    arr = chainerx.array([1, 2], chainerx.float32, device=device)
    arr2 = copy.deepcopy(arr)

    assert isinstance(arr2, chainerx.ndarray)
    assert arr2.device is device
    assert arr2.dtype == chainerx.float32
    chainerx.testing.assert_array_equal(
        arr2,
        chainerx.array([1, 2], chainerx.float32))
Example #13
def _check_backward_unary(fprop):
    x = chainerx.array([1, 2, 1], chainerx.float32)
    x.require_grad()

    chainerx.check_backward(
        fprop,
        (x,),
        (chainerx.array([0, -2, 1], chainerx.float32),),
        (chainerx.full((3,), 1e-3, chainerx.float32),),
    )
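Both _check_backward_unary and _check_backward_binary expect an fprop that takes a tuple of input arrays and returns a tuple of output arrays. A usage sketch (the forward functions here are arbitrary choices, not from the original tests):

_check_backward_unary(lambda xs: (xs[0] * xs[0],))
_check_backward_binary(lambda xs: (xs[0] * xs[1],))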
Example #14
    def __init__(self, in_channels, out_channels, ksize, stride, pad,
                 initialW=None, nobias=False, groups=1):
        W_shape = out_channels, int(in_channels / groups), ksize, ksize
        self.W = chx.array(np.random.normal(size=W_shape).astype(np.float32))
        if nobias:
            self.b = None
        else:
            self.b = chx.array(np.random.normal(
                size=out_channels).astype(np.float32))
        self.stride = stride
        self.pad = pad
Example #15
def test_array_pickle(device):
    arr = chainerx.array([1, 2], chainerx.float32, device=device)
    s = pickle.dumps(arr)
    del arr

    arr2 = pickle.loads(s)
    assert isinstance(arr2, chainerx.ndarray)
    assert arr2.device is device
    assert arr2.dtype == chainerx.float32
    chainerx.testing.assert_array_equal(
        arr2,
        chainerx.array([1, 2], chainerx.float32))
Example #16
def _array_to_chainerx(array, device=None):
    # If device is None, an appropriate device is chosen according to the
    # input array.
    assert device is None or isinstance(device, chainerx.Device)

    if array is None:
        return None

    if array.dtype not in chainerx.all_dtypes:
        raise TypeError(
            'Dtype {} is not supported in ChainerX.'.format(array.dtype.name))

    if isinstance(array, chainerx.ndarray):
        if device is None:
            return array
        if device is array.device:
            return array
        return array.to_device(device)
    if isinstance(array, numpy.ndarray):
        if device is None:
            device = chainerx.get_device('native', 0)
        return chainerx.array(array, device=device, copy=False)
    if isinstance(array, cuda.ndarray):
        if device is None:
            device = chainerx.get_device('cuda', array.device.id)
        elif device.backend.name != 'cuda':
            # cupy to non-cuda backend
            # TODO(niboshi): Remove conversion to numpy when both CuPy and
            # ChainerX support the array interface.
            array = _cpu._to_cpu(array)
            return chainerx.array(array, device=device, copy=False)
        elif device.index != array.device.id:
            # cupy to cuda backend but different device
            array = cuda.to_gpu(array, device=device.index)
        # cupy to cuda backend with the same device
        return chainerx._core._fromrawpointer(
            array.data.mem.ptr,
            array.shape,
            array.dtype,
            array.strides,
            device,
            array.data.ptr - array.data.mem.ptr,
            array)
    if isinstance(array, intel64.mdarray):
        return _array_to_chainerx(numpy.array(array), device)
    if numpy.isscalar(array):
        return chainerx.asarray(array)

    raise TypeError(
        'Array cannot be converted into chainerx.ndarray'
        '\nActual type: {0}.'.format(type(array)))
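For illustration, a minimal sketch of the NumPy branch above (assuming the helper is in scope and a native device exists): device=None falls back to native:0, and copy=False lets the conversion avoid a copy where possible.

np_x = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)
chx_x = _array_to_chainerx(np_x)  # device=None: defaults to native:0
assert chx_x.device is chainerx.get_device('native', 0)
assert chx_x.dtype == chainerx.float32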
Example #17
def test_array_grad_no_deepcopy():
    dtype = chainerx.float32
    array = chainerx.array([2, 5, 1], dtype)
    grad = chainerx.array([5, 7, 8], dtype)

    # Set grad
    array.require_grad().set_grad(grad)

    # Retrieve grad twice and assert they share the same underlying data
    grad1 = array.get_grad()
    grad2 = array.get_grad()

    grad1 *= chainerx.array([2, 2, 2], dtype)
    assert grad2._debug_flat_data == [
        10, 14, 16], 'grad getter must not incur a copy'
Example #18
    def forward_chainerx(self, inputs):
        x, gamma, beta = inputs

        running_mean = chainerx.array(self.running_mean, copy=True)
        running_var = chainerx.array(self.running_var, copy=True)

        y = chainerx.batch_norm(
            x, gamma, beta, running_mean=running_mean, running_var=running_var,
            **self.optional_args)

        # Record running values for later checks.
        self.running_mean_chx = running_mean
        self.running_var_chx = running_var

        return y,
Example #19
def test_array_backward():
    with chainerx.backprop_scope('bp1') as bp1:
        x1 = chainerx.array(
            [1, 1, 1], chainerx.float32).require_grad(backprop_id=bp1)
        x2 = chainerx.array(
            [1, 1, 1], chainerx.float32).require_grad(backprop_id=bp1)
        y = x1 * x2

        y.backward(backprop_id=bp1, enable_double_backprop=True)
        gx1 = x1.get_grad(backprop_id=bp1)
        x1.set_grad(None, backprop_id=bp1)

        gx1.backward(backprop_id=bp1)
        with pytest.raises(chainerx.ChainerxError):
            gx1.get_grad(backprop_id=bp1)
Example #20
def test_array_require_grad_with_backprop_id():
    array = chainerx.array([1, 1, 1], chainerx.float32)

    with chainerx.backprop_scope('bp1') as bp1:
        assert not array.is_backprop_required(bp1)
        array.require_grad(bp1)
        assert array.is_grad_required(bp1)
        assert array.is_backprop_required(bp1)

        # Repeated calls should not fail, but do nothing
        array.require_grad(bp1)
        assert array.is_grad_required(bp1)
        assert array.is_backprop_required(bp1)

    # keyword arguments
    with chainerx.backprop_scope('bp2') as bp2:
        assert not array.is_backprop_required(backprop_id=bp2)
        array.require_grad(backprop_id=bp2)
        assert array.is_grad_required(bp2)
        assert array.is_grad_required(backprop_id=bp2)
        assert array.is_backprop_required(bp2)
        assert array.is_backprop_required(backprop_id=bp2)

        # Repeated calls should not fail, but do nothing
        array.require_grad(backprop_id=bp2)
        assert array.is_grad_required(backprop_id=bp2)
        assert array.is_backprop_required(backprop_id=bp2)
Example #21
def test_array_cleargrad():
    dtype = chainerx.float32
    array = chainerx.array([2, 5, 1], dtype)
    grad = chainerx.array([5, 7, 8], dtype)

    # Set grad, get it and save it
    array.require_grad().set_grad(grad)
    del grad
    saved_grad = array.get_grad()

    # Clear grad
    array.cleargrad()
    assert array.get_grad() is None

    assert saved_grad._debug_flat_data == [
        5, 7, 8], 'Clearing grad must not affect previously retrieved grad'
Example #22
def new_linear_params(n_in, n_out):
    W = np.random.randn(n_out, n_in).astype(
        np.float32)  # TODO(beam2d): not supported in chx
    W /= np.sqrt(n_in)  # TODO(beam2d): not supported in chx
    W = chx.array(W)
    b = chx.zeros(n_out, dtype=chx.float32)
    return W, b
Example #23
def test_max_pool_invalid(
        device, x_shape, ksize, stride, pad, cover_all, float_dtype):
    x = numpy.random.uniform(-1, 1, x_shape).astype(float_dtype)
    x = chainerx.array(x)
    with pytest.raises(chainerx.DimensionError):
        chainerx.max_pool(
            x, ksize=ksize, stride=stride, pad=pad, cover_all=cover_all)
Example #24
def test_sub_scalar(scalar, device, shape, dtype):
    if dtype == 'bool_':
        # Boolean subtract is deprecated.
        return chainerx.testing.ignore()
    x_np = array_utils.create_dummy_ndarray(numpy, shape, dtype)
    # Implicit casting in NumPy's subtract depends on the 'casting' argument,
    # which is not yet supported (ChainerX always casts).
    # Therefore, we explicitly cast the scalar to the dtype of the ndarray
    # before the subtraction for NumPy.
    expected = x_np - numpy.dtype(dtype).type(scalar)
    expected_rev = numpy.dtype(dtype).type(scalar) - x_np

    x = chainerx.array(x_np)
    scalar_chx = chainerx.Scalar(scalar, dtype)
    chainerx.testing.assert_array_equal_ex(x - scalar, expected)
    chainerx.testing.assert_array_equal_ex(x - scalar_chx, expected)
    chainerx.testing.assert_array_equal_ex(scalar - x, expected_rev)
    chainerx.testing.assert_array_equal_ex(scalar_chx - x, expected_rev)
    chainerx.testing.assert_array_equal_ex(
        chainerx.subtract(x, scalar), expected)
    chainerx.testing.assert_array_equal_ex(
        chainerx.subtract(x, scalar_chx), expected)
    chainerx.testing.assert_array_equal_ex(
        chainerx.subtract(scalar, x), expected_rev)
    chainerx.testing.assert_array_equal_ex(
        chainerx.subtract(scalar_chx, x), expected_rev)
Example #25
def test_array_repr():
    array = chainerx.array([], chainerx.bool_)
    assert "array([], shape=(0,), dtype=bool, device='native:0')" == str(array)

    array = chainerx.array([False], chainerx.bool_)
    assert ("array([False], shape=(1,), dtype=bool, "
            "device='native:0')" == str(array))

    array = chainerx.array([[0, 1, 2], [3, 4, 5]], chainerx.int8)
    assert ("array([[0, 1, 2],\n"
            "       [3, 4, 5]], shape=(2, 3), dtype=int8, "
            "device='native:0')") == str(array)

    array = chainerx.array([[0, 1, 2], [3.25, 4, 5]], chainerx.float32)
    assert ("array([[0.  , 1.  , 2.  ],\n"
            "       [3.25, 4.  , 5.  ]], shape=(2, 3), dtype=float32, "
            "device='native:0')") == str(array)
Example #26
def test_cmp(device, cmp_op, chx_cmp, np_cmp, a_object, b_object, dtype):
    a_np = _to_array_safe(numpy, a_object, dtype)
    b_np = _to_array_safe(numpy, b_object, dtype)
    if a_np is None or b_np is None:
        return

    a_chx = chainerx.array(a_np)
    b_chx = chainerx.array(b_np)

    chainerx.testing.assert_array_equal_ex(
        cmp_op(a_chx, b_chx), cmp_op(a_np, b_np))
    chainerx.testing.assert_array_equal_ex(
        cmp_op(b_chx, a_chx), cmp_op(b_np, a_np))
    chainerx.testing.assert_array_equal_ex(
        chx_cmp(a_chx, b_chx), np_cmp(a_np, b_np))
    chainerx.testing.assert_array_equal_ex(
        chx_cmp(b_chx, a_chx), np_cmp(b_np, a_np))
Example #27
def test_average_pool_invalid(
        device, x_shape, ksize, stride, pad, pad_mode, float_dtype):
    x = numpy.random.uniform(-1, 1, x_shape).astype(float_dtype)
    x = chainerx.array(x)
    pad_mode_kwargs = _get_pad_mode_kwargs(pad_mode, True)
    with pytest.raises(chainerx.DimensionError):
        chainerx.average_pool(
            x, ksize=ksize, stride=stride, pad=pad, **pad_mode_kwargs)
Example #28
def test_array_to_numpy_identity(device, slice1, slice2):
    start1, end1, step1 = slice1
    start2, end2, step2 = slice2
    x = numpy.arange(1500).reshape((30, 50))[
        start1:end1:step1, start2:end2:step2]
    y = chainerx.array(x)
    z = chainerx.to_numpy(y)
    chainerx.testing.assert_array_equal_ex(x, y, strides_check=False)
    chainerx.testing.assert_array_equal_ex(x, z, strides_check=False)
Example #29
def test_asanyarray_from_numpy_subclass_array():
    class Subclass(numpy.ndarray):
        pass
    obj = array_utils.create_dummy_ndarray(
        numpy, (2, 3), 'int32').view(Subclass)
    a = chainerx.asanyarray(obj, dtype='float32')
    e = chainerx.array(obj, dtype='float32', copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device
Example #30
def test_frombuffer_from_device_buffer(device):
    dtype = 'int32'

    device_buffer = chainerx.testing._DeviceBuffer([1, 2, 3, 4, 5, 6], (2, 3),
                                                   dtype)
    a = chainerx.frombuffer(device_buffer, dtype)
    e = chainerx.array([1, 2, 3, 4, 5, 6], dtype)

    chainerx.testing.assert_array_equal_ex(e, a)
    assert a.device is chainerx.get_device(device)
Example #31
def test_array_grad_identity():
    array = chainerx.array([1., 1., 1.], chainerx.float32)
    grad = chainerx.array([0.5, 0.5, 0.5], chainerx.float32)
    array.require_grad().set_grad(grad)

    assert array.get_grad() is grad, (
        'grad must preserve physical identity')
    assert array.get_grad() is grad, (
        'grad must preserve physical identity in repeated retrieval')

    # array.grad and grad share the same data
    grad += chainerx.array([2, 2, 2], chainerx.float32)
    assert array.get_grad()._debug_flat_data == [
        2.5, 2.5, 2.5], 'A modification to grad must affect array.grad'

    array_grad = array.get_grad()
    array_grad += chainerx.array([1, 1, 1], chainerx.float32)
    assert grad._debug_flat_data == [
        3.5, 3.5, 3.5], 'A modification to array.grad must affect grad'
Example #32
def prepare_chainerx_inputs(num_input, config, grad=False):
    device = chainerx.get_default_device()
    dtype = config['dtype']
    inputs = prepare_numpy_inputs(num_input, config)
    inputs = [chainerx.array(i, dtype=dtype) for i in inputs]
    if grad:
        for i in inputs:
            i.require_grad()
    device.synchronize()
    return inputs
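A hypothetical call for the helper above (only 'dtype' is read directly by this function; the 'shape' key is an assumption about what prepare_numpy_inputs expects):

config = {'dtype': 'float32', 'shape': (32, 128)}  # 'shape' is hypothetical
x1, x2 = prepare_chainerx_inputs(2, config, grad=True)
assert all(x.is_grad_required() for x in (x1, x2))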
Example #33
def test_asanyarray_from_chainerx_array(dtype):
    obj = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'int32')
    a = chainerx.asanyarray(obj, dtype=dtype)
    if a.dtype == obj.dtype:
        assert a is obj
    else:
        assert a is not obj
    e = chainerx.array(obj, dtype=dtype, copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device
Example #34
def test_array_from_chainerx_array(shape, dtype, copy, device):
    t = array_utils.create_dummy_ndarray(chainerx, shape, dtype, device=device)
    a = chainerx.array(t, copy=copy)
    if not copy:
        assert t is a
    else:
        assert t is not a
        chainerx.testing.assert_array_equal_ex(a, t, strides_check=False)
        assert a.device is t.device
        assert a.is_contiguous
Example #35
def chainerx_max_pool():
    y = chainerx.max_pool(**create_args(chainerx))
    # In the case of CUDA, we get huge negative numbers instead of -inf
    # around boundaries. Align them with the Chainer (native) results.
    if device.backend.name == 'cuda':
        y = chainerx.to_numpy(y)
        y[y < -3.e+34] = -float('inf')
        y = chainerx.array(y)
    return y
Example #36
def test_is_chained():
    arr = chainerx.array([1, 2], chainerx.float32)
    with pytest.raises(chainerx.ChainerxError):
        arr._is_chained()

    arr.require_grad()
    assert not arr._is_chained()

    arr2 = 2 * arr
    assert arr2._is_chained()
Example #37
def test_asarray_from_numpy_array_with_copy():
    obj = array_utils.create_dummy_ndarray(numpy, (2, 3), 'int32')
    a = chainerx.asarray(obj, dtype='float32')
    e = chainerx.array(obj, dtype='float32', copy=False)
    chainerx.testing.assert_array_equal_ex(e, a)
    assert e.device is a.device

    # test buffer is not shared
    a += a
    assert not numpy.array_equal(obj, chainerx.to_numpy(a))
Example #38
def test_correct_double_backward_unary():
    chainerx.check_double_backward(
        lambda xs: (xs[0] * xs[0],),
        (chainerx.array([1, 2, 3], chainerx.float32).require_grad(),),
        (chainerx.ones((3,), chainerx.float32).require_grad(),),
        (chainerx.ones((3,), chainerx.float32),),
        (chainerx.full((3,), 1e-3, chainerx.float32),
         chainerx.full((3,), 1e-3, chainerx.float32)),
        1e-4,
        1e-3,
    )
Example #39
def fromiter(iterable, dtype, count=-1, device=None):
    """Constructs a new 1-D array from an iterable object.

    This is currently equivalent to :func:`numpy.fromiter`
    wrapped by :func:`chainerx.array`, given the device argument.

    .. seealso:: :func:`numpy.fromiter`

    """
    return chainerx.array(numpy.fromiter(iterable, dtype=dtype, count=count),
                          device=device)
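A short usage sketch for the wrapper above; because the data is first materialized by numpy.fromiter, any finite iterable of scalars works (the values here are arbitrary):

a = fromiter((i * i for i in range(5)), dtype='float32')
assert a.shape == (5,)
assert chainerx.to_numpy(a)[4] == 16.0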
Example #40
def test_truediv_scalar(scalar, device, shape, numeric_dtype):
    x_np = array_utils.create_dummy_ndarray(numpy, shape, numeric_dtype)
    expected = x_np / scalar

    x = chainerx.array(x_np)
    chainerx.testing.assert_array_equal_ex(x / scalar,
                                           expected,
                                           strides_check=False)
    chainerx.testing.assert_array_equal_ex(chainerx.divide(x, scalar),
                                           expected,
                                           strides_check=False)
Example #41
def get_mnist(path, name):
    path = pathlib.Path(path)
    x_path = str(path / '{}-images-idx3-ubyte.gz'.format(name))
    y_path = str(path / '{}-labels-idx1-ubyte.gz'.format(name))

    with gzip.open(x_path, 'rb') as fx:
        fx.read(16)  # skip header
        # read/frombuffer is used instead of fromfile because fromfile does
        # not handle gzip files correctly
        x = np.frombuffer(fx.read(), dtype=np.uint8).reshape(-1, 784)

    with gzip.open(y_path, 'rb') as fy:
        fy.read(8)  # skip header
        y = np.frombuffer(fy.read(), dtype=np.uint8)

    assert x.shape[0] == y.shape[0]

    x = x.astype(np.float32)
    x /= 255
    y = y.astype(np.int32)
    return chx.array(x), chx.array(y)
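A usage sketch (the directory is hypothetical; the name argument selects the standard MNIST file prefixes, 'train' or 't10k'):

X_train, Y_train = get_mnist('path/to/mnist', 'train')
X_test, Y_test = get_mnist('path/to/mnist', 't10k')
assert X_train.shape[1] == 784  # flattened 28x28 images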
Example #42
    def test_scatter_dataset(self):
        n = self.communicator.size

        for shuffle in [True, False]:
            for root in range(self.communicator.size):
                self.check_scatter_dataset([], shuffle, root)
                self.check_scatter_dataset([0], shuffle, root)
                self.check_scatter_dataset(list(range(n)), shuffle, root)
                self.check_scatter_dataset(list(range(n * 5 - 1)), shuffle,
                                           root)

                self.check_scatter_dataset(np.array([]), shuffle, root)
                self.check_scatter_dataset(np.array([0]), shuffle, root)
                self.check_scatter_dataset(np.arange(n), shuffle, root)
                self.check_scatter_dataset(np.arange(n * 5 - 1), shuffle, root)

                self.check_scatter_dataset(chx.array([]), shuffle, root)
                self.check_scatter_dataset(chx.array([0]), shuffle, root)
                self.check_scatter_dataset(chx.arange(n), shuffle, root)
                self.check_scatter_dataset(chx.arange(n * 5 - 1), shuffle,
                                           root)
Example #43
def test_cholesky_invalid_not_positive_definite(device):
    _skip_if_native_and_lapack_unavailable(device)

    while True:
        a = numpy.random.random((3, 3)).astype('float32')
        try:
            numpy.linalg.cholesky(a)
        except numpy.linalg.LinAlgError:
            break
    a = chainerx.array(a)
    with pytest.raises(chainerx.ChainerxError):
        chainerx.linalg.cholesky(a)
Example #44
    def test_send_obj_chx_gpu(self):
        self.setup()

        rank_next = (self.communicator.rank + 1) % self.communicator.size
        with chainerx.using_device("cuda"):
            chx_array = chainerx.array([0])
            with pytest.raises(ValueError):
                self.communicator.send_obj(chx_array, dest=rank_next)

            chx_array_list = [[0], chainerx.array([1])]
            with pytest.raises(ValueError):
                self.communicator.send_obj(chx_array_list, dest=rank_next)

            chx_array_tuple = (0, chainerx.array([2]))
            with pytest.raises(ValueError):
                self.communicator.send_obj(chx_array_tuple, dest=rank_next)

            chx_array_dict_value = {0: chainerx.array([2])}
            with pytest.raises(ValueError):
                self.communicator.send_obj(chx_array_dict_value,
                                           dest=rank_next)

            chx_array_dict_key = {chainerx.array([2]): 0}
            with pytest.raises(ValueError):
                self.communicator.send_obj(chx_array_dict_key, dest=rank_next)

            chx_array_dict_set = {chainerx.array([2]), 0}
            with pytest.raises(ValueError):
                self.communicator.send_obj(chx_array_dict_set, dest=rank_next)

        self.teardown()
Example #45
    def test_collective_obj_chx_gpu(self):
        self.setup()

        test_function_list = [
            self.communicator.gather_obj, self.communicator.bcast_obj,
            self.communicator.allreduce_obj
        ]
        with chainerx.using_device("cuda"):
            for func in test_function_list:
                chx_array = chainerx.array([0])
                with pytest.raises(ValueError):
                    func(chx_array)

                chx_array_list = [[0], chainerx.array([1])]
                with pytest.raises(ValueError):
                    func(chx_array_list)

                chx_array_tuple = (0, chainerx.array([2]))
                with pytest.raises(ValueError):
                    func(chx_array_tuple)

                chx_array_dict_value = {0: chainerx.array([2])}
                with pytest.raises(ValueError):
                    func(chx_array_dict_value)

                chx_array_dict_key = {chainerx.array([2]): 0}
                with pytest.raises(ValueError):
                    func(chx_array_dict_key)

                chx_array_dict_set = {chainerx.array([2]), 0}
                with pytest.raises(ValueError):
                    func(chx_array_dict_set)

        self.teardown()
Example #46
def _array_to_chainerx(array, device=None):
    # If device is None, an appropriate device is chosen according to the
    # input array.
    assert device is None or isinstance(device, chainerx.Device)

    if array is None:
        return None
    if isinstance(array, chainerx.ndarray):
        if device is None:
            return array
        if device is array.device:
            return array
        return array.to_device(device)
    if isinstance(array, numpy.ndarray):
        if device is None:
            device = chainerx.get_device('native', 0)
        return chainerx.array(array, device=device, copy=False)
    if isinstance(array, cuda.ndarray):
        if device is None:
            device = chainerx.get_device('cuda', array.device.id)
        elif device.backend.name != 'cuda':
            # cupy to non-cuda backend
            # TODO(niboshi): Remove conversion to numpy when both CuPy and
            # ChainerX support the array interface.
            array = _cpu._to_cpu(array)
            return chainerx.array(array, device=device, copy=False)
        elif device.index != array.device.id:
            # cupy to cuda backend but different device
            array = cuda.to_gpu(array, device=device.index)
        # cupy to cuda backend with the same device
        return chainerx._core._fromrawpointer(
            array.data.mem.ptr, array.shape, array.dtype, array.strides,
            device, array.data.ptr - array.data.mem.ptr, array)
    if isinstance(array, intel64.mdarray):
        return _array_to_chainerx(numpy.array(array), device)
    if numpy.isscalar(array):
        return chainerx.asarray(array)

    raise TypeError('Array cannot be converted into chainerx.ndarray'
                    '\nActual type: {0}.'.format(type(array)))
Example #47
def sample_from_decoder_cell(cell,
                             nb_steps,
                             best=False,
                             keep_attn_values=False,
                             need_score=False):
    """
        Function that sample an output from a conditionalized decoder cell
    """

    with chainer.using_config("train", False), chainer.no_backprop_mode():
        states, logits, attn = cell.get_initial_logits()

        score = 0
        sequences = []
        attn_list = []

        for _ in six.moves.range(nb_steps):
            if keep_attn_values:
                attn_list.append(attn)

            probs = F.softmax(logits)
            if best:
                curr_idx = cell.xp.argmax(probs.data, 1).astype(np.int32)
            else:
                if cell.xp != np:
                    probs_data = cuda.to_cpu(probs.data)
                else:
                    probs_data = probs.data
                curr_idx = minibatch_sampling(probs_data)
                if cell.xp == chainerx:
                    curr_idx = chainerx.array(curr_idx.astype(np.int32),
                                              dtype=chainerx.int32,
                                              device=logits.array.device)
                elif cell.xp != np:
                    curr_idx = cuda.to_gpu(curr_idx.astype(np.int32))
                else:
                    curr_idx = curr_idx.astype(np.int32)
            if need_score:
                score = score + np.log(
                    cuda.to_cpu(probs.data)[np.arange(cell.mb_size),
                                            cuda.to_cpu(curr_idx)])
            sequences.append(curr_idx)

            previous_word = Variable(curr_idx, requires_grad=False)

            states, logits, attn = cell(states, previous_word)

        return sequences, score, attn_list
Example #48
def test_add_scalar(scalar, device, shape, dtype):
    x_np = array_utils.create_dummy_ndarray(numpy, shape, dtype)
    # Implicit casting in NumPy's add depends on the 'casting' argument,
    # which is not yet supported (ChainerX always casts).
    # Therefore, we explicitly cast the scalar to the dtype of the ndarray
    # before the addition for NumPy.
    expected = x_np + numpy.dtype(dtype).type(scalar)

    x = chainerx.array(x_np)
    chainerx.testing.assert_array_equal_ex(x + scalar, expected)
    chainerx.testing.assert_array_equal_ex(scalar + x, expected)
    chainerx.testing.assert_array_equal_ex(chainerx.add(x, scalar), expected)
    chainerx.testing.assert_array_equal_ex(chainerx.add(scalar, x), expected)
Example #49
def evaluate(model, X_test, Y_test, eval_size, batch_size):
    N_test = X_test.shape[0] if eval_size is None else eval_size

    if N_test > X_test.shape[0]:
        raise ValueError('Test size can be no larger than {}'.format(
            X_test.shape[0]))

    with chx.no_backprop_mode():
        total_loss = chx.array(0, dtype=chx.float32)
        num_correct = chx.array(0, dtype=chx.int64)
        for i in range(0, N_test, batch_size):
            x = X_test[i:min(i + batch_size, N_test)]
            t = Y_test[i:min(i + batch_size, N_test)]

            y = model.forward(x)
            total_loss += compute_loss(y, t) * batch_size
            num_correct += (y.argmax(axis=1).astype(t.dtype) == t).astype(
                chx.int32).sum()

    mean_loss = float(total_loss) / N_test
    accuracy = int(num_correct) / N_test
    return mean_loss, accuracy
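evaluate depends on a compute_loss helper that is not shown. A minimal sketch using chainerx primitives (an assumed softmax cross entropy averaged over the batch, in the spirit of the ChainerX MNIST example):

def compute_loss(y, t):
    # Softmax cross entropy over the class axis, averaged over the batch.
    score = chx.log_softmax(y, axis=1)
    mask = (t[:, chx.newaxis] == chx.arange(
        y.shape[1], dtype=t.dtype)).astype(score.dtype)
    return -(score * mask).sum() * (1 / y.shape[0])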
Example #50
def test_power_invalid_bool_dtype(device, dtype, is_bool_rhs,
                                  is_bool_primitive, is_module):
    shape = (3, 2)

    a = chainerx.array(array_utils.uniform(shape, dtype))

    if is_bool_primitive:
        b = True
    else:
        b = chainerx.array(array_utils.uniform(shape, 'bool'))

    with pytest.raises(chainerx.DtypeError):
        if is_module:
            if is_bool_rhs:
                chainerx.power(a, b)
            else:
                chainerx.power(b, a)
        else:
            if is_bool_rhs:
                a**b
            else:
                b**a
Example #51
def test_array_grad_with_backprop_id():
    array = chainerx.array([1., 1., 1.], chainerx.float32)
    grad = chainerx.array([0.5, 0.5, 0.5], chainerx.float32)

    with chainerx.backprop_scope('bp1') as bp1:
        with pytest.raises(chainerx.ChainerxError):
            array.get_grad(bp1)
        with pytest.raises(chainerx.ChainerxError):
            array.set_grad(grad, bp1)
        with pytest.raises(chainerx.ChainerxError):
            array.cleargrad(bp1)

        array.require_grad(bp1).set_grad(grad, bp1)
        assert array.get_grad(bp1) is not None
        assert array.get_grad(bp1)._debug_flat_data == grad._debug_flat_data

        array.cleargrad(bp1)  # clear
        assert array.get_grad(bp1) is None

    # keyword arguments
    with chainerx.backprop_scope('bp2') as bp2:
        with pytest.raises(chainerx.ChainerxError):
            array.get_grad(backprop_id=bp2)
        with pytest.raises(chainerx.ChainerxError):
            array.set_grad(grad, backprop_id=bp2)
        with pytest.raises(chainerx.ChainerxError):
            array.cleargrad(backprop_id=bp2)

        array.require_grad(backprop_id=bp2).set_grad(grad, backprop_id=bp2)
        assert array.get_grad(bp2) is not None
        assert array.get_grad(backprop_id=bp2) is not None
        assert array.get_grad(bp2)._debug_flat_data == grad._debug_flat_data
        assert array.get_grad(
            backprop_id=bp2)._debug_flat_data == grad._debug_flat_data

        array.cleargrad(backprop_id=bp2)  # clear
        assert array.get_grad(bp2) is None
        assert array.get_grad(backprop_id=bp2) is None
Example #52
def fromfile(file, dtype=float, count=-1, sep='', device=None):
    """Constructs an array from data in a text or binary file.

    This is currently equivalent to :func:`numpy.fromfile`
    wrapped by :func:`chainerx.array`, given the device argument.

    .. seealso:: :func:`numpy.fromfile`

    """
    return chainerx.array(numpy.fromfile(file,
                                         dtype=dtype,
                                         count=count,
                                         sep=sep),
                          device=device)
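A usage sketch round-tripping a small binary file through the wrapper above (the tempfile usage is illustrative only):

import tempfile

with tempfile.NamedTemporaryFile() as f:
    numpy.arange(6, dtype='int32').tofile(f)
    f.flush()
    a = fromfile(f.name, dtype='int32')
assert (chainerx.to_numpy(a) == numpy.arange(6, dtype='int32')).all()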
Example #53
def normal(*args, **kwargs):
    """normal(*args, **kwargs, device=None)

    Draws random samples from a normal (Gaussian) distribution.

    This is currently equivalent to :func:`numpy.random.normal`
    wrapped by :func:`chainerx.array`, given the device argument.

    .. seealso:: :func:`numpy.random.normal`
    """
    device = kwargs.pop('device', None)

    a = numpy.random.normal(*args, **kwargs)
    return chainerx.array(a, device=device, copy=False)
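A usage sketch; the arguments are forwarded to numpy.random.normal, so the result is float64 and lands on the default device unless one is given:

a = normal(loc=0.0, scale=1.0, size=(2, 3))
assert a.shape == (2, 3)
assert a.dtype == chainerx.float64  # numpy.random.normal returns float64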
Example #54
def test_dropout_inference():
    graph = chainer_compiler_core.load(
        os.path.join(ONNX_TEST_DATA, 'node/test_dropout_random/model.onnx'))
    input_names = graph.input_names()
    output_names = graph.output_names()
    assert len(input_names) == 1
    assert len(output_names) == 1

    xcvm = graph.compile()
    input = chainerx.array(np.random.normal(size=(3, 4, 5)))
    inputs = {input_names[0]: chainer_compiler_core.value(input)}
    outputs = xcvm.run(inputs)
    output = outputs[output_names[0]].array()

    assert bool(chainerx.sum(input != output) == 0)
Example #55
def test_assert_array_equal(shape, transpose, dtype1, dtype2):
    np_a = numpy.arange(2, 2 + numpy.prod(shape)).astype(dtype1).reshape(shape)
    if transpose:
        np_b = numpy.empty(np_a.T.shape, dtype=dtype2).T
        np_b[:] = np_a
    else:
        np_b = numpy.arange(2, 2 +
                            numpy.prod(shape)).astype(dtype2).reshape(shape)

    chx_a = chainerx.array(np_a)
    chx_b = chainerx.array(np_b)

    # Test precondition checks
    assert np_a.shape == np_b.shape
    if transpose:
        assert np_a.strides != np_b.strides, 'transpose=True is meaningless'

    # Test checks
    chainerx.testing.assert_array_equal(np_a, np_a)  # np-np (same obj)
    chainerx.testing.assert_array_equal(chx_a, chx_a)  # chx-chx (same obj)
    chainerx.testing.assert_array_equal(np_a, np_b)  # np-np (diff. obj)
    chainerx.testing.assert_array_equal(chx_a, chx_b)  # chx-chx (diff. obj)
    chainerx.testing.assert_array_equal(np_a, chx_b)  # np-chx
    chainerx.testing.assert_array_equal(chx_a, np_b)  # chx-np
Example #56
def test_array_from_chainerx_array_with_device(
        src_dtype, dst_dtype, copy, device, dst_device_spec):
    t = array_utils.create_dummy_ndarray(
        chainerx, (2,), src_dtype, device=device)
    a = chainerx.array(t, dtype=dst_dtype, copy=copy, device=dst_device_spec)

    dst_device = chainerx.get_device(dst_device_spec)

    if not copy and src_dtype == dst_dtype and device is dst_device:
        assert t is a
    else:
        assert t is not a
        chainerx.testing.assert_array_equal_ex(
            a, t.to_device(dst_device).astype(dst_dtype))
        assert a.dtype == chainerx.dtype(dst_dtype)
        assert a.device is dst_device