Example #1
def compute_loss(y, t):
    # softmax cross entropy
    score = chx.log_softmax(y, axis=1)
    mask = (t[:, chx.newaxis] == chx.arange(
        10, dtype=t.dtype)).astype(score.dtype)
    # TODO(beam2d): implement mean
    return -(score * mask).sum() * (1 / y.shape[0])
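A minimal usage sketch for compute_loss above (assuming import chainerx as chx; the logits and labels are illustrative values, not part of the original example):

import chainerx as chx

y = chx.arange(40, dtype='float32').reshape(4, 10)  # fake logits: 4 samples, 10 classes
t = chx.array([1, 0, 3, 9], dtype='int64')          # fake integer labels
loss = compute_loss(y, t)                           # 0-dim array holding the mean loss
print(chx.to_numpy(loss))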
Example #2
def test_chainerx_to_cupy_noncontiguous():
    dtype = 'float32'
    a_chx = chainerx.arange(12, dtype=dtype, device='cuda:0').reshape(
        (2, 6))[::-1, ::2]
    offset = a_chx.offset

    # test preconditions
    assert offset > 0
    assert not a_chx.is_contiguous

    a_cupy = cupy.ndarray(
        a_chx.shape,
        cupy.dtype(a_chx.dtype.name),
        cupy.cuda.MemoryPointer(
            cupy.cuda.UnownedMemory(a_chx.data_ptr, a_chx.data_size, a_chx, 0),
            offset),
        strides=a_chx.strides,
    )

    assert a_chx.strides == a_cupy.strides
    chainerx.testing.assert_array_equal_ex(a_chx,
                                           a_cupy.get(),
                                           strides_check=False)

    a_cupy[1, 1] = 53

    assert a_chx.strides == a_cupy.strides
    chainerx.testing.assert_array_equal_ex(a_chx,
                                           a_cupy.get(),
                                           strides_check=False)
Example #3
def test_chainerx_to_cupy_contiguous():
    dtype = 'float32'
    a_chx = chainerx.arange(6, dtype=dtype, device='cuda:0').reshape((2, 3))
    a_cupy = cupy.ndarray(
        a_chx.shape,
        cupy.dtype(a_chx.dtype.name),
        cupy.cuda.MemoryPointer(cupy.cuda.UnownedMemory(
            a_chx.data_ptr + a_chx.offset,
            a_chx.data_size,
            a_chx,
            0), 0),
        strides=a_chx.strides,
    )

    assert a_cupy.device.id == 0
    chainerx.testing.assert_array_equal_ex(a_chx, a_cupy.get())

    # Write to a_cupy
    a_cupy[0, 1] = 8
    chainerx.testing.assert_array_equal_ex(
        a_chx, numpy.array([[0, 8, 2], [3, 4, 5]], dtype))

    # Write to a_chx
    a_chx += 1
    chainerx.testing.assert_array_equal_ex(
        a_cupy.get(), numpy.array([[1, 9, 3], [4, 5, 6]], dtype))
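The tests above construct the CuPy view inline. A minimal helper sketch of the same zero-copy pattern (the function name chainerx_to_cupy_view and the use of a_chx.device.index are assumptions, not part of the original tests):

import cupy


def chainerx_to_cupy_view(a_chx):
    # Wrap the CUDA buffer owned by a ChainerX array as a CuPy ndarray without
    # copying; passing a_chx as the owner keeps the buffer alive.
    mem = cupy.cuda.UnownedMemory(
        a_chx.data_ptr + a_chx.offset,  # absolute address of the first element
        a_chx.data_size,                # size of the owned buffer in bytes
        a_chx,                          # owner reference
        a_chx.device.index)             # CUDA device id (assumed attribute)
    return cupy.ndarray(
        a_chx.shape,
        cupy.dtype(a_chx.dtype.name),
        cupy.cuda.MemoryPointer(mem, 0),
        strides=a_chx.strides)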
Example #4
def test_chainerx_to_cupy_noncontiguous():
    dtype = 'float32'
    a_chx = chainerx.arange(
        12, dtype=dtype, device='cuda:0').reshape((2, 6))[::-1, ::2]
    offset = a_chx.offset

    # test preconditions
    assert offset > 0
    assert not a_chx.is_contiguous

    a_cupy = cupy.ndarray(
        a_chx.shape,
        cupy.dtype(a_chx.dtype.name),
        cupy.cuda.MemoryPointer(cupy.cuda.UnownedMemory(
            a_chx.data_ptr,
            a_chx.data_size,
            a_chx,
            0), offset),
        strides=a_chx.strides,
    )

    assert a_chx.strides == a_cupy.strides
    chainerx.testing.assert_array_equal_ex(
        a_chx, a_cupy.get(), strides_check=False)

    a_cupy[1, 1] = 53

    assert a_chx.strides == a_cupy.strides
    chainerx.testing.assert_array_equal_ex(
        a_chx, a_cupy.get(), strides_check=False)
Example #5
def compute_loss(y, t):
    # softmax cross entropy
    score = chx.log_softmax(y, axis=1)
    mask = (t[:, chx.newaxis] == chx.arange(1000,
                                            dtype=t.dtype)).astype(score.dtype)
    # TODO(beam2d): implement mean
    return -(score * mask).sum() * (1 / y.shape[0])
Example #6
def test_ascontiguousarray_from_chainerx_array_device():
    with chainerx.using_device(chainerx.get_device('native:0')):
        dev = chainerx.get_device('native:1')  # Non default one
        assert chainerx.get_default_device() is not dev

        a = chainerx.arange(10, device=dev)
        b = chainerx.ascontiguousarray(a)
        assert b.is_contiguous is True
        assert b.device is dev
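A small hedged follow-up: chainerx.ascontiguousarray is also expected to return a compact contiguous copy when given a strided view (the device name below is illustrative):

a = chainerx.arange(10, device='native:0')[::2]  # strided view, not contiguous
assert not a.is_contiguous
b = chainerx.ascontiguousarray(a)
assert b.is_contiguous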
Example #7
    def forward_chainerx(self, inputs):
        # TODO(niboshi): Current implementation is only intended to support
        # MNIST example.
        x, t = inputs
        num_classes = x.shape[1]
        score = chainerx.log_softmax(x, axis=1)
        mask = (t[:, chainerx.newaxis] == chainerx.arange(
            num_classes, dtype=t.dtype, device=x.device)).astype(score.dtype)
        # TODO(beam2d): implement mean
        y = -(score * mask).sum() * (1 / x.shape[0])
        return y,
Example #8
    def forward_chainerx(self, inputs):
        # TODO(niboshi): Current implementation is only intended to support
        # MNIST example.
        x, t = inputs
        num_classes = x.shape[1]
        score = chainerx.log_softmax(x, axis=1)
        mask = (t[:, chainerx.newaxis] == chainerx.arange(
            num_classes, dtype=t.dtype, device=x.device)).astype(score.dtype)
        # TODO(beam2d): implement mean
        y = -(score * mask).sum() * (1 / x.shape[0])
        return y,
Example #9
    def test_scatter_dataset(self):
        n = self.communicator.size

        for shuffle in [True, False]:
            for root in range(self.communicator.size):
                self.check_scatter_dataset([], shuffle, root)
                self.check_scatter_dataset([0], shuffle, root)
                self.check_scatter_dataset(list(range(n)), shuffle, root)
                self.check_scatter_dataset(list(range(n * 5 - 1)), shuffle,
                                           root)

                self.check_scatter_dataset(np.array([]), shuffle, root)
                self.check_scatter_dataset(np.array([0]), shuffle, root)
                self.check_scatter_dataset(np.arange(n), shuffle, root)
                self.check_scatter_dataset(np.arange(n * 5 - 1), shuffle, root)

                self.check_scatter_dataset(chx.array([]), shuffle, root)
                self.check_scatter_dataset(chx.array([0]), shuffle, root)
                self.check_scatter_dataset(chx.arange(n), shuffle, root)
                self.check_scatter_dataset(chx.arange(n * 5 - 1), shuffle,
                                           root)
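A hedged sketch of the scatter pattern this test presumably exercises through check_scatter_dataset (the communicator type and the helper internals are assumptions; chainermn.scatter_dataset is the public ChainerMN entry point):

import chainermn
import chainerx as chx

comm = chainermn.create_communicator('naive')
dataset = chx.arange(comm.size * 5 - 1)
# Each process receives an approximately equal slice of the dataset; with
# shuffle=True the root permutes the indices before scattering.
local_dataset = chainermn.scatter_dataset(dataset, comm, shuffle=True, root=0)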
Example #10
def test_getitem_zero_sized_offsets(device):
    a = chainerx.arange(6)

    b = a[3:3]
    # Test pre-conditions.
    assert b.size == 0
    assert b.offset == 12

    # The offset of `c` should be the same as `b` since `b` is empty.
    c = b[2:]
    assert c.size == 0
    assert c.offset == b.offset
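The expected value of 12 follows from the slice start and the element size: a[3:3] begins 3 elements into the buffer, and the assertion implies a 4-byte item size, so the byte offset is 3 * 4 = 12. A minimal sanity check of that arithmetic (assuming itemsize is available on the array):

a = chainerx.arange(6)
assert a[3:3].offset == 3 * a.itemsize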
Example #11
def test_getitem_zero_sized_offsets(device):
    a = chainerx.arange(6)

    b = a[3:3]
    # Test pre-conditions.
    assert b.size == 0
    assert b.offset == 12

    # The offset of `c` should be the same as `b` since `b` is empty.
    c = b[2:]
    assert c.size == 0
    assert c.offset == b.offset
Example #12
def test_chainerx_to_cupy_nondefault_device():
    dtype = 'float32'
    a_chx = chainerx.arange(6, dtype=dtype, device='cuda:1').reshape((2, 3))
    a_cupy = cupy.ndarray(
        a_chx.shape,
        cupy.dtype(a_chx.dtype.name),
        cupy.cuda.MemoryPointer(
            cupy.cuda.UnownedMemory(a_chx.data_ptr + a_chx.offset,
                                    a_chx.data_size, a_chx, -1), 0),
        strides=a_chx.strides,
    )

    assert a_cupy.device.id == 1
    chainerx.testing.assert_array_equal_ex(a_chx, a_cupy.get())
Example #13
    def forward_chainerx(self, inputs):
        if self.reduce == 'mean' and self.normalize:
            x, t = inputs
            n_classes = x.shape[1]
            score = chainerx.log_softmax(x, axis=1)
            mask = (t[:, chainerx.newaxis] == chainerx.arange(
                n_classes, dtype=t.dtype, device=x.device)).astype(score.dtype)
            y = (score * mask).sum() * (-1 / mask.sum())
            return y,

        x, t = inputs
        y = chainerx.softmax_cross_entropy(x, t)
        if self.reduce == 'mean':
            return y.mean(),
        return y,
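For reference, a hedged sketch of the fallback primitive used above; per the code, chainerx.softmax_cross_entropy returns per-sample losses and mean() reduces them (the input values are illustrative):

x = chainerx.arange(8, dtype='float32').reshape(2, 4)  # fake logits
t = chainerx.array([0, 3], dtype='int64')              # fake labels
per_sample = chainerx.softmax_cross_entropy(x, t)      # shape (2,)
mean_loss = per_sample.mean()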
Example #14
def test_chainerx_to_cupy_nondefault_device():
    dtype = 'float32'
    a_chx = chainerx.arange(6, dtype=dtype, device='cuda:1').reshape((2, 3))
    a_cupy = cupy.ndarray(
        a_chx.shape,
        cupy.dtype(a_chx.dtype.name),
        cupy.cuda.MemoryPointer(cupy.cuda.UnownedMemory(
            a_chx.data_ptr + a_chx.offset,
            a_chx.data_size,
            a_chx,
            -1), 0),
        strides=a_chx.strides,
    )

    assert a_cupy.device.id == 1
    chainerx.testing.assert_array_equal_ex(a_chx, a_cupy.get())
Example #15
def test_chainerx_to_cupy_delete_chainerx_first():
    dtype = 'float32'
    a_chx = chainerx.arange(6, dtype=dtype, device='cuda:0').reshape((2, 3))
    a_cupy = cupy.ndarray(
        a_chx.shape,
        cupy.dtype(a_chx.dtype.name),
        cupy.cuda.MemoryPointer(
            cupy.cuda.UnownedMemory(a_chx.data_ptr + a_chx.offset,
                                    a_chx.data_size, a_chx, 0), 0),
        strides=a_chx.strides,
    )

    del a_chx

    a_cupy += 1
    chainerx.testing.assert_array_equal_ex(
        a_cupy.get(), numpy.array([[1, 2, 3], [4, 5, 6]], dtype))
Example #16
def test_chainerx_to_cupy_delete_chainerx_first():
    dtype = 'float32'
    a_chx = chainerx.arange(6, dtype=dtype, device='cuda:0').reshape((2, 3))
    a_cupy = cupy.ndarray(
        a_chx.shape,
        cupy.dtype(a_chx.dtype.name),
        cupy.cuda.MemoryPointer(cupy.cuda.UnownedMemory(
            a_chx.data_ptr + a_chx.offset,
            a_chx.data_size,
            a_chx,
            0), 0),
        strides=a_chx.strides,
    )

    del a_chx

    a_cupy += 1
    chainerx.testing.assert_array_equal_ex(
        a_cupy.get(), numpy.array([[1, 2, 3], [4, 5, 6]], dtype))
Example #17
def test_chainerx_to_cupy_contiguous():
    dtype = 'float32'
    a_chx = chainerx.arange(6, dtype=dtype, device='cuda:0').reshape((2, 3))
    a_cupy = cupy.ndarray(
        a_chx.shape,
        cupy.dtype(a_chx.dtype.name),
        cupy.cuda.MemoryPointer(
            cupy.cuda.UnownedMemory(a_chx.data_ptr + a_chx.offset,
                                    a_chx.data_size, a_chx, 0), 0),
        strides=a_chx.strides,
    )

    assert a_cupy.device.id == 0
    chainerx.testing.assert_array_equal_ex(a_chx, a_cupy.get())

    # Write to a_cupy
    a_cupy[0, 1] = 8
    chainerx.testing.assert_array_equal_ex(
        a_chx, numpy.array([[0, 8, 2], [3, 4, 5]], dtype))

    # Write to a_chx
    a_chx += 1
    chainerx.testing.assert_array_equal_ex(
        a_cupy.get(), numpy.array([[1, 9, 3], [4, 5, 6]], dtype))
Example #18
    def check(*args, **kwargs):
        a = chainerx.arange(*args, device=device, **kwargs)
        b = chainerx.arange(*args, **kwargs)
        array_utils.check_device(a, device)
        chainerx.testing.assert_array_equal_ex(a, b)
Example #19
def compute_loss(y, t):
    score = chx.log_softmax(y, axis=1)
    mask = (t[:, chx.newaxis] == chx.arange(
        10, dtype=t.dtype)).astype(score.dtype)
    return -(score * mask).sum() * (1 / y.shape[0])
Example #20
    def check(*args, **kwargs):
        a = chainerx.arange(*args, device=device, **kwargs)
        b = chainerx.arange(*args, **kwargs)
        array_utils.check_device(a, device)
        chainerx.testing.assert_array_equal_ex(a, b)
Example #21
def test_to_numpy_positive_offset(device, copy):
    a_chx = chainerx.arange(6).reshape(2, 3)[:, 1:]
    a_np = chainerx.to_numpy(a_chx, copy)
    _check_to_numpy(a_np, a_chx, device, copy)
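A hedged standalone illustration of the copy flag, without the _check_to_numpy helper (treating the view behavior of copy=False on a native-device array as an assumption):

a_chx = chainerx.arange(6).reshape(2, 3)[:, 1:]  # view with a positive offset
a_np_copy = chainerx.to_numpy(a_chx, True)   # independent NumPy copy
a_np_view = chainerx.to_numpy(a_chx, False)  # expected to share memory with a_chx on the native device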
Example #22
def test_to_numpy_positive_offset(device, copy):
    a_chx = chainerx.arange(6).reshape(2, 3)[:, 1:]
    a_np = chainerx.to_numpy(a_chx, copy)
    _check_to_numpy(a_np, a_chx, device, copy)