Example #1
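These snippets come from the Aesara gpuarray test suite and omit their imports. A plausible preamble is sketched below; the module paths follow the Theano/Aesara gpuarray layout and may not match the exact Aesara version these snippets target, so treat them as assumptions rather than guaranteed API.

# Assumed imports for the snippets below; module paths are not
# guaranteed by this listing.
import numpy as np

import aesara
from aesara import tensor
from aesara.gpuarray.subtensor import (  # path assumed
    GpuAdvancedIncSubtensor1,
    GpuAdvancedIncSubtensor1_dev20,
    GpuIncSubtensor,
)
from aesara.gpuarray.type import gpuarray_shared_constructor  # path assumed

# mode_with_gpu is defined in the test suite's config module; a minimal
# stand-in (assumption) would be:
mode_with_gpu = aesara.compile.mode.get_default_mode().including("gpuarray")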
def test_advinc_subtensor1_dtype():
    # Test the mixed dtype case
    shp = (3, 4)
    for dtype1, dtype2 in [
        ("float32", "int8"),
        ("float32", "float64"),
        ("uint64", "int8"),
        ("int64", "uint8"),
        ("float16", "int8"),
        ("float16", "float64"),
        ("float16", "float16"),
    ]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype=dtype1).reshape(shp) + 1
        yval = np.empty((2, ) + shp[1:], dtype=dtype2)
        yval[:] = 10
        x = shared(xval, name="x")
        y = tensor.tensor(dtype=yval.dtype,
                          broadcastable=(False, ) * len(yval.shape),
                          name="y")
        expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        f = aesara.function([y], expr, mode=mode_with_gpu)
        # exactly one dev20 advanced-inc-subtensor op should appear
        # in the optimized graph
        assert sum(
            isinstance(node.op, GpuAdvancedIncSubtensor1_dev20)
            for node in f.maker.fgraph.toposort()
        ) == 1
        rval = f(yval)
        rep = xval.copy()
        np.add.at(rep, [0, 2], yval)
        assert np.allclose(rval, rep)
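For reference, `tensor.advanced_inc_subtensor1(x, y, ilist)` builds a new tensor equal to x with y accumulated at the rows listed in ilist, which is exactly what the `np.add.at` reference computation above mirrors. A pure-NumPy sketch of the semantics (function and variable names here are illustrative only):

# Pure-NumPy model of advanced_inc_subtensor1: returns x with y added
# at the rows given by idx; x itself is not modified.
def advanced_inc_subtensor1_ref(x, y, idx):
    out = x.copy()
    np.add.at(out, idx, y)  # accumulates even for duplicate indices
    return out

xv = np.arange(12, dtype="float32").reshape(3, 4) + 1
yv = np.full((2, 4), 10, dtype="float32")
print(advanced_inc_subtensor1_ref(xv, yv, [0, 2]))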
Example #2
def test_incsub_f16():
    # Test float16 increments through both the advanced and the
    # slice-based inc_subtensor GPU paths
    shp = (3, 3)
    shared = gpuarray_shared_constructor
    xval = np.arange(np.prod(shp), dtype="float16").reshape(shp) + 1
    yval = np.empty((2, ) + shp[1:], dtype="float16")
    yval[:] = 2
    x = shared(xval, name="x")
    y = tensor.tensor(dtype="float16",
                      broadcastable=(False, ) * len(shp),
                      name="y")
    expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
    f = aesara.function([y], expr, mode=mode_with_gpu)
    # exactly one advanced-inc-subtensor op should appear in the graph
    assert sum(
        isinstance(node.op, GpuAdvancedIncSubtensor1)
        for node in f.maker.fgraph.toposort()
    ) == 1
    rval = f(yval)
    rep = xval.copy()
    np.add.at(rep, [0, 2], yval)
    assert np.allclose(rval, rep)

    expr = tensor.inc_subtensor(x[1:], y)
    f = aesara.function([y], expr, mode=mode_with_gpu)
    # the slice-based increment should be lifted to GpuIncSubtensor
    assert sum(
        isinstance(node.op, GpuIncSubtensor)
        for node in f.maker.fgraph.toposort()
    ) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[1:] += yval
    assert np.allclose(rval, rep)
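The second half of this test exercises the basic, slice-indexed path: `tensor.inc_subtensor(x[1:], y)` is lifted to `GpuIncSubtensor` rather than the advanced op, because the target region is a contiguous slice instead of an integer-vector index. A NumPy sketch of the two flavours side by side (values match this test; names are illustrative):

# NumPy analogues of the two inc_subtensor flavours used above.
xv = np.arange(9, dtype="float16").reshape(3, 3) + 1
yv = np.full((2, 3), 2, dtype="float16")

adv = xv.copy()
np.add.at(adv, [0, 2], yv)  # advanced: integer-vector row index

basic = xv.copy()
basic[1:] += yv             # basic: contiguous slice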
Example #3
def test_advinc_subtensor1_vector_scalar():
    # Test the case where x is a vector and y a scalar
    shp = (3, )
    for dtype1, dtype2 in [
        ("float32", "int8"),
        ("float32", "float64"),
        ("float16", "int8"),
        ("float16", "float64"),
        ("float16", "float16"),
        ("int8", "int8"),
        ("int16", "int16"),
    ]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype=dtype1).reshape(shp) + 1
        yval = np.asarray(10, dtype=dtype2)
        x = shared(xval, name="x")
        y = tensor.tensor(dtype=yval.dtype,
                          broadcastable=(False, ) * len(yval.shape),
                          name="y")
        expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        f = aesara.function([y], expr, mode=mode_with_gpu)

        # exactly one of the two GPU advanced-inc-subtensor ops
        # should appear in the optimized graph
        assert sum(
            isinstance(
                node.op,
                (GpuAdvancedIncSubtensor1_dev20, GpuAdvancedIncSubtensor1),
            )
            for node in f.maker.fgraph.toposort()
        ) == 1
        rval = f(yval)
        rep = xval.copy()
        rep[[0, 2]] += yval
        assert np.allclose(rval, rep)
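Note that this reference computation uses fancy-indexed `+=`, which matches `advanced_inc_subtensor1` (and `np.add.at`) only because the index list `[0, 2]` has no duplicates; with repeated indices, NumPy's buffered fancy `+=` applies the increment once per unique row. A small demonstration of the difference:

a = np.zeros(3)
a[[0, 0]] += 1           # duplicate index: increment applied only once
print(a)                 # [1. 0. 0.]

b = np.zeros(3)
np.add.at(b, [0, 0], 1)  # unbuffered: both increments accumulate
print(b)                 # [2. 0. 0.]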
Example #4
def test_advinc_subtensor1():
    # Test the second case of the local_gpu_advanced_incsubtensor1 optimization
    for shp in [(3, 3), (3, 3, 3)]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype="float32").reshape(shp) + 1
        yval = np.empty((2, ) + shp[1:], dtype="float32")
        yval[:] = 10
        x = shared(xval, name="x")
        y = tensor.tensor(dtype="float32",
                          broadcastable=(False, ) * len(shp),
                          name="y")
        expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        f = aesara.function([y], expr, mode=mode_with_gpu)
        # exactly one advanced-inc-subtensor op should appear in the graph
        assert sum(
            isinstance(node.op, GpuAdvancedIncSubtensor1)
            for node in f.maker.fgraph.toposort()
        ) == 1
        rval = f(yval)
        rep = xval.copy()
        np.add.at(rep, [0, 2], yval)
        assert np.allclose(rval, rep)
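All four tests assert the same invariant: exactly one GPU inc-subtensor op survives in the optimized graph of the compiled function. That check could be factored into a small helper; `count_ops` below is a hypothetical convenience for such tests, not part of Aesara's API:

# Hypothetical helper: count nodes of the given op type(s) in a
# compiled function's optimized graph.
def count_ops(f, op_types):
    return sum(
        isinstance(node.op, op_types)
        for node in f.maker.fgraph.toposort()
    )

# usage: assert count_ops(f, GpuAdvancedIncSubtensor1) == 1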