Example #1
def test_clip_inplace():

    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    # test two sided clip
    py_matrix.clip_inplace(py_mat, a_min=0, a_max=1)
    torch_matrix.clip_inplace(torch_mat, a_min=0, a_max=1)

    assert_close(py_mat, torch_mat, "clip_inplace (two-sided)")

    # test lower clip
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_matrix.clip_inplace(py_mat, a_min=0)
    torch_matrix.clip_inplace(torch_mat, a_min=0)

    assert_close(py_mat, torch_mat, "clip_inplace (lower)")

    # test upper clip
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_matrix.clip_inplace(py_mat, a_max=1)
    torch_matrix.clip_inplace(torch_mat, a_max=1)

    assert_close(py_mat, torch_mat, "clip_inplace (upper)")
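
These snippets are not self-contained: each one assumes a NumPy backend (py_matrix, py_rand, py_func), a PyTorch backend (torch_matrix, torch_func, torch_rand), and a pair of comparison helpers. The stand-ins below are a minimal sketch of what those helpers might look like; the names, signatures, and tolerances are assumptions, not the library's actual API.

# hypothetical stand-ins for the helpers assumed throughout these examples
import numpy as np
import torch

def float_tensor(array):
    # NumPy array -> float32 torch tensor
    return torch.as_tensor(np.asarray(array), dtype=torch.float32)

def to_numpy_array(tensor):
    # torch tensor -> NumPy array
    return tensor.detach().cpu().numpy()

def allclose(x, y, rtol=1e-05, atol=1e-06):
    # scalar/array comparison used by the plain `assert allclose(...)` checks
    return np.allclose(np.asarray(x), np.asarray(y), rtol=rtol, atol=atol)

def assert_close(py_result, torch_result, name, rtol=1e-05, atol=1e-06):
    # compare a NumPy-backend result against the torch-backend result
    assert np.allclose(np.asarray(py_result), to_numpy_array(torch_result),
                       rtol=rtol, atol=atol), name + ": mismatch"
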
Example #2
def test_stack():
    # vector
    shape = (100,)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_res = py_matrix.stack([py_mat, py_mat], axis=0)
    torch_res = torch_matrix.stack([torch_mat, torch_mat], axis=0)

    assert_close(py_res, torch_res, "stack: vectors, axis=0")

    py_res = py_matrix.stack([py_mat, py_mat], axis=1)
    torch_res = torch_matrix.stack([torch_mat, torch_mat], axis=1)

    assert_close(py_res, torch_res, "stack: vectors, axis=1")

    # matrix
    shape = (100, 100)

    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_res = py_matrix.stack([py_mat, py_mat], axis=0)
    torch_res = torch_matrix.stack([torch_mat, torch_mat], axis=0)

    assert_close(py_res, torch_res, "stack: matrices, axis=0")

    py_res = py_matrix.stack([py_mat, py_mat], axis=1)
    torch_res = torch_matrix.stack([torch_mat, torch_mat], axis=1)

    assert_close(py_res, torch_res, "stack: matrices, axis=1")
Example #3
def test_fast_energy_distance():
    shape = (1000, 50)
    downsample = 100
    # close distributions
    a_mean, a_scale = 0, 1
    b_mean, b_scale = 0.1, 0.9

    py_rand.set_seed()
    py_a = a_mean + a_scale * py_rand.randn(shape)
    py_b = b_mean + b_scale * py_rand.randn(shape)
    torch_a = torch_matrix.float_tensor(py_a)
    torch_b = torch_matrix.float_tensor(py_b)

    py_dist = py_matrix.fast_energy_distance(py_a,
                                             py_b,
                                             downsample=downsample)

    torch_dist = torch_matrix.fast_energy_distance(torch_a,
                                                   torch_b,
                                                   downsample=downsample)

    # python fast_energy_distance is stochastic even after calling
    # py_rand.set_seed() because it uses the numba random number
    # generator rather than the one in numpy.
    # this test can fail stochastically
    assert py_dist < 0.25, \
    "python energy distance is too big"

    assert torch_dist < 0.25, \
    "torch energy distance is too big"

    # distant distributions
    a_mean, a_scale = 1, 1
    b_mean, b_scale = -1, 1

    py_rand.set_seed()
    py_a = a_mean + a_scale * py_rand.randn(shape)
    py_b = b_mean + b_scale * py_rand.randn(shape)
    torch_a = torch_matrix.float_tensor(py_a)
    torch_b = torch_matrix.float_tensor(py_b)

    py_dist = py_matrix.fast_energy_distance(py_a,
                                             py_b,
                                             downsample=downsample)

    torch_dist = torch_matrix.fast_energy_distance(torch_a,
                                                   torch_b,
                                                   downsample=downsample)

    assert py_dist > 10, \
    "python energy distance is too small"

    assert torch_dist > 10, \
    "torch energy distance is too small"
Example #4
def test_logaddexp():
    shape = (100, 100)

    py_rand.set_seed()
    py_x_1 = py_rand.randn(shape)
    py_x_2 = py_rand.randn(shape)

    torch_x_1 = torch_matrix.float_tensor(py_x_1)
    torch_x_2 = torch_matrix.float_tensor(py_x_2)

    py_y = py_func.logaddexp(py_x_1, py_x_2)
    torch_y = torch_func.logaddexp(torch_x_1, torch_x_2)
    assert_close(py_y, torch_y, "logaddexp")
Example #5
def test_sqrt_div():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
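    # square the second argument so it is non-negative (presumably sqrt_div divides by its square root)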
    py_y = py_rand.randn(shape) ** 2

    torch_x = torch_matrix.float_tensor(py_x)
    torch_y = torch_matrix.float_tensor(py_y)

    py_sqrt_div = py_matrix.sqrt_div(py_x, py_y)
    torch_sqrt_div = torch_matrix.sqrt_div(torch_x, torch_y)

    assert_close(py_sqrt_div, torch_sqrt_div, "sqrt_div")
Example #6
def test_outer():
    a_shape = (100,)
    b_shape = (100,)

    py_rand.set_seed()
    py_a = py_rand.randn(a_shape)
    py_b = py_rand.randn(b_shape)
    torch_a = torch_matrix.float_tensor(py_a)
    torch_b = torch_matrix.float_tensor(py_b)

    py_res = py_matrix.outer(py_a, py_b)
    torch_res = torch_matrix.outer(torch_a, torch_b)

    assert_close(py_res, torch_res, "outer")
Example #7
def test_tall():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    py_y = py_rand.randn(shape)

    torch_x = torch_matrix.float_tensor(py_x)
    torch_y = torch_matrix.float_tensor(py_y)

    py_res = py_matrix.lesser_equal(py_x, py_y)
    torch_res = torch_matrix.lesser_equal(torch_x, torch_y)

    # overall
    py_all = py_matrix.tall(py_res)
    torch_all = torch_matrix.tall(torch_res)
    assert py_all == torch_all, \
    "python tall != torch tall: overall"

    # axis = 0
    py_all = py_matrix.tall(py_res, axis=0)
    torch_all = torch_matrix.tall(torch_res, axis=0)
    py_torch_all = torch_matrix.to_numpy_array(torch_all)

    assert py_matrix.allclose(py_all, py_torch_all), \
    "python tall != torch tall: (axis-0)"

    # axis = 1
    py_all = py_matrix.tall(py_res, axis=1)
    torch_all = torch_matrix.tall(torch_res, axis=1)
    py_torch_all = torch_matrix.to_numpy_array(torch_all)

    assert py_matrix.allclose(py_all, py_torch_all), \
    "python tall != torch tall: (axis-1)"

    # axis = 0, keepdims
    py_all = py_matrix.tall(py_res, axis=0, keepdims=True)
    torch_all = torch_matrix.tall(torch_res, axis=0, keepdims=True)
    py_torch_all = torch_matrix.to_numpy_array(torch_all)

    assert py_matrix.allclose(py_all, py_torch_all), \
    "python tall != torch tall: (axis-0, keepdims)"

    # axis = 1, keepdims
    py_all = py_matrix.tall(py_res, axis=1, keepdims=True)
    torch_all = torch_matrix.tall(torch_res, axis=1, keepdims=True)
    py_torch_all = torch_matrix.to_numpy_array(torch_all)

    assert py_matrix.allclose(py_all, py_torch_all), \
    "python tall != torch tall: (axis-1, keepdim)"
Example #8
def test_minimum():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    py_y = py_rand.randn(shape)

    torch_x = torch_matrix.float_tensor(py_x)
    torch_y = torch_matrix.float_tensor(py_y)

    py_res = py_matrix.minimum(py_x, py_y)
    torch_res = torch_matrix.minimum(torch_x, torch_y)

    assert_close(py_res, torch_res, "minimum")
Example #9
def test_squared_euclidean_distance():
    shape = (100,)

    py_rand.set_seed()
    py_a = py_rand.randn(shape)
    py_b = py_rand.randn(shape)

    torch_a = torch_matrix.float_tensor(py_a)
    torch_b = torch_matrix.float_tensor(py_b)

    py_dist = py_matrix.squared_euclidean_distance(py_a, py_b)
    torch_dist = torch_matrix.squared_euclidean_distance(torch_a, torch_b)

    assert allclose(py_dist, torch_dist), \
    "squared_euclidean_distance failure"
Example #10
def test_square_mix_inplace():
    shape = (100, 100)
    torch_w = 0.1
    py_w = py_matrix.float_scalar(torch_w)

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    py_y = py_rand.randn(shape)

    torch_x = torch_matrix.float_tensor(py_x)
    torch_y = torch_matrix.float_tensor(py_y)

    py_matrix.square_mix_inplace(py_w, py_x, py_y)
    torch_matrix.square_mix_inplace(torch_w, torch_x, torch_y)

    assert_close(py_x, torch_x, "square_mix_inplace")
Example #11
def test_tsum():
    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    # overall tsum
    py_tsum = py_matrix.tsum(py_mat)
    torch_tsum = torch_matrix.tsum(torch_mat)

    assert allclose(py_tsum, torch_tsum), \
    "python overal tsum != torch overall tsum"

    # tsum over axis 0
    py_tsum = py_matrix.tsum(py_mat, axis=0)
    torch_tsum = torch_matrix.tsum(torch_mat, axis=0)
    assert_close(py_tsum, torch_tsum, "tsum (axis-0)")

    # tsum over axis 1
    py_tsum = py_matrix.tsum(py_mat, axis=1)
    torch_tsum = torch_matrix.tsum(torch_mat, axis=1)
    assert_close(py_tsum, torch_tsum, "tsum (axis-1)")

    # tsum over axis 0, keepdims = True
    py_tsum = py_matrix.tsum(py_mat, axis=0, keepdims=True)
    torch_tsum = torch_matrix.tsum(torch_mat, axis=0, keepdims=True)
    assert_close(py_tsum, torch_tsum, "tsum (axis-0, keepdims)")

    # tsum over axis 1, keepdims = True
    py_tsum = py_matrix.tsum(py_mat, axis=1, keepdims=True)
    torch_tsum = torch_matrix.tsum(torch_mat, axis=1, keepdims=True)
    assert_close(py_tsum, torch_tsum, "tsum (axis-1, keepdims)")
Example #12
def test_tmax():
    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    # overall max
    py_max = py_matrix.tmax(py_mat)
    torch_max = torch_matrix.tmax(torch_mat)

    assert allclose(py_max, torch_max), \
    "python overal max != torch overall max"

    # max over axis 0
    py_max = py_matrix.tmax(py_mat, axis=0)
    torch_max = torch_matrix.tmax(torch_mat, axis=0)
    assert_close(py_max, torch_max, "tmax (axis-0)")

    # max over axis 1
    py_max = py_matrix.tmax(py_mat, axis=1)
    torch_max = torch_matrix.tmax(torch_mat, axis=1)
    assert_close(py_max, torch_max, "tmax (axis-1)")

    # max over axis 0, keepdims = True
    py_max = py_matrix.tmax(py_mat, axis=0, keepdims=True)
    torch_max = torch_matrix.tmax(torch_mat, axis=0, keepdims=True)
    assert_close(py_max, torch_max, "tmax (axis-0, keepdims)")

    # max over axis 1, keepdims = True
    py_max = py_matrix.tmax(py_mat, axis=1, keepdims=True)
    torch_max = torch_matrix.tmax(torch_mat, axis=1, keepdims=True)
    assert_close(py_max, torch_max, "tmax (axis-1, keepdims)")
Example #13
def test_mean():
    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    # overall mean
    py_mean = py_matrix.mean(py_mat)
    torch_mean = torch_matrix.mean(torch_mat)

    assert allclose(py_mean, torch_mean), \
    "python overal mean != torch overall mean"

    # mean over axis 0
    py_mean = py_matrix.mean(py_mat, axis=0)
    torch_mean = torch_matrix.mean(torch_mat, axis=0)
    assert_close(py_mean, torch_mean, "mean (axis-0)")

    # mean over axis 1
    py_mean = py_matrix.mean(py_mat, axis=1)
    torch_mean = torch_matrix.mean(torch_mat, axis=1)
    assert_close(py_mean, torch_mean, "mean (axis-1)")

    # mean over axis 0, keepdims = True
    py_mean = py_matrix.mean(py_mat, axis=0, keepdims=True)
    torch_mean = torch_matrix.mean(torch_mat, axis=0, keepdims=True)
    assert_close(py_mean, torch_mean, "mean (axis-0, keepdims)")

    # mean over axis 1, keepdims = True
    py_mean = py_matrix.mean(py_mat, axis=1, keepdims=True)
    torch_mean = torch_matrix.mean(torch_mat, axis=1, keepdims=True)
    assert_close(py_mean, torch_mean, "mean (axis-1, keepdims)")
Example #14
def test_var():
    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    # overall var
    py_var = py_matrix.var(py_mat)
    torch_var = torch_matrix.var(torch_mat)

    assert allclose(py_var, torch_var), \
    "python overal var != torch overall var"

    # var over axis 0
    py_var = py_matrix.var(py_mat, axis=0)
    torch_var = torch_matrix.var(torch_mat, axis=0)
    assert_close(py_var, torch_var, "var (axis-0)")

    # var over axis 1
    py_var = py_matrix.var(py_mat, axis=1)
    torch_var = torch_matrix.var(torch_mat, axis=1)
    assert_close(py_var, torch_var, "var (axis-1)")

    # var over axis 0, keepdims = True
    py_var = py_matrix.var(py_mat, axis=0, keepdims=True)
    torch_var = torch_matrix.var(torch_mat, axis=0, keepdims=True)
    assert_close(py_var, torch_var, "var (axis-0, keepdims)")

    # var over axis 1, keepdims = True
    py_var = py_matrix.var(py_mat, axis=1, keepdims=True)
    torch_var = torch_matrix.var(torch_mat, axis=1, keepdims=True)
    assert_close(py_var, torch_var, "var (axis-1, keepdims)")
Example #15
def test_not_equal():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    py_y = py_rand.randn(shape)

    torch_x = torch_matrix.float_tensor(py_x)
    torch_y = torch_matrix.float_tensor(py_y)

    py_neq = py_matrix.not_equal(py_x, py_y)
    torch_neq = torch_matrix.not_equal(torch_x, torch_y)
    py_torch_neq = torch_matrix.to_numpy_array(torch_neq)

    assert py_matrix.allclose(py_neq, py_torch_neq), \
    "python not equal != torch not equal"
Example #16
def test_lesser_equal():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    py_y = py_rand.randn(shape)

    torch_x = torch_matrix.float_tensor(py_x)
    torch_y = torch_matrix.float_tensor(py_y)

    py_res = py_matrix.lesser_equal(py_x, py_y)
    torch_res = torch_matrix.lesser_equal(torch_x, torch_y)
    py_torch_res = torch_matrix.to_numpy_array(torch_res)

    assert py_matrix.allclose(py_res, py_torch_res), \
    "python lesser_equal != torch lesser_equal"
Example #17
def test_greater():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    py_y = py_rand.randn(shape)

    torch_x = torch_matrix.float_tensor(py_x)
    torch_y = torch_matrix.float_tensor(py_y)

    py_res = py_matrix.greater(py_x, py_y)
    torch_res = torch_matrix.greater(torch_x, torch_y)
    py_torch_res = torch_matrix.to_numpy_array(torch_res)

    assert py_matrix.allclose(py_res, py_torch_res), \
    "python greater != torch greater"
Example #18
def test_tprod():
    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    # overall tprod
    py_tprod = py_matrix.tprod(py_mat)
    torch_tprod = torch_matrix.tprod(torch_mat)

    assert allclose(py_tprod, torch_tprod), \
    "python overal tprod != torch overall tprod"

    # tprod over axis 0
    py_tprod = py_matrix.tprod(py_mat, axis=0)
    torch_tprod = torch_matrix.tprod(torch_mat, axis=0)
    assert_close(py_tprod, torch_tprod, "tprod (axis-0)")

    # tprod over axis 1
    py_tprod = py_matrix.tprod(py_mat, axis=1)
    torch_tprod = torch_matrix.tprod(torch_mat, axis=1)
    assert_close(py_tprod, torch_tprod, "tprod (axis-1)")

    # tprod over axis 0, keepdims = True
    py_tprod = py_matrix.tprod(py_mat, axis=0, keepdims=True)
    torch_tprod = torch_matrix.tprod(torch_mat, axis=0, keepdims=True)
    assert_close(py_tprod, torch_tprod, "tprod (axis-0, keepdims)")

    # tprod over axis 1, keepdims = True
    py_tprod = py_matrix.tprod(py_mat, axis=1, keepdims=True)
    torch_tprod = torch_matrix.tprod(torch_mat, axis=1, keepdims=True)
    assert_close(py_tprod, torch_tprod, "tprod (axis-1, keepdims)")
Example #19
def test_conversion():

    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.rand(shape)
    torch_x = torch_matrix.float_tensor(py_x)
    py_torch_x = torch_matrix.to_numpy_array(torch_x)

    assert py_matrix.allclose(py_x, py_torch_x), \
    "python -> torch -> python failure"

    torch_rand.set_seed()
    torch_y = torch_rand.rand(shape)
    py_y = torch_matrix.to_numpy_array(torch_y)
    torch_py_y = torch_matrix.float_tensor(py_y)

    assert torch_matrix.allclose(torch_y, torch_py_y), \
    "torch -> python -> torch failure"
Example #20
def test_reciprocal():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.rand(shape)
    torch_x = torch_matrix.float_tensor(py_x)

    py_y = py_func.reciprocal(py_x)
    torch_y = torch_func.reciprocal(torch_x)
    assert_close(py_y, torch_y, "reciprocal")
Example #21
def test_acosh():
    shape = (100, 100)

    py_rand.set_seed()
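    # shift samples into [1, 2): acosh is only defined for x >= 1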
    py_x = 1 + py_rand.rand(shape)
    torch_x = torch_matrix.float_tensor(py_x)

    py_y = py_func.acosh(py_x)
    torch_y = torch_func.acosh(torch_x)
    assert_close(py_y, torch_y, "acosh")
Example #22
def test_logit():
    shape = (100, 100)

    py_rand.set_seed()
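    # logit(p) = log(p / (1 - p)) needs p in (0, 1), which uniform samples (almost surely) satisfy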
    py_x = py_rand.rand(shape)
    torch_x = torch_matrix.float_tensor(py_x)

    py_y = py_func.logit(py_x)
    torch_y = torch_func.logit(torch_x)
    assert_close(py_y, torch_y, "logit")
Example #23
def test_sin():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    torch_x = torch_matrix.float_tensor(py_x)

    py_y = py_func.sin(py_x)
    torch_y = torch_func.sin(torch_x)
    assert_close(py_y, torch_y, "sin")
Example #24
def test_batch_outer():
    L = 10
    N = 100
    M = 50

    v_shape = (L, N)
    h_shape = (L, M)

    py_rand.set_seed()
    py_v = py_rand.randn(v_shape)
    py_h = py_rand.randn(h_shape)

    torch_v = torch_matrix.float_tensor(py_v)
    torch_h = torch_matrix.float_tensor(py_h)

    py_res = py_matrix.batch_outer(py_v, py_h)
    torch_res = torch_matrix.batch_outer(torch_v, torch_h)

    assert_close(py_res, torch_res, "batch_outer")
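
batch_outer presumably accumulates the outer product of matching rows of v and h, reducing the (L, N) and (L, M) inputs to an (N, M) matrix. A hedged NumPy sketch (this version sums over the batch; whether the library sums or averages is not shown here):

import numpy as np

def batch_outer_sketch(v, h):
    # result[n, m] = sum over l of v[l, n] * h[l, m]
    return np.einsum('ln,lm->nm', v, h)
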
Example #25
def test_atanh():
    shape = (100, 100)
    py_rand.set_seed()
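    # map samples into [-1, 1); atanh is finite only on the open interval (-1, 1)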
    py_x = 2 * py_rand.rand(shape) - 1
    torch_x = torch_matrix.float_tensor(py_x)

    py_y = py_func.atanh(py_x)
    torch_y = torch_func.atanh(torch_x)
    # the atanh function is a bit less precise than the others
    # so the tolerance is a bit more flexible
    assert_close(py_y, torch_y, "atanh", rtol=1e-05, atol=1e-07)
Example #26
def test_sign():

    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_sign = py_matrix.sign(py_mat)
    torch_sign = torch_matrix.sign(torch_mat)
    assert_close(py_sign, torch_sign, "sign")
Example #27
def test_tpow():
    shape = (100, 100)
    power = 3

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    torch_x = torch_matrix.float_tensor(py_x)

    py_y = py_func.tpow(py_x, power)
    torch_y = torch_func.tpow(torch_x, power)
    assert_close(py_y, torch_y, "tpow")
Example #28
def test_reshape():
    shape = (100, 100)
    newshape = (5, 2000)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_new = py_matrix.reshape(py_mat, newshape)
    torch_new = torch_matrix.reshape(torch_mat, newshape)

    assert_close(py_new, torch_new, "reshape")
Example #29
def test_normalize():
    shape = (100,)

    py_rand.set_seed()
    py_x = py_rand.rand(shape)

    torch_x = torch_matrix.float_tensor(py_x)

    py_norm = py_matrix.normalize(py_x)
    torch_norm = torch_matrix.normalize(torch_x)

    assert_close(py_norm, torch_norm, "normalize")
Example #30
def test_norm():
    shape = (100,)

    py_rand.set_seed()
    py_x = py_rand.rand(shape)
    torch_x = torch_matrix.float_tensor(py_x)

    py_norm = py_matrix.norm(py_x)
    torch_norm = torch_matrix.norm(torch_x)

    assert allclose(py_norm, torch_norm), \
    "python l2 norm != torch l2 norm"