Example #1
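# NOTE: every example in this file leans on shared fixtures that are not shown
# here: a NumPy-backed py_matrix / py_func / py_rand module trio, their PyTorch
# counterparts torch_matrix / torch_func, and the allclose / assert_close helpers.
# The sketch below is only a guess at that scaffolding; the import paths are
# placeholders and the helper signatures are inferred from how the tests call
# them, not taken from the actual source.

import numpy as np

# placeholder import layout -- substitute the real backend package
from python_backend import matrix as py_matrix          # hypothetical path
from python_backend import nonlinearity as py_func      # hypothetical path
from python_backend import rand as py_rand              # hypothetical path
from pytorch_backend import matrix as torch_matrix      # hypothetical path
from pytorch_backend import nonlinearity as torch_func  # hypothetical path


def allclose(py_res, torch_res, rtol=1e-05, atol=1e-08):
    # compare a NumPy-backend result against a PyTorch-backend result by
    # converting the torch side to a NumPy array first
    return np.allclose(py_res, torch_matrix.to_numpy_array(torch_res),
                       rtol=rtol, atol=atol)


def assert_close(py_res, torch_res, name, rtol=1e-05, atol=1e-08):
    # inferred helper: fail with the test name if the two backends disagree
    assert allclose(py_res, torch_res, rtol=rtol, atol=atol), \
        "python {0} != torch {0}".format(name)
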
def test_tprod():
    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    # overall tprod
    py_tprod = py_matrix.tprod(py_mat)
    torch_tprod = torch_matrix.tprod(torch_mat)

    assert allclose(py_tprod, torch_tprod), \
    "python overal tprod != torch overall tprod"

    # tprod over axis 0
    py_tprod = py_matrix.tprod(py_mat, axis=0)
    torch_tprod = torch_matrix.tprod(torch_mat, axis=0)
    assert_close(py_tprod, torch_tprod, "tprod (axis-0)")

    # tprod over axis 1
    py_tprod = py_matrix.tprod(py_mat, axis=1)
    torch_tprod = torch_matrix.tprod(torch_mat, axis=1)
    assert_close(py_tprod, torch_tprod, "tprod (axis-1)")

    # tprod over axis 0, keepdims = True
    py_tprod = py_matrix.tprod(py_mat, axis=0, keepdims=True)
    torch_tprod = torch_matrix.tprod(torch_mat, axis=0, keepdims=True)
    assert_close(py_tprod, torch_tprod, "tprod (axis-0, keepdims)")

    # tprod over axis 1, keepdims = True
    py_tprod = py_matrix.tprod(py_mat, axis=1, keepdims=True)
    torch_tprod = torch_matrix.tprod(torch_mat, axis=1, keepdims=True)
    assert_close(py_tprod, torch_tprod, "tprod (axis-1, keepdims)")
Example #2
def test_tsum():
    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    # overall tsum
    py_tsum = py_matrix.tsum(py_mat)
    torch_tsum = torch_matrix.tsum(torch_mat)

    assert allclose(py_tsum, torch_tsum), \
    "python overal tsum != torch overall tsum"

    # tsum over axis 0
    py_tsum = py_matrix.tsum(py_mat, axis=0)
    torch_tsum = torch_matrix.tsum(torch_mat, axis=0)
    assert_close(py_tsum, torch_tsum, "tsum (axis-0)")

    # tsum over axis 1
    py_tsum = py_matrix.tsum(py_mat, axis=1)
    torch_tsum = torch_matrix.tsum(torch_mat, axis=1)
    assert_close(py_tsum, torch_tsum, "tsum (axis-1)")

    # tsum over axis 0, keepdims = True
    py_tsum = py_matrix.tsum(py_mat, axis=0, keepdims=True)
    torch_tsum = torch_matrix.tsum(torch_mat, axis=0, keepdims=True)
    assert_close(py_tsum, torch_tsum, "tsum (axis-0, keepdims)")

    # tsum over axis 1, keepdims = True
    py_tsum = py_matrix.tsum(py_mat, axis=1, keepdims=True)
    torch_tsum = torch_matrix.tsum(torch_mat, axis=1, keepdims=True)
    assert_close(py_tsum, torch_tsum, "tsum (axis-1, keepdims)")
Example #3
def test_stack():
    # vector
    shape = (100,)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_res = py_matrix.stack([py_mat, py_mat], axis=0)
    torch_res = torch_matrix.stack([torch_mat, torch_mat], axis=0)

    assert_close(py_res, torch_res, "stack: vectors, axis=0")

    py_res = py_matrix.stack([py_mat, py_mat], axis=1)
    torch_res = torch_matrix.stack([torch_mat, torch_mat], axis=1)

    assert_close(py_res, torch_res, "stack: vectors, axis=1")

    # matrix
    shape = (100, 100)

    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_res = py_matrix.stack([py_mat, py_mat], axis=0)
    torch_res = torch_matrix.stack([torch_mat, torch_mat], axis=0)

    assert_close(py_res, torch_res, "stack: matrices, axis=0")

    py_res = py_matrix.stack([py_mat, py_mat], axis=1)
    torch_res = torch_matrix.stack([torch_mat, torch_mat], axis=1)

    assert_close(py_res, torch_res, "stack: matrices, axis=1")
Example #4
def test_mean():
    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    # overall mean
    py_mean = py_matrix.mean(py_mat)
    torch_mean = torch_matrix.mean(torch_mat)

    assert allclose(py_mean, torch_mean), \
    "python overal mean != torch overall mean"

    # mean over axis 0
    py_mean = py_matrix.mean(py_mat, axis=0)
    torch_mean = torch_matrix.mean(torch_mat, axis=0)
    assert_close(py_mean, torch_mean, "mean (axis-0)")

    # mean over axis 1
    py_mean = py_matrix.mean(py_mat, axis=1)
    torch_mean = torch_matrix.mean(torch_mat, axis=1)
    assert_close(py_mean, torch_mean, "mean (axis-1)")

    # mean over axis 0, keepdims = True
    py_mean = py_matrix.mean(py_mat, axis=0, keepdims=True)
    torch_mean = torch_matrix.mean(torch_mat, axis=0, keepdims=True)
    assert_close(py_mean, torch_mean, "mean (axis-0, keepdims)")

    # mean over axis 1, keepdims = True
    py_mean = py_matrix.mean(py_mat, axis=1, keepdims=True)
    torch_mean = torch_matrix.mean(torch_mat, axis=1, keepdims=True)
    assert_close(py_mean, torch_mean, "mean (axis-1, keepdims)")
Example #5
def test_var():
    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    # overall var
    py_var = py_matrix.var(py_mat)
    torch_var = torch_matrix.var(torch_mat)

    assert allclose(py_var, torch_var), \
    "python overal var != torch overall var"

    # var over axis 0
    py_var = py_matrix.var(py_mat, axis=0)
    torch_var = torch_matrix.var(torch_mat, axis=0)
    assert_close(py_var, torch_var, "var (axis-0)")

    # var over axis 1
    py_var = py_matrix.var(py_mat, axis=1)
    torch_var = torch_matrix.var(torch_mat, axis=1)
    assert_close(py_var, torch_var, "var (axis-1)")

    # var over axis 0, keepdims = True
    py_var = py_matrix.var(py_mat, axis=0, keepdims=True)
    torch_var = torch_matrix.var(torch_mat, axis=0, keepdims=True)
    assert_close(py_var, torch_var, "var (axis-0, keepdims)")

    # var over axis 1, keepdims = True
    py_var = py_matrix.var(py_mat, axis=1, keepdims=True)
    torch_var = torch_matrix.var(torch_mat, axis=1, keepdims=True)
    assert_close(py_var, torch_var, "var (axis-1, keepdims)")
Example #6
def test_clip_inplace():

    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    # test two sided clip
    py_matrix.clip_inplace(py_mat, a_min=0, a_max=1)
    torch_matrix.clip_inplace(torch_mat, a_min=0, a_max=1)

    assert_close(py_mat, torch_mat, "clip_inplace (two-sided)")

    # test lower clip
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_matrix.clip_inplace(py_mat, a_min=0)
    torch_matrix.clip_inplace(torch_mat, a_min=0)

    assert_close(py_mat, torch_mat, "clip_inplace (lower)")

    # test upper clip
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_matrix.clip_inplace(py_mat, a_max=1)
    torch_matrix.clip_inplace(torch_mat, a_max=1)

    assert_close(py_mat, torch_mat, "clip_inplace (upper)")
Example #7
def test_tmax():
    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    # overall max
    py_max = py_matrix.tmax(py_mat)
    torch_max = torch_matrix.tmax(torch_mat)

    assert allclose(py_max, torch_max), \
    "python overal max != torch overall max"

    # max over axis 0
    py_max = py_matrix.tmax(py_mat, axis=0)
    torch_max = torch_matrix.tmax(torch_mat, axis=0)
    assert_close(py_max, torch_max, "tmax (axis-0)")

    # max over axis 1
    py_max = py_matrix.tmax(py_mat, axis=1)
    torch_max = torch_matrix.tmax(torch_mat, axis=1)
    assert_close(py_max, torch_max, "tmax (axis-1)")

    # max over axis 0, keepdims = True
    py_max = py_matrix.tmax(py_mat, axis=0, keepdims=True)
    torch_max = torch_matrix.tmax(torch_mat, axis=0, keepdims=True)
    assert_close(py_max, torch_max, "tmax (axis-0, keepdims)")

    # max over axis 1, keepdims = True
    py_max = py_matrix.tmax(py_mat, axis=1, keepdims=True)
    torch_max = torch_matrix.tmax(torch_mat, axis=1, keepdims=True)
    assert_close(py_max, torch_max, "tmax (axis-1, keepdims)")
Example #8
def test_sin():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    torch_x = torch_matrix.float_tensor(py_x)

    py_y = py_func.sin(py_x)
    torch_y = torch_func.sin(torch_x)
    assert_close(py_y, torch_y, "sin")
Example #9
def test_logit():
    shape = (100, 100)

    py_rand.set_seed()
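    # uniform samples keep the inputs (almost surely) inside logit's domain (0, 1)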
    py_x = py_rand.rand(shape)
    torch_x = torch_matrix.float_tensor(py_x)

    py_y = py_func.logit(py_x)
    torch_y = torch_func.logit(torch_x)
    assert_close(py_y, torch_y, "logit")
Example #10
def test_acosh():
    shape = (100, 100)

    py_rand.set_seed()
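    # shift uniform samples into [1, 2) so the inputs lie in acosh's domain [1, inf)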
    py_x = 1 + py_rand.rand(shape)
    torch_x = torch_matrix.float_tensor(py_x)

    py_y = py_func.acosh(py_x)
    torch_y = torch_func.acosh(torch_x)
    assert_close(py_y, torch_y, "acosh")
Example #11
def test_reciprocal():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.rand(shape)
    torch_x = torch_matrix.float_tensor(py_x)

    py_y = py_func.reciprocal(py_x)
    torch_y = torch_func.reciprocal(torch_x)
    assert_close(py_y, torch_y, "reciprocal")
Example #12
def test_tpow():
    shape = (100, 100)
    power = 3

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    torch_x = torch_matrix.float_tensor(py_x)

    py_y = py_func.tpow(py_x, power)
    torch_y = torch_func.tpow(torch_x, power)
    assert_close(py_y, torch_y, "tpow")
Example #13
def test_atanh():
    shape = (100, 100)
    py_rand.set_seed()
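    # map uniform samples into [-1, 1) so the inputs lie inside atanh's domain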
    py_x = 2 * py_rand.rand(shape) - 1
    torch_x = torch_matrix.float_tensor(py_x)

    py_y = py_func.atanh(py_x)
    torch_y = torch_func.atanh(torch_x)
    # the atanh function is a bit less precise than the others
    # so the tolerance is a bit more flexible
    assert_close(py_y, torch_y, "atanh", rtol=1e-05, atol=1e-07)
Example #14
def test_sign():

    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_sign = py_matrix.sign(py_mat)
    torch_sign = torch_matrix.sign(torch_mat)
    assert_close(py_sign, torch_sign, "sign")
Example #15
def test_fast_energy_distance():
    shape = (1000, 50)
    downsample = 100
    # close distributions
    a_mean, a_scale = 0, 1
    b_mean, b_scale = 0.1, 0.9

    py_rand.set_seed()
    py_a = a_mean + a_scale * py_rand.randn(shape)
    py_b = b_mean + b_scale * py_rand.randn(shape)
    torch_a = torch_matrix.float_tensor(py_a)
    torch_b = torch_matrix.float_tensor(py_b)

    py_dist = py_matrix.fast_energy_distance(py_a,
                                             py_b,
                                             downsample=downsample)

    torch_dist = torch_matrix.fast_energy_distance(torch_a,
                                                   torch_b,
                                                   downsample=downsample)

    # python fast_energy_distance is stochastic even after calling
    # py_rand.set_seed() because it uses the numba random number
    # generator rather than the one in numpy.
    # this test can fail stochastically
    assert py_dist < 0.25, \
    "python energy distance is too big"

    assert torch_dist < 0.25, \
    "torch energy distance is too big"

    # distant distributions
    a_mean, a_scale = 1, 1
    b_mean, b_scale = -1, 1

    py_rand.set_seed()
    py_a = a_mean + a_scale * py_rand.randn(shape)
    py_b = b_mean + b_scale * py_rand.randn(shape)
    torch_a = torch_matrix.float_tensor(py_a)
    torch_b = torch_matrix.float_tensor(py_b)

    py_dist = py_matrix.fast_energy_distance(py_a,
                                             py_b,
                                             downsample=downsample)

    torch_dist = torch_matrix.fast_energy_distance(torch_a,
                                                   torch_b,
                                                   downsample=downsample)

    assert py_dist > 10, \
    "python energy distance is too small"

    assert torch_dist > 10, \
    "torch energy distance is too small"
Example #16
def test_reshape():
    shape = (100, 100)
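    # the new shape keeps the same total number of elements (100 * 100 = 5 * 2000)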
    newshape = (5, 2000)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_new = py_matrix.reshape(py_mat, newshape)
    torch_new = torch_matrix.reshape(torch_mat, newshape)

    assert_close(py_new, torch_new, "reshape")
Example #17
def test_repeat():
    shape = (100,)
    n_repeats = 5

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    torch_x = torch_matrix.float_tensor(py_x)

    py_res = py_matrix.repeat(py_x, n_repeats)
    torch_res = torch_matrix.repeat(torch_x, n_repeats)

    assert_close(py_res, torch_res, "repeat")
Example #18
def test_normalize():
    shape = (100,)

    py_rand.set_seed()
    py_x = py_rand.rand(shape)

    torch_x = torch_matrix.float_tensor(py_x)

    py_norm = py_matrix.normalize(py_x)
    torch_norm = torch_matrix.normalize(torch_x)

    assert_close(py_norm, torch_norm, "normalize")
Example #19
def test_norm():
    shape = (100,)

    py_rand.set_seed()
    py_x = py_rand.rand(shape)
    torch_x = torch_matrix.float_tensor(py_x)

    py_norm = py_matrix.norm(py_x)
    torch_norm = torch_matrix.norm(torch_x)

    assert allclose(py_norm, torch_norm), \
    "python l2 norm != torch l2 norm"
Example #20
def test_tround():

    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_round = py_matrix.tround(py_mat)
    torch_round = torch_matrix.tround(torch_mat)

    assert_close(py_round, torch_round, "tround")
Example #21
def test_inv():
    shape = (100, 100)

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_res = py_matrix.inv(py_mat)
    torch_res = torch_matrix.inv(torch_mat)

    # matrix inversion needs looser tolerances than the other tests to pass
    assert_close(py_res, torch_res, "inv", rtol=1e-4, atol=1e-5)
Example #22
def test_logaddexp():
    shape = (100, 100)

    py_rand.set_seed()
    py_x_1 = py_rand.randn(shape)
    py_x_2 = py_rand.randn(shape)

    torch_x_1 = torch_matrix.float_tensor(py_x_1)
    torch_x_2 = torch_matrix.float_tensor(py_x_2)

    py_y = py_func.logaddexp(py_x_1, py_x_2)
    torch_y = torch_func.logaddexp(torch_x_1, torch_x_2)
    assert_close(py_y, torch_y, "logaddexp")
Example #23
def test_outer():
    a_shape = (100,)
    b_shape = (100,)

    py_rand.set_seed()
    py_a = py_rand.randn(a_shape)
    py_b = py_rand.randn(b_shape)
    torch_a = torch_matrix.float_tensor(py_a)
    torch_b = torch_matrix.float_tensor(py_b)

    py_res = py_matrix.outer(py_a, py_b)
    torch_res = torch_matrix.outer(torch_a, torch_b)

    assert_close(py_res, torch_res, "outer")
Example #24
def test_minimum():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    py_y = py_rand.randn(shape)

    torch_x = torch_matrix.float_tensor(py_x)
    torch_y = torch_matrix.float_tensor(py_y)

    py_res = py_matrix.minimum(py_x, py_y)
    torch_res = torch_matrix.minimum(torch_x, torch_y)

    assert_close(py_res, torch_res, "minimum")
Example #25
def test_tall():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    py_y = py_rand.randn(shape)

    torch_x = torch_matrix.float_tensor(py_x)
    torch_y = torch_matrix.float_tensor(py_y)

    py_res = py_matrix.lesser_equal(py_x, py_y)
    torch_res = torch_matrix.lesser_equal(torch_x, torch_y)

    # overall
    py_all = py_matrix.tall(py_res)
    torch_all = torch_matrix.tall(torch_res)
    assert py_all == torch_all, \
    "python tall != torch tall: overall"

    # axis = 0
    py_all = py_matrix.tall(py_res, axis=0)
    torch_all = torch_matrix.tall(torch_res, axis=0)
    py_torch_all = torch_matrix.to_numpy_array(torch_all)

    assert py_matrix.allclose(py_all, py_torch_all), \
    "python tall != torch tall: (axis-0)"

    # axis = 1
    py_all = py_matrix.tall(py_res, axis=1)
    torch_all = torch_matrix.tall(torch_res, axis=1)
    py_torch_all = torch_matrix.to_numpy_array(torch_all)

    assert py_matrix.allclose(py_all, py_torch_all), \
    "python tall != torch tall: (axis-1)"

    # axis = 0, keepdims
    py_all = py_matrix.tall(py_res, axis=0, keepdims=True)
    torch_all = torch_matrix.tall(torch_res, axis=0, keepdims=True)
    py_torch_all = torch_matrix.to_numpy_array(torch_all)

    assert py_matrix.allclose(py_all, py_torch_all), \
    "python tall != torch tall: (axis-0, keepdims)"

    # axis = 1, keepdims
    py_all = py_matrix.tall(py_res, axis=1, keepdims=True)
    torch_all = torch_matrix.tall(torch_res, axis=1, keepdims=True)
    py_torch_all = torch_matrix.to_numpy_array(torch_all)

    assert py_matrix.allclose(py_all, py_torch_all), \
    "python tall != torch tall: (axis-1, keepdim)"
Example #26
def test_sqrt_div():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
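    # square the second argument so it is non-negative, since sqrt_div
    # presumably takes its square root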
    py_y = py_rand.randn(shape) ** 2

    torch_x = torch_matrix.float_tensor(py_x)
    torch_y = torch_matrix.float_tensor(py_y)

    py_sqrt_div = py_matrix.sqrt_div(py_x, py_y)
    torch_sqrt_div = torch_matrix.sqrt_div(torch_x, torch_y)

    assert_close(py_sqrt_div, torch_sqrt_div, "sqrt_div")
Example #27
def test_squared_euclidean_distance():
    shape = (100,)

    py_rand.set_seed()
    py_a = py_rand.randn(shape)
    py_b = py_rand.randn(shape)

    torch_a = torch_matrix.float_tensor(py_a)
    torch_b = torch_matrix.float_tensor(py_b)

    py_dist = py_matrix.squared_euclidean_distance(py_a, py_b)
    torch_dist = torch_matrix.squared_euclidean_distance(torch_a, torch_b)

    assert allclose(py_dist, torch_dist), \
    "squared_euclidean_distance failure"
Example #28
def test_resample():
    shape = (100, 50)
    n = 10

    py_rand.set_seed()
    py_mat = py_rand.randn(shape)
    torch_mat = torch_matrix.float_tensor(py_mat)

    py_sample = py_matrix.resample(py_mat, n, replace=False)
    torch_sample = torch_matrix.resample(torch_mat, n, replace=False)

    # python backend and pytorch backend do not share a random number generator
    # therefore, we can only test that the shapes are the same
    assert py_matrix.shape(py_sample) == torch_matrix.shape(torch_sample), \
    "python resample shape != torch resample shape"
Example #29
def test_lesser_equal():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    py_y = py_rand.randn(shape)

    torch_x = torch_matrix.float_tensor(py_x)
    torch_y = torch_matrix.float_tensor(py_y)

    py_res = py_matrix.lesser_equal(py_x, py_y)
    torch_res = torch_matrix.lesser_equal(torch_x, torch_y)
    py_torch_res = torch_matrix.to_numpy_array(torch_res)

    assert py_matrix.allclose(py_res, py_torch_res), \
    "python lesser_equal != torch lesser_equal"
Example #30
def test_greater():
    shape = (100, 100)

    py_rand.set_seed()
    py_x = py_rand.randn(shape)
    py_y = py_rand.randn(shape)

    torch_x = torch_matrix.float_tensor(py_x)
    torch_y = torch_matrix.float_tensor(py_y)

    py_res = py_matrix.greater(py_x, py_y)
    torch_res = torch_matrix.greater(torch_x, torch_y)
    py_torch_res = torch_matrix.to_numpy_array(torch_res)

    assert py_matrix.allclose(py_res, py_torch_res), \
    "python greater != torch greater"