Example #1
def test_np_uniform():
    types = [None, "float32", "float64"]
    ctx = mx.context.current_context()
    samples = 1000000
    # Generation test
    trials = 8
    num_buckets = 5
    for dtype in types:
        for low, high in [(-100.0, -98.0), (99.0, 101.0)]:
            scale = high - low
            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.uniform.ppf(x, loc=low, scale=scale), num_buckets)
            buckets = np.array(buckets, dtype=dtype).tolist()
            probs = [(buckets[i][1] - buckets[i][0])/scale for i in range(num_buckets)]
            generator_mx_np = lambda x: mx.np.random.uniform(low, high, size=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx_np, buckets=buckets, probs=probs, nsamples=samples, nrepeat=trials)

    # Broadcasting test
    params = [
        (1.0, mx.np.ones((4,4)) + 2.0),
        (mx.np.zeros((4,4)) + 1, 2.0),
        (mx.np.zeros((1,4)), mx.np.ones((4,4)) + mx.np.array([1, 2, 3, 4])),
        (mx.np.array([1, 2, 3, 4]), mx.np.ones((2,4,4)) * 5)
    ]
    for dtype in types:
        for low, high in params:
            expect_mean = (low + high) / 2
            expanded_size = (samples,) + expect_mean.shape
            uniform_samples = mx.np.random.uniform(low, high, size=expanded_size, dtype=dtype)
            mx.test_utils.assert_almost_equal(uniform_samples.asnumpy().mean(0), expect_mean.asnumpy(), rtol=0.20, atol=1e-1)
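
Every snippet in this listing relies on a small helper, gen_buckets_probs_with_ppf, which is defined alongside these tests in the MXNet test files and is not reproduced here. Below is a minimal sketch of what such a helper presumably does, assuming it splits the target distribution into equal-probability buckets via its quantile function (PPF):

def gen_buckets_probs_with_ppf(ppf, num_buckets):
    """Split a distribution into `num_buckets` equal-probability intervals.

    `ppf` is the quantile function (inverse CDF); bucket i spans
    [ppf(i/n), ppf((i+1)/n)] and carries probability 1/n.
    """
    probs = [1.0 / num_buckets] * num_buckets
    buckets = [(ppf(i / float(num_buckets)), ppf((i + 1) / float(num_buckets)))
               for i in range(num_buckets)]
    return buckets, probs

Note that the last boundary, ppf(1.0), is +inf for unbounded distributions, so recomputing bucket probabilities from bucket widths only makes sense in the uniform tests; the other tests either keep the 1/n values returned by the helper or recompute them from the CDF.
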
def test_np_gamma():
    types = [None, "float32", "float64"]
    ctx = mx.context.current_context()
    samples = 1000000
    # Generation test
    trials = 8
    num_buckets = 5
    for dtype in types:
        for alpha, beta in [(2.0, 3.0), (0.5, 1.0)]:
            buckets, probs = gen_buckets_probs_with_ppf(
                lambda x: ss.gamma.ppf(x, a=alpha, loc=0, scale=beta),
                num_buckets)
            buckets = np.array(buckets).tolist()

            def generator_mx(x):
                return mx.np.random.gamma(alpha, beta, size=x,
                                          ctx=ctx).asnumpy()

            verify_generator(generator=generator_mx,
                             buckets=buckets,
                             probs=probs,
                             nsamples=samples,
                             nrepeat=trials)
            generator_mx_same_seed =\
                lambda x: np.concatenate(
                    [mx.np.random.gamma(alpha, beta, size=(x // 10), ctx=ctx).asnumpy()
                        for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed,
                             buckets=buckets,
                             probs=probs,
                             nsamples=samples,
                             nrepeat=trials)
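
verify_generator comes from mx.test_utils and is likewise not shown in these snippets. The stand-in below only illustrates the kind of chi-square bucket check such a verifier performs; it assumes generator(nsamples) returns a flat NumPy array and is not MXNet's actual implementation.

import numpy as np
import scipy.stats as ss

def chi_square_check(generator, buckets, probs, nsamples=1000000):
    # Count how many samples fall into each half-open bucket [low, high).
    samples = generator(nsamples)
    observed = [np.sum((samples >= low) & (samples < high)) for (low, high) in buckets]
    expected = [p * nsamples for p in probs]
    # Compare observed counts against expected counts with a chi-square test.
    _, p_value = ss.chisquare(f_obs=observed, f_exp=expected)
    return p_value

A verifier along these lines would presumably repeat the check nrepeat times and require the p-value to clear a significance level alpha in at least a success_rate fraction of the repeats, which is what the trials/nrepeat and success_rate arguments in the tests above control.
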
Example #3
def test_randint_generator():
    ctx = mx.context.current_context()
    for dtype in ['int32', 'int64']:
        for low, high in [(50000000, 50001000), (-50000100, -50000000),
                          (-500, 199)]:
            scale = high - low
            buckets, probs = gen_buckets_probs_with_ppf(
                lambda x: ss.uniform.ppf(x, loc=low, scale=scale), 5)
            # Quantize bucket boundaries to reflect the actual dtype and adjust probs accordingly
            buckets = np.array(buckets, dtype=dtype).tolist()
            probs = [(buckets[i][1] - buckets[i][0]) / float(scale)
                     for i in range(5)]
            generator_mx = lambda x: mx.nd.random.randint(
                low, high, shape=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx,
                             buckets=buckets,
                             probs=probs,
                             nrepeat=100)
            # Scipy uses alpha = 0.01 for testing discrete distribution generators, but we use the default alpha = 0.05 (the higher threshold ensures robustness)
            # Refer - https://github.com/scipy/scipy/blob/9f12af697763fb5f9767d5cb1280ce62456a3974/scipy/stats/tests/test_discrete_basic.py#L45
            generator_mx_same_seed = \
                lambda x: np.concatenate(
                    [mx.nd.random.randint(low, high, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                        for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed,
                             buckets=buckets,
                             probs=probs,
                             nrepeat=100)
def test_normal_generator():
    ctx = mx.context.current_context()
    samples = 1000000
    # Default success rate is 0.25, so 2 successes out of 8 trials will pass.
    trials = 8
    num_buckets = 5
    for dtype in ['float16', 'float32', 'float64']:
        for mu, sigma in [(0.0, 1.0), (1.0, 5.0)]:
            buckets, probs = gen_buckets_probs_with_ppf(
                lambda x: ss.norm.ppf(x, mu, sigma), num_buckets)
            # Quantize bucket boundaries to reflect the actual dtype and adjust probs accordingly
            buckets = np.array(buckets, dtype=dtype).tolist()
            probs = [(ss.norm.cdf(buckets[i][1], mu, sigma) -
                      ss.norm.cdf(buckets[i][0], mu, sigma))
                     for i in range(num_buckets)]
            generator_mx = lambda x: mx.nd.random.normal(
                mu, sigma, shape=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx,
                             buckets=buckets,
                             probs=probs,
                             nsamples=samples,
                             nrepeat=trials)
            generator_mx_same_seed =\
                lambda x: np.concatenate(
                    [mx.nd.random.normal(mu, sigma, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                     for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed,
                             buckets=buckets,
                             probs=probs,
                             nsamples=samples,
                             nrepeat=trials)
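
The "quantize bucket boundaries" step matters because a float16 (or float32) generator can only produce values on that dtype's grid, so the float64 boundaries returned by the PPF are first rounded to the generator's dtype and the expected probabilities are then recomputed from the CDF at the rounded boundaries. A small stand-alone illustration of the effect (plain NumPy/SciPy, independent of MXNet):

import numpy as np
import scipy.stats as ss

mu, sigma = 1.0, 5.0
edges = [ss.norm.ppf(q, mu, sigma) for q in (0.2, 0.4)]    # exact float64 boundaries of one bucket
rounded = np.array(edges, dtype='float16').tolist()        # boundaries representable in float16
exact = ss.norm.cdf(edges[1], mu, sigma) - ss.norm.cdf(edges[0], mu, sigma)      # 0.2 by construction
adjusted = ss.norm.cdf(rounded[1], mu, sigma) - ss.norm.cdf(rounded[0], mu, sigma)
print(exact, adjusted)   # the two differ slightly; the test counts samples against the adjusted value
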
def test_np_gumbel():
    samples = 1000000
    # Generation test
    trials = 8
    num_buckets = 5
    for loc, scale in [(0.0, 1.0), (1.0, 5.0)]:
        buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.gumbel_r.ppf(x, loc=loc, scale=scale), num_buckets)
        buckets = np.array(buckets).tolist()
        probs = [(ss.gumbel_r.cdf(buckets[i][1], loc=loc, scale=scale) -
                  ss.gumbel_r.cdf(buckets[i][0], loc=loc, scale=scale)) for i in range(num_buckets)]
        generator_mx_np = lambda x: mx.np.random.gumbel(loc, scale, size=x).asnumpy()
        verify_generator(generator=generator_mx_np, buckets=buckets, probs=probs, nsamples=samples, nrepeat=trials)
def test_np_exponential():
    samples = 1000000
    # Generation test
    trials = 8
    num_buckets = 5
    for scale in [1.0, 5.0]:
        buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.expon.ppf(x, scale=scale), num_buckets)
        buckets = np.array(buckets, dtype="float32").tolist()
        probs = [(ss.expon.cdf(buckets[i][1], scale=scale) -
                  ss.expon.cdf(buckets[i][0], scale=scale)) for i in range(num_buckets)]
        generator_mx_np = lambda x: mx.np.random.exponential(scale, size=x).asnumpy()
        verify_generator(generator=generator_mx_np, buckets=buckets, probs=probs, nsamples=samples, nrepeat=trials)
Example #7
def test_exponential_generator():
    ctx = mx.context.current_context()
    for dtype in ['float16', 'float32', 'float64']:
        for scale in [0.1, 1.0]:
            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.expon.ppf(x, loc=0, scale=scale), 5)
            generator_mx = lambda x: mx.nd.random.exponential(scale, shape=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
            generator_mx_same_seed = \
                lambda x: np.concatenate(
                    [mx.nd.random.exponential(scale, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                     for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)
def test_uniform_generator():
    ctx = mx.context.current_context()
    for dtype in ['float16', 'float32', 'float64']:
        for low, high in [(-1.0, 1.0), (1.0, 3.0)]:
            print("ctx=%s, dtype=%s, Low=%g, High=%g:" % (ctx, dtype, low, high))
            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.uniform.ppf(x, loc=low, scale=high - low), 5)
            generator_mx = lambda x: mx.nd.random.uniform(low, high, shape=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
            generator_mx_same_seed = \
                lambda x: np.concatenate(
                    [mx.nd.random.uniform(low, high, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                     for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)
def test_normal_generator():
    ctx = mx.context.current_context()
    for dtype in ['float16', 'float32', 'float64']:
        for mu, sigma in [(0.0, 1.0), (1.0, 5.0)]:
            print("ctx=%s, dtype=%s, Mu=%g, Sigma=%g:" % (ctx, dtype, mu, sigma))
            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.norm.ppf(x, mu, sigma), 5)
            generator_mx = lambda x: mx.nd.random.normal(mu, sigma, shape=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
            generator_mx_same_seed =\
                lambda x: np.concatenate(
                    [mx.nd.random.normal(mu, sigma, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                     for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)
Example #10
def test_gamma_generator():
    success_rate = 0.05
    ctx = mx.context.current_context()
    for dtype in ['float16', 'float32', 'float64']:
        for kappa, theta in [(0.5, 1.0), (1.0, 5.0)]:
            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.gamma.ppf(x, a=kappa, loc=0, scale=theta), 5)
            generator_mx = lambda x: mx.nd.random.gamma(kappa, theta, shape=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx, buckets=buckets, probs=probs, success_rate=success_rate)
            generator_mx_same_seed = \
                lambda x: np.concatenate(
                    [mx.nd.random.gamma(kappa, theta, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                     for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs, success_rate=success_rate)
def test_np_laplace():
    types = [None, "float32", "float64"]
    ctx = mx.context.current_context()
    samples = 1000000
    # Generation test
    trials = 8
    num_buckets = 5
    for dtype in types:
        for loc, scale in [(0.0, 1.0), (1.0, 5.0)]:
            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.laplace.ppf(x, loc=loc, scale=scale), num_buckets)
            buckets = np.array(buckets, dtype=dtype).tolist()
            probs = [(ss.laplace.cdf(buckets[i][1], loc=loc, scale=scale) -
                      ss.laplace.cdf(buckets[i][0], loc=loc, scale=scale)) for i in range(num_buckets)]
            generator_mx_np = lambda x: mx.np.random.laplace(loc, scale, size=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx_np, buckets=buckets, probs=probs, nsamples=samples, nrepeat=trials)
def test_np_normal():
    types = [None, "float32", "float64"]
    device = mx.device.current_device()
    samples = 1000000
    # Generation test
    trials = 8
    num_buckets = 5
    for dtype in types:
        for loc, scale in [(0.0, 1.0), (1.0, 5.0)]:
            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.norm.ppf(x, loc=loc, scale=scale), num_buckets)
            buckets = np.array(buckets, dtype=dtype).tolist()
            probs = [(ss.norm.cdf(buckets[i][1], loc, scale) -
                      ss.norm.cdf(buckets[i][0], loc, scale)) for i in range(num_buckets)]
            generator_mx_np = lambda x: mx.np.random.normal(loc, scale, size=x, device=device, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx_np, buckets=buckets, probs=probs, nsamples=samples, nrepeat=trials)
def test_np_uniform():
    types = [None, "float32", "float64"]
    ctx = mx.context.current_context()
    samples = 1000000
    # Generation test
    trials = 8
    num_buckets = 5
    for dtype in types:
        for low, high in [(-100.0, -98.0), (99.0, 101.0)]:
            scale = high - low
            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.uniform.ppf(x, loc=low, scale=scale), num_buckets)
            buckets = np.array(buckets, dtype=dtype).tolist()
            probs = [(buckets[i][1] - buckets[i][0])/scale for i in range(num_buckets)]
            generator_mx_np = lambda x: mx.np.random.uniform(low, high, size=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx_np, buckets=buckets, probs=probs, nsamples=samples, nrepeat=trials)
Example #15
def test_uniform_generator():
    ctx = mx.context.current_context()
    for dtype in ['float16', 'float32', 'float64']:
        for low, high in [(-1.0, 1.0), (1.0, 3.0)]:
            scale = high - low
            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.uniform.ppf(x, loc=low, scale=scale), 5)
            # Quantize bucket boundaries to reflect the actual dtype and adjust probs accordingly
            buckets = np.array(buckets, dtype=dtype).tolist()
            probs = [(buckets[i][1] - buckets[i][0])/scale for i in range(5)]
            generator_mx = lambda x: mx.nd.random.uniform(low, high, shape=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx, buckets=buckets, probs=probs)
            generator_mx_same_seed = \
                lambda x: np.concatenate(
                    [mx.nd.random.uniform(low, high, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                     for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs)
def test_randint_generator():
    ctx = mx.context.current_context()
    for dtype in ['int32', 'int64']:
        for low, high in [(50000000, 50001000),(-50000100,-50000000),(-500,199)]:
            scale = high - low
            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.uniform.ppf(x, loc=low, scale=scale), 5)
            # Quantize bucket boundaries to reflect the actual dtype and adjust probs accordingly
            buckets = np.array(buckets, dtype=dtype).tolist()
            probs = [(buckets[i][1] - buckets[i][0]) / float(scale) for i in range(5)]
            generator_mx = lambda x: mx.nd.random.randint(low, high, shape=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx, buckets=buckets, probs=probs, nrepeat=100)
            # Scipy uses alpha = 0.01 for testing discrete distribution generators, but we use the default alpha = 0.05 (the higher threshold ensures robustness)
            # Refer - https://github.com/scipy/scipy/blob/9f12af697763fb5f9767d5cb1280ce62456a3974/scipy/stats/tests/test_discrete_basic.py#L45
            generator_mx_same_seed = \
                lambda x: np.concatenate(
                    [mx.nd.random.randint(low, high, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                        for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs, nrepeat=100)
Example #17
def test_uniform_generator():
    ctx = mx.context.current_context()
    for dtype in ['float16', 'float32', 'float64']:
        for low, high in [(-1.0, 1.0), (1.0, 3.0)]:
            print("ctx=%s, dtype=%s, Low=%g, High=%g:" %
                  (ctx, dtype, low, high))
            buckets, probs = gen_buckets_probs_with_ppf(
                lambda x: ss.uniform.ppf(x, loc=low, scale=high - low), 5)
            generator_mx = lambda x: mx.nd.random.uniform(
                low, high, shape=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx,
                             buckets=buckets,
                             probs=probs)
            generator_mx_same_seed = \
                lambda x: np.concatenate(
                    [mx.nd.random.uniform(low, high, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                     for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed,
                             buckets=buckets,
                             probs=probs)
def test_normal_generator():
    ctx = mx.context.current_context()
    samples = 1000000
    # Default success rate is 0.25, so 2 successes out of 8 trials will pass.
    trials = 8
    num_buckets = 5
    for dtype in ['float16', 'float32', 'float64']:
        for mu, sigma in [(0.0, 1.0), (1.0, 5.0)]:
            buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.norm.ppf(x, mu, sigma), num_buckets)
            # Quantize bucket boundaries to reflect the actual dtype and adjust probs accordingly
            buckets = np.array(buckets, dtype=dtype).tolist()
            probs = [(ss.norm.cdf(buckets[i][1], mu, sigma) -
                      ss.norm.cdf(buckets[i][0], mu, sigma)) for i in range(num_buckets)]
            generator_mx = lambda x: mx.nd.random.normal(mu, sigma, shape=x, ctx=ctx, dtype=dtype).asnumpy()
            verify_generator(generator=generator_mx, buckets=buckets, probs=probs,
                             nsamples=samples, nrepeat=trials)
            generator_mx_same_seed =\
                lambda x: np.concatenate(
                    [mx.nd.random.normal(mu, sigma, shape=x // 10, ctx=ctx, dtype=dtype).asnumpy()
                     for _ in range(10)])
            verify_generator(generator=generator_mx_same_seed, buckets=buckets, probs=probs,
                             nsamples=samples, nrepeat=trials)