Example #1
def test_regression_gradient():
    random = RandomState(94584)
    N = 50
    X = random.randn(N, 100)
    offset = 0.5

    mean = OffsetMean()
    mean.offset = offset
    mean.fix('offset')
    mean.set_data(N)

    cov = LinearCov()
    cov.scale = 1.0
    cov.set_data((X, X))

    y = random.randn(N)

    lmm = SlowLMM(y, mean, cov)

    def func(x):
        cov.scale = exp(x[0])
        return lmm.value(mean.feed().value(), cov.feed().value())

    def grad(x):
        cov.scale = exp(x[0])
        return lmm.gradient(mean.feed().value(),
                            cov.feed().value(),
                            mean.feed().gradient(),
                            cov.feed().gradient())

    assert_almost_equal(check_grad(func, grad, [0]), 0)
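The assertion above relies on scipy.optimize.check_grad, which returns the 2-norm of the difference between the analytical gradient and a finite-difference approximation, so a value near zero means the two agree. A minimal sketch of the same pattern on a toy function (the toy names are illustrative, not part of the lim API):

from numpy import array, exp
from scipy.optimize import check_grad


def toy_value(x):
    # f(x) = exp(x[0]) ** 2, mirroring the exp(x) parametrization of the scale above.
    return exp(x[0]) ** 2


def toy_grad(x):
    # d/dx exp(x)**2 = 2 * exp(x)**2
    return array([2 * exp(x[0]) ** 2])


# Prints a value very close to zero, which is what
# assert_almost_equal(check_grad(...), 0) checks in the test.
print(check_grad(toy_value, toy_grad, [0.0]))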
Example #2
File: canonical.py  Project: Horta/lim
def _mean_cov(offset, G, heritability, causal_variants, causal_variance,
              random_state):
    nsamples = G.shape[0]
    G = stdnorm(G, axis=0)

    G /= sqrt(G.shape[1])

    mean1 = OffsetMean()
    mean1.offset = offset

    cov1 = LinearCov()
    cov2 = EyeCov()
    cov = SumCov([cov1, cov2])

    mean1.set_data(nsamples, 'sample')
    cov1.set_data((G, G), 'sample')
    a = arange(nsamples)
    cov2.set_data((a, a), 'sample')

    cov1.scale = heritability - causal_variance
    cov2.scale = 1 - heritability - causal_variance

    means = [mean1]
    if causal_variants is not None:
        means += [_causal_mean(causal_variants, causal_variance, random_state)]

    mean = SumMean(means)

    return mean, cov
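Reading the scales off the code, the covariance that _mean_cov assembles should correspond, up to the library's internals, to the dense matrix sketched below; this is an illustrative reconstruction, not part of the lim API:

from numpy import eye, sqrt


def dense_cov_sketch(G, heritability, causal_variance):
    # G is assumed already column-standardized, as stdnorm(G, axis=0) does above.
    G = G / sqrt(G.shape[1])
    # LinearCov on (G, G) contributes scale * G @ G.T; EyeCov contributes scale * I.
    genetic = (heritability - causal_variance) * G.dot(G.T)
    noise = (1 - heritability - causal_variance) * eye(G.shape[0])
    return genetic + noise

The mean is the constant offset plus, when causal_variants is given, the extra term produced by _causal_mean.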
Example #3
def test_fast_scan():
    random = np.random.RandomState(9458)
    N = 500
    X = random.randn(N, N + 1)
    X -= X.mean(0)
    X /= X.std(0)
    X /= np.sqrt(X.shape[1])
    offset = 1.0

    mean = OffsetMean()
    mean.offset = offset
    mean.set_data(N, purpose='sample')

    cov_left = LinearCov()
    cov_left.scale = 1.5
    cov_left.set_data((X, X), purpose='sample')

    cov_right = EyeCov()
    cov_right.scale = 1.5
    cov_right.set_data((arange(N), arange(N)), purpose='sample')

    cov = SumCov([cov_left, cov_right])

    lik = DeltaProdLik()

    y = GLMMSampler(lik, mean, cov).sample(random)

    (Q0, Q1), S0 = economic_qs_linear(X)

    flmm = FastLMM(y, Q0, Q1, S0, covariates=ones((N, 1)))

    flmm.learn(progress=False)

    markers = random.randn(N, 2)

    flmm_ = flmm.copy()
    flmm_.M = concatenate([flmm.M, markers[:, 0][:, newaxis]], axis=1)
    lml0 = flmm_.lml()

    flmm_ = flmm.copy()
    flmm_.M = concatenate([flmm.M, markers[:, 1][:, newaxis]], axis=1)
    lml1 = flmm_.lml()

    lik_trick = flmm.get_normal_likelihood_trick()

    lmls = lik_trick.fast_scan(markers)[0]
    assert_allclose(lmls, [lml0, lml1], rtol=1e-5)
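The assertion compares fast_scan against the naive approach spelled out just above it: append each candidate marker to the covariate matrix, one at a time, and evaluate the log marginal likelihood on a copy of the fitted model. A sketch of that loop for an arbitrary number of markers, using only the calls already shown in the test:

from numpy import concatenate, newaxis


def naive_scan(flmm, markers):
    # One LML per marker, each computed on a fresh copy with M extended by one column.
    lmls = []
    for i in range(markers.shape[1]):
        flmm_ = flmm.copy()
        flmm_.M = concatenate([flmm.M, markers[:, i][:, newaxis]], axis=1)
        lmls.append(flmm_.lml())
    return lmls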
Example #4
def test_slowlmm_value_1():
    random = RandomState(94584)
    N = 50
    X = random.randn(N, 100)
    offset = 0.5

    mean = OffsetMean()
    mean.offset = offset
    mean.fix('offset')
    mean.set_data(N)

    cov = LinearCov()
    cov.scale = 1.0
    cov.set_data((X, X))

    y = random.randn(N)

    lmm = SlowLMM(y, mean, cov)
    assert_almost_equal(lmm.feed().value(), -153.623791551399108)
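The value being checked is, presumably, the log marginal likelihood of y under a multivariate normal with the given mean vector and covariance matrix. Assuming that reading of SlowLMM, the same number can be reproduced with plain numpy:

from numpy import log, pi
from numpy.linalg import slogdet, solve


def normal_logpdf(y, m, K):
    # log N(y | m, K) = -0.5 * (n*log(2*pi) + log|K| + (y - m)' K^{-1} (y - m))
    n = len(y)
    d = y - m
    return -0.5 * (n * log(2 * pi) + slogdet(K)[1] + d.dot(solve(K, d)))

With m = mean.feed().value() and K = cov.feed().value() from the test, this should come out close to -153.6238 if that assumption holds.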
Example #5
def test_maximize_2():
    random = RandomState(94584)
    N = 50
    X = random.randn(N, 100)
    offset = 0.5

    mean = OffsetMean()
    mean.offset = offset
    mean.set_data(N)

    cov = LinearCov()
    cov.scale = 1.0
    cov.set_data((X, X))

    y = random.randn(N)

    lmm = SlowLMM(y, mean, cov)
    lmm.feed().maximize()
    assert_almost_equal(lmm.feed().value(), -79.365136339619610)
Example #6
def test_GLMMSampler_binomial():
    random = RandomState(4503)
    X = random.randn(10, 15)
    link = LogitLink()
    lik = BinomialLik(5, link)

    mean = OffsetMean()
    mean.offset = 1.2
    mean.set_data(10, 'sample')
    cov = LinearCov()
    cov.set_data((X, X), 'sample')
    sampler = GLMMSampler(lik, mean, cov)
    assert_equal(sampler.sample(random), [0, 5, 0, 5, 1, 1, 5, 0, 5, 5])

    mean.offset = 0.
    assert_equal(sampler.sample(random), [5, 4, 1, 0, 0, 1, 4, 5, 5, 0])

    mean = OffsetMean()
    mean.offset = 0.0
    mean.set_data(10, 'sample')

    cov1 = LinearCov()
    cov1.set_data((X, X), 'sample')

    cov2 = EyeCov()
    a = arange(10)
    cov2.set_data((a, a), 'sample')

    cov1.scale = 1e-4
    cov2.scale = 1e-4

    cov = SumCov([cov1, cov2])

    lik = BinomialLik(100, link)
    sampler = GLMMSampler(lik, mean, cov)
    assert_equal(
        sampler.sample(random), [56, 56, 55, 51, 59, 45, 47, 43, 51, 38])

    cov2.scale = 100.
    sampler = GLMMSampler(lik, mean, cov)
    assert_equal(
        sampler.sample(random), [99, 93, 99, 75, 77, 0, 0, 100, 99, 12])
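GLMMSampler with a LogitLink and BinomialLik presumably draws a latent Gaussian vector from the mean/covariance pair, maps it through the inverse link, and samples binomial outcomes. A library-free sketch of that generative idea (a reconstruction, not the sampler's actual code):

from numpy import exp


def sample_binomial_glmm_sketch(random, m, K, ntrials):
    # Latent Gaussian effect with the model's mean vector and covariance matrix.
    z = random.multivariate_normal(m, K)
    # Logit link: the success probability is the logistic function of the latent value.
    p = 1.0 / (1.0 + exp(-z))
    # One binomial draw per sample, each with `ntrials` trials.
    return random.binomial(ntrials, p)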
Example #7
def test_GLMMSampler_poisson():
    random = RandomState(4503)
    X = random.randn(10, 15)
    link = LogLink()
    lik = PoissonLik(link)

    mean = OffsetMean()
    mean.offset = 1.2
    mean.set_data(10, 'sample')
    cov = LinearCov()
    cov.set_data((X, X), 'sample')
    sampler = GLMMSampler(lik, mean, cov)
    assert_equal(
        sampler.sample(random), [0, 289, 0, 11, 0, 0, 176, 0, 228, 82])

    mean = OffsetMean()
    mean.offset = 0.0
    mean.set_data(10, 'sample')

    cov1 = LinearCov()
    cov1.set_data((X, X), 'sample')

    cov2 = EyeCov()
    a = arange(10)
    cov2.set_data((a, a), 'sample')

    cov1.scale = 1e-4
    cov2.scale = 1e-4

    cov = SumCov([cov1, cov2])

    sampler = GLMMSampler(lik, mean, cov)

    assert_equal(sampler.sample(random), [2, 0, 1, 2, 1, 1, 1, 2, 0, 0])

    cov2.scale = 100.
    sampler = GLMMSampler(lik, mean, cov)
    assert_equal(sampler.sample(random), [0, 0, 0, 0, 1, 0, 0, 1196, 0, 0])
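The Poisson case follows the same pattern with a log link: exponentiate the latent Gaussian to get a rate, then draw Poisson counts. Again a hedged sketch, not the library's implementation:

from numpy import exp


def sample_poisson_glmm_sketch(random, m, K):
    # Latent Gaussian effect; the log link turns it into a positive rate.
    z = random.multivariate_normal(m, K)
    lam = exp(z)
    return random.poisson(lam)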
Example #8
def test_learn():
    random = np.random.RandomState(9458)
    N = 500
    X = random.randn(N, N + 1)
    X -= X.mean(0)
    X /= X.std(0)
    X /= np.sqrt(X.shape[1])
    offset = 1.0

    mean = OffsetMean()
    mean.offset = offset
    mean.set_data(N, purpose='sample')

    cov_left = LinearCov()
    cov_left.scale = 1.5
    cov_left.set_data((X, X), purpose='sample')

    cov_right = EyeCov()
    cov_right.scale = 1.5
    cov_right.set_data((arange(N), arange(N)), purpose='sample')

    cov = SumCov([cov_left, cov_right])

    lik = DeltaProdLik()

    y = GLMMSampler(lik, mean, cov).sample(random)

    (Q0, Q1), S0 = economic_qs_linear(X)

    flmm = FastLMM(y, Q0, Q1, S0, covariates=ones((N, 1)))

    flmm.learn(progress=False)

    assert_allclose(flmm.beta[0], 0.8997652129631661, rtol=1e-5)
    assert_allclose(flmm.genetic_variance, 1.7303981309775553, rtol=1e-5)
    assert_allclose(flmm.environmental_variance, 1.2950028351268132, rtol=1e-5)
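The two fitted variance components give the usual narrow-sense heritability estimate; a short sketch of that arithmetic, using the attribute names from the test:

def heritability_sketch(flmm):
    # Fraction of total variance attributed to the genetic (LinearCov) component.
    v_g = flmm.genetic_variance
    v_e = flmm.environmental_variance
    return v_g / (v_g + v_e)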
Example #9
def test_maximize_1():
    random = RandomState(94584)
    N = 50
    X = random.randn(N, 100)
    offset = 0.5

    mean = OffsetMean()
    mean.offset = offset
    mean.fix('offset')
    mean.set_data(N)

    cov = LinearCov()
    cov.scale = 1.0
    cov.set_data((X, X))

    y = random.randn(N)

    lmm = SlowLMM(y, mean, cov)
    m = mean.feed().value()
    K = cov.feed().value()
    assert_almost_equal(lmm.value(m, K), -153.62379155139911)

    lmm.feed().maximize()
    assert_almost_equal(lmm.feed().value(), -79.899212241487518)
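Unlike test_maximize_2, here the offset is fixed, so maximize() only has the covariance scale left to adjust. Assuming maximize() performs a gradient-based search over the free parameters, a comparable result for this one-parameter case can be sketched with scipy, reusing the exp(x) parametrization from test_regression_gradient:

from numpy import exp
from scipy.optimize import minimize_scalar


def maximize_scale_sketch(lmm, mean, cov):
    # Minimize the negative log marginal likelihood over the covariance scale only.
    def neg_lml(x):
        cov.scale = exp(x)
        return -lmm.value(mean.feed().value(), cov.feed().value())

    res = minimize_scalar(neg_lml)
    cov.scale = exp(res.x)
    return -res.fun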