import itertools, math, numpy, pytest
# bnmf_gibbs_optimised (the model class under test) is assumed importable;
# its exact module path depends on the repository layout.

def test_initialise():
    I,J,K = 5,3,2
    R = numpy.ones((I,J))
    M = numpy.ones((I,J))
    
    lambdaU = 2*numpy.ones((I,K))
    lambdaV = 3*numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    
    # First do a random initialisation - then we can only check that values are nonnegative
    init = 'random'
    BNMF = bnmf_gibbs_optimised(R,M,K,priors)
    BNMF.initialise(init)
    
    assert BNMF.tau >= 0.0
    for i,k in itertools.product(xrange(0,I),xrange(0,K)):
        assert BNMF.U[i,k] >= 0.0
    for j,k in itertools.product(xrange(0,J),xrange(0,K)):
        assert BNMF.V[j,k] >= 0.0
        
    # Then initialise with expectation values
    init = 'exp'
    BNMF = bnmf_gibbs_optimised(R,M,K,priors)
    BNMF.initialise(init)
    
    assert BNMF.tau >= 0.0
    for i,k in itertools.product(xrange(0,I),xrange(0,K)):
        assert BNMF.U[i,k] == 1./2.
    for j,k in itertools.product(xrange(0,J),xrange(0,K)):
        assert BNMF.V[j,k] == 1./3.
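The 'exp' branch above relies on the mean of an exponential distribution: if x ~ Exp(lambda) then E[x] = 1/lambda, so lambdaU = 2 and lambdaV = 3 give initial values 1/2 and 1/3. A quick Monte Carlo check of that identity (note numpy parametrises by scale = 1/lambda):

import numpy
samples = numpy.random.exponential(scale=1./2., size=100000)  # draws from Exp(lambda=2)
print(samples.mean())  # close to 0.5 = 1/lambda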
Example #2
def test_log_likelihood():
    R = numpy.array([[1, 2], [3, 4]], dtype=float)
    M = numpy.array([[1, 1], [0, 1]])
    I, J, K = 2, 2, 3
    lambdaU = 2 * numpy.ones((I, K))
    lambdaV = 3 * numpy.ones((J, K))
    alpha, beta = 3, 1
    priors = {
        'alpha': alpha,
        'beta': beta,
        'lambdaU': lambdaU,
        'lambdaV': lambdaV
    }

    iterations = 10
    burnin, thinning = 4, 2
    BNMF = bnmf_gibbs_optimised(R, M, K, priors)
    BNMF.all_U = [numpy.ones((I, K)) for i in range(0, iterations)]
    BNMF.all_V = [2 * numpy.ones((J, K)) for i in range(0, iterations)]
    BNMF.all_tau = [3. for i in range(0, iterations)]
    # expU*expV.T = 6. in every entry

    log_likelihood = 3. / 2. * (
        math.log(3.) - math.log(2 * math.pi)) - 3. / 2. * (5**2 + 4**2 + 2**2)
    AIC = -2 * log_likelihood + 2 * (2 * 3 + 2 * 3)
    BIC = -2 * log_likelihood + (2 * 3 + 2 * 3) * math.log(3)
    MSE = (5**2 + 4**2 + 2**2) / 3.

    assert log_likelihood == BNMF.quality('loglikelihood', burnin, thinning)
    assert AIC == BNMF.quality('AIC', burnin, thinning)
    assert BIC == BNMF.quality('BIC', burnin, thinning)
    assert MSE == BNMF.quality('MSE', burnin, thinning)
    with pytest.raises(AssertionError) as error:
        BNMF.quality('FAIL', burnin, thinning)
    assert str(error.value) == "Unrecognised metric for model quality: FAIL."
def test_run():
    I,J,K = 10,5,2
    R = numpy.ones((I,J))
    M = numpy.ones((I,J))
    M[0,0], M[2,2], M[3,1] = 0, 0, 0
    
    lambdaU = 2*numpy.ones((I,K))
    lambdaV = 3*numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    init = 'exp' #U=1/2,V=1/3
    
    U_prior = numpy.ones((I,K))/2.
    V_prior = numpy.ones((J,K))/3.
    
    iterations = 15
    
    BNMF = bnmf_gibbs_optimised(R,M,K,priors)
    BNMF.initialise(init)
    (Us,Vs,taus) = BNMF.run(iterations)
    
    assert BNMF.all_U.shape == (iterations,I,K)
    assert BNMF.all_V.shape == (iterations,J,K)
    assert BNMF.all_tau.shape == (iterations,)
    
    for i,k in itertools.product(xrange(0,I),xrange(0,K)):
        assert Us[0,i,k] != U_prior[i,k]
    for j,k in itertools.product(xrange(0,J),xrange(0,K)):
        assert Vs[0,j,k] != V_prior[j,k]
    assert taus[1] != alpha/float(beta)
def test_approx_expectation():
    burn_in = 2
    thinning = 3 # so index 2,5,8 -> m=3,m=6,m=9
    (I,J,K) = (5,3,2)
    Us = [numpy.ones((I,K)) * 3*m**2 for m in range(1,10+1)] # entries are 3*m**2: first draw all 3's, then 12's, 27's, ...
    Vs = [numpy.ones((J,K)) * 2*m**2 for m in range(1,10+1)]
    taus = [m**2 for m in range(1,10+1)]
    
    expected_exp_tau = (9.+36.+81.)/3.
    expected_exp_U = (9.+36.+81.) * numpy.ones((I,K)) # each entry (27+108+243)/3 = 126
    expected_exp_V = (9.+36.+81.) * (2./3.) * numpy.ones((J,K)) # each entry (18+72+162)/3 = 84
    
    R = numpy.ones((I,J))
    M = numpy.ones((I,J))
    lambdaU = 2*numpy.ones((I,K))
    lambdaV = 3*numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    
    BNMF = bnmf_gibbs_optimised(R,M,K,priors)
    BNMF.all_U = Us
    BNMF.all_V = Vs
    BNMF.all_tau = taus
    (exp_U, exp_V, exp_tau) = BNMF.approx_expectation(burn_in,thinning)
    
    assert expected_exp_tau == exp_tau
    assert numpy.array_equal(expected_exp_U,exp_U)
    assert numpy.array_equal(expected_exp_V,exp_V)
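With burn_in = 2 and thinning = 3 over ten stored draws, the retained indices are 2, 5, 8 (i.e. m = 3, 6, 9), so the tau expectation is (9 + 36 + 81)/3 = 42. A short sketch of that index selection, assuming approx_expectation thins the chain this way:

burn_in, thinning = 2, 3
taus = [m**2 for m in range(1, 11)]
indices = range(burn_in, len(taus), thinning)  # [2, 5, 8]
exp_tau = sum(taus[i] for i in indices) / float(len(indices))
print(exp_tau)  # 42.0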
Example #7
def test_compute_statistics():
    R = numpy.array([[1, 2], [3, 4]], dtype=float)
    M = numpy.array([[1, 1], [0, 1]])
    I, J, K = 2, 2, 3
    lambdaU = 2 * numpy.ones((I, K))
    lambdaV = 3 * numpy.ones((J, K))
    alpha, beta = 3, 1
    priors = {
        'alpha': alpha,
        'beta': beta,
        'lambdaU': lambdaU,
        'lambdaV': lambdaV
    }

    BNMF = bnmf_gibbs_optimised(R, M, K, priors)

    R_pred = numpy.array([[500, 550], [1220, 1342]], dtype=float)
    M_pred = numpy.array([[0, 0], [1, 1]])

    MSE_pred = (1217**2 + 1338**2) / 2.0
    R2_pred = 1. - (1217**2 + 1338**2) / (0.5**2 + 0.5**2)  #mean=3.5
    Rp_pred = 61. / (math.sqrt(.5) * math.sqrt(7442.))  #mean=3.5,var=0.5,mean_pred=1281,var_pred=7442,cov=61

    assert MSE_pred == BNMF.compute_MSE(M_pred, R, R_pred)
    assert R2_pred == BNMF.compute_R2(M_pred, R, R_pred)
    assert Rp_pred == BNMF.compute_Rp(M_pred, R, R_pred)
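The Rp constant works out to exactly 1 here: M_pred keeps only the bottom row, so the observations are (3, 4) and the predictions (1220, 1342); the covariance term is (-0.5)*(-61) + (0.5)*(61) = 61, and 0.5 * 7442 = 61**2, so the correlation is 61/61. A cross-check with numpy.corrcoef, assuming compute_Rp is plain Pearson correlation:

import numpy
obs = numpy.array([3., 4.])
pred = numpy.array([1220., 1342.])
print(numpy.corrcoef(obs, pred)[0, 1])  # 1.0 == 61. / (math.sqrt(.5) * math.sqrt(7442.))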
def test_predict():
    burn_in = 2
    thinning = 3 # so index 2,5,8 -> m=3,m=6,m=9
    (I,J,K) = (5,3,2)
    Us = [numpy.ones((I,K)) * 3*m**2 for m in range(1,10+1)] # entries are 3*m**2: first draw all 3's, then 12's, 27's, ...
    Vs = [numpy.ones((J,K)) * 2*m**2 for m in range(1,10+1)]
    Us[2][0,0] = 24 #instead of 27 - to ensure we do not get 0 variance in our predictions
    taus = [m**2 for m in range(1,10+1)]
    
    R = numpy.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12],[13,14,15]],dtype=float)
    M = numpy.ones((I,J))
    lambdaU = 2*numpy.ones((I,K))
    lambdaV = 3*numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    
    #expected_exp_U = numpy.array([[125.,126.],[126.,126.],[126.,126.],[126.,126.],[126.,126.]])
    #expected_exp_V = numpy.array([[84.,84.],[84.,84.],[84.,84.]])
    #R_pred = numpy.array([[21084.,21084.,21084.],[ 21168.,21168.,21168.],[21168.,21168.,21168.],[21168.,21168.,21168.],[21168.,21168.,21168.]])
    
    M_test = numpy.array([[0,0,1],[0,1,0],[0,0,0],[1,1,0],[0,0,0]]) #R->3,5,10,11, R_pred->21084,21168,21168,21168
    MSE = (444408561. + 447872569. + 447660964. + 447618649.) / 4.
    R2 = 1. - (444408561. + 447872569. + 447660964. + 447618649.) / (4.25**2+2.25**2+2.75**2+3.75**2) #mean=7.25
    Rp = 357. / ( math.sqrt(44.75) * math.sqrt(5292.) ) #mean=7.25,var=44.75, mean_pred=21147,var_pred=5292, corr=(-4.25*-63 + -2.25*21 + 2.75*21 + 3.75*21)
    
    BNMF = bnmf_gibbs_optimised(R,M,K,priors)
    BNMF.all_U = Us
    BNMF.all_V = Vs
    BNMF.all_tau = taus
    performances = BNMF.predict(M_test,burn_in,thinning)
    
    assert performances['MSE'] == MSE
    assert performances['R^2'] == R2
    assert performances['Rp'] == Rp
Example #9
def test_tauV():
    BNMF = bnmf_gibbs_optimised(R, M, K, priors)
    BNMF.initialise(init)
    BNMF.tau = 3.
    #U^2 = [[1/4,1/4],[1/4,1/4],[1/4,1/4],[1/4,1/4],[1/4,1/4]], sum_i U^2 = [1,1,1] (index=j)
    tauV = 3. * numpy.array([[1., 1.], [1., 1.], [1., 1.]])
    for j, k in itertools.product(xrange(0, J), xrange(0, K)):
        assert BNMF.tauV(k)[j] == tauV[j, k]
def test_tauU():
    BNMF = bnmf_gibbs_optimised(R,M,K,priors)
    BNMF.initialise(init)
    BNMF.tau = 3.
    #V^2 = [[1/9,1/9],[1/9,1/9],[1/9,1/9]], sum_j V^2 = [2/9,1/3,2/9,2/9,1/3] (index=i)
    tauU = 3.*numpy.array([[2./9.,2./9.],[1./3.,1./3.],[2./9.,2./9.],[2./9.,2./9.],[1./3.,1./3.]])
    for i,k in itertools.product(xrange(0,I),xrange(0,K)):
        assert BNMF.tauU(k)[i] == tauU[i,k]
def test_muV():
    BNMF = bnmf_gibbs_optimised(R,M,K,priors)
    BNMF.initialise(init)
    BNMF.tau = 3.
    #U*V^T - Uik*Vjk = [[1/6,..]], so Rij - Ui * Vj + Uik * Vjk = 5/6
    tauV = 3.*numpy.array([[1.,1.],[1.,1.],[1.,1.]])
    muV = 1./tauV * ( 3. * numpy.array([[4.*(5./6.)*(1./2.), 4.*(5./6.)*(1./2.)],
                                        [4.*(5./6.)*(1./2.), 4.*(5./6.)*(1./2.)],
                                        [4.*(5./6.)*(1./2.), 4.*(5./6.)*(1./2.)]]) - lambdaV )
    for j,k in itertools.product(xrange(0,J),xrange(0,K)):
        assert BNMF.muV(tauV[:,k],k)[j] == muV[j,k]
def test_muU():
    BNMF = bnmf_gibbs_optimised(R,M,K,priors)
    BNMF.initialise(init)
    BNMF.tau = 3.
    #U*V^T - Uik*Vjk = [[1/6,..]], so Rij - Ui * Vj + Uik * Vjk = 5/6
    tauU = 3.*numpy.array([[2./9.,2./9.],[1./3.,1./3.],[2./9.,2./9.],[2./9.,2./9.],[1./3.,1./3.]])
    muU = 1./tauU * ( 3. * numpy.array([[2.*(5./6.)*(1./3.), 10./18.],
                                        [15./18., 15./18.],
                                        [10./18., 10./18.],
                                        [10./18., 10./18.],
                                        [15./18., 15./18.]]) - lambdaU )
    for i,k in itertools.product(xrange(0,I),xrange(0,K)):
        assert abs(BNMF.muU(tauU[:,k],k)[i] - muU[i,k]) < 0.000000000000001
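Read together, the four tests above pin down the truncated-normal Gibbs conditionals. A sketch of the precision computation for U (my reading of the hand-computed constants, not taken from the implementation, which may vectorise it differently):

import numpy

def tauU_sketch(tau, M, V, k):
    # tauU[i,k] = tau * sum_j M[i,j] * V[j,k]**2, and symmetrically for tauV;
    # muU then folds in the residuals and the prior rate:
    # muU[i,k] = (tau * sum_j M[i,j]*V[j,k]*(R[i,j] - U[i].V[j] + U[i,k]*V[j,k]) - lambdaU[i,k]) / tauU[i,k]
    return tau * numpy.dot(M, V[:, k]**2)

M = numpy.ones((5, 3))
M[0, 0], M[2, 2], M[3, 1] = 0, 0, 0  # same mask as the test fixture
V = numpy.ones((3, 2)) / 3.          # 'exp' initialisation: V = 1/3
print(tauU_sketch(3., M, V, 0))      # [2/3, 1, 2/3, 2/3, 1] = 3 * [2/9, 1/3, ...]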
Example #21
        check_empty_rows_columns(M,fraction)


# We now run the Gibbs sampler on each of the M's for each fraction.
all_performances = {metric:[] for metric in metrics} 
average_performances = {metric:[] for metric in metrics} # averaged over repeats
for (fraction,Ms,Ms_test) in zip(fractions_unknown,all_Ms,all_Ms_test):
    print "Trying fraction %s." % fraction
    
    # Run the algorithm <repeats> times and store all the performances
    for metric in metrics:
        all_performances[metric].append([])
    for (repeat,M,M_test) in zip(range(0,repeats),Ms,Ms_test):
        print "Repeat %s of fraction %s." % (repeat+1, fraction)
    
        BNMF = bnmf_gibbs_optimised(R,M,K,priors)
        BNMF.initialise(init_UV)
        BNMF.run(iterations)
    
        # Measure the performances
        performances = BNMF.predict(M_test,burn_in,thinning)
        for metric in metrics:
            # Add this metric's performance to the list of <repeat> performances for this fraction
            all_performances[metric][-1].append(performances[metric])
            
    # Compute the average across attempts
    for metric in metrics:
        average_performances[metric].append(sum(all_performances[metric][-1])/repeats)
Example #22
def test_beta_s():
    BNMF = bnmf_gibbs_optimised(R, M, K, priors)
    BNMF.initialise(init)
    beta_s = beta + .5 * (12 * (2. / 3.)**2)  #U*V.T = [[1/6+1/6,..]]
    assert abs(BNMF.beta_s() - beta_s) < 0.000000000000001
Example #23
def test_alpha_s():
    BNMF = bnmf_gibbs_optimised(R, M, K, priors)
    BNMF.initialise(init)
    alpha_s = alpha + 6.
    assert BNMF.alpha_s() == alpha_s
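test_alpha_s and test_beta_s match the standard conjugate Gamma update for the noise precision tau: alpha_s = alpha + |Omega|/2 and beta_s = beta + 0.5 * sum over observed (i,j) of (R[i,j] - (U V^T)[i,j])**2. The shared fixture has 12 observed entries, each predicted as 2*(1/2)*(1/3) = 1/3 against R = 1, hence a residual of 2/3. Plugging in (fixture values alpha = 3, beta = 1 assumed, consistent with the other snippets):

n_observed = 12
alpha_s = 3 + n_observed / 2.               # 9.0 = alpha + 6
beta_s = 1 + 0.5 * n_observed * (2./3.)**2  # 1 + 8/3, as asserted above
print(alpha_s, beta_s)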
Example #24
import numpy, matplotlib.pyplot as plt

##########

standardised = False # use the standardised or unstandardised dataset

iterations = 1000
burnin = 800
thinning = 5
init_UV = 'random'
I, J, K = 622,138,10

alpha, beta = 1., 1.
lambdaU = numpy.ones((I,K))/10.
lambdaV = numpy.ones((J,K))/10.
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }

# Load in data
(_,X_min,M,_,_,_,_) = load_gdsc(standardised=standardised)

# Run the Gibbs sampler
BNMF = bnmf_gibbs_optimised(X_min,M,K,priors)
BNMF.initialise(init_UV)
BNMF.run(iterations)

# Plot the tau expectation values to check convergence
plt.plot(BNMF.all_tau)

# Print the performances across iterations (MSE)
print "all_performances = %s" % BNMF.all_performances['MSE']
def test_init():
    # Test getting an exception when R and M are different sizes, and when R is not a 2D array.
    R1 = numpy.ones(3)
    M = numpy.ones((2,3))
    I,J,K = 5,3,1
    lambdaU = numpy.ones((I,K))
    lambdaV = numpy.ones((J,K))
    alpha, beta = 3, 1    
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    
    with pytest.raises(AssertionError) as error:
        bnmf_gibbs_optimised(R1,M,K,priors)
    assert str(error.value) == "Input matrix R is not a two-dimensional array, but instead 1-dimensional."
    
    R2 = numpy.ones((4,3,2))
    with pytest.raises(AssertionError) as error:
        bnmf_gibbs_optimised(R2,M,K,priors)
    assert str(error.value) == "Input matrix R is not a two-dimensional array, but instead 3-dimensional."
    
    R3 = numpy.ones((3,2))
    with pytest.raises(AssertionError) as error:
        bnmf_gibbs_optimised(R3,M,K,priors)
    assert str(error.value) == "Input matrix R is not of the same size as the indicator matrix M: (3, 2) and (2, 3) respectively."
    
    # Similarly for lambdaU, lambdaV
    R4 = numpy.ones((2,3))
    lambdaU = numpy.ones((2+1,1))
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    with pytest.raises(AssertionError) as error:
        bnmf_gibbs_optimised(R4,M,K,priors)
    assert str(error.value) == "Prior matrix lambdaU has the wrong shape: (3, 1) instead of (2, 1)."
    
    lambdaU = numpy.ones((2,1))
    lambdaV = numpy.ones((3+1,1))
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    with pytest.raises(AssertionError) as error:
        bnmf_gibbs_optimised(R4,M,K,priors)
    assert str(error.value) == "Prior matrix lambdaV has the wrong shape: (4, 1) instead of (3, 1)."
    
    # Test getting an exception if a row or column is entirely unknown
    lambdaU = numpy.ones((2,1))
    lambdaV = numpy.ones((3,1))
    M1 = [[1,1,1],[0,0,0]]
    M2 = [[1,1,0],[1,0,0]]
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    
    with pytest.raises(AssertionError) as error:
        bnmf_gibbs_optimised(R4,M1,K,priors)
    assert str(error.value) == "Fully unobserved row in R, row 1."
    with pytest.raises(AssertionError) as error:
        bnmf_gibbs_optimised(R4,M2,K,priors)
    assert str(error.value) == "Fully unobserved column in R, column 2."
    
    # Finally, a successful case
    I,J,K = 3,2,2
    R5 = 2*numpy.ones((I,J))
    lambdaU = numpy.ones((I,K))
    lambdaV = numpy.ones((J,K))
    M = numpy.ones((I,J))
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    BNMF = bnmf_gibbs_optimised(R5,M,K,priors)
    
    assert numpy.array_equal(BNMF.R,R5)
    assert numpy.array_equal(BNMF.M,M)
    assert BNMF.I == I
    assert BNMF.J == J
    assert BNMF.K == K
    assert BNMF.size_Omega == I*J
    assert BNMF.alpha == alpha
    assert BNMF.beta == beta
    assert numpy.array_equal(BNMF.lambdaU,lambdaU)
    assert numpy.array_equal(BNMF.lambdaV,lambdaV)
    
    # And when lambdaU and lambdaV are scalars
    I,J,K = 3,2,2
    R5 = 2*numpy.ones((I,J))
    lambdaU = 3.
    lambdaV = 4.
    M = numpy.ones((I,J))
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    BNMF = bnmf_gibbs_optimised(R5,M,K,priors)
    
    assert numpy.array_equal(BNMF.R,R5)
    assert numpy.array_equal(BNMF.M,M)
    assert BNMF.I == I
    assert BNMF.J == J
    assert BNMF.K == K
    assert BNMF.size_Omega == I*J
    assert BNMF.alpha == alpha
    assert BNMF.beta == beta
    assert numpy.array_equal(BNMF.lambdaU,lambdaU*numpy.ones((I,K)))
    assert numpy.array_equal(BNMF.lambdaV,lambdaV*numpy.ones((J,K)))
Example #29
init_UV = 'random'
I, J, K = 100, 80, 10

alpha, beta = 1., 1.
lambdaU = numpy.ones((I, K)) / 10
lambdaV = numpy.ones((J, K)) / 10
priors = {'alpha': alpha, 'beta': beta, 'lambdaU': lambdaU, 'lambdaV': lambdaV}

# Load in data
R = numpy.loadtxt(input_folder + "R.txt")
M = numpy.ones((I, J))

M_test = calc_inverse_M(numpy.loadtxt(input_folder + "M.txt"))

# Run the Gibbs sampler
BNMF = bnmf_gibbs_optimised(R, M, K, priors)
BNMF.initialise(init_UV)
BNMF.run(iterations)

taus = BNMF.all_tau
Us = BNMF.all_U
Vs = BNMF.all_V

# Plot tau against iterations to see that it converges
f, axarr = plt.subplots(3, sharex=True)
x = range(1, len(taus) + 1)
axarr[0].set_title('Convergence of values')
axarr[0].plot(x, taus)
axarr[0].set_ylabel("tau")
axarr[1].plot(x, Us[:, 0, 0])
axarr[1].set_ylabel("U[0,0]")
Example #30
burnin = 800
thinning = 5

init_UV = 'random'
I, J, K = 622, 139, 10

alpha, beta = 1., 1.
lambdaU = numpy.ones((I, K)) / 10.
lambdaV = numpy.ones((J, K)) / 10.
priors = {'alpha': alpha, 'beta': beta, 'lambdaU': lambdaU, 'lambdaV': lambdaV}

# Load in data
(_, X_min, M, _, _, _, _) = load_Sanger(standardised=standardised)

# Run the Gibbs sampler
BNMF = bnmf_gibbs_optimised(X_min, M, K, priors)
BNMF.initialise(init_UV)
BNMF.run(iterations)

# Also measure the performances on the training data
performances = BNMF.predict(M, burnin, thinning)
print performances

# Plot the tau expectation values to check convergence
plt.plot(BNMF.all_tau)

# Print the performances across iterations (MSE)
print "all_performances = %s" % BNMF.all_performances['MSE']
'''
all_performances = [70.259413341682688, 4.8971290647901098, 3.3366462476811503, ...,
                    1.7630461759194906, 1.7619265106137016]
(full per-iteration MSE trace omitted; it decreases from roughly 70 to 1.76 over the 1000 iterations)
'''