Example #1
import itertools, numpy
from BNMTF.code.bnmf_vb_optimised import bnmf_vb_optimised

def test_initialise():
    I,J,K = 5,3,2
    R = numpy.ones((I,J))
    M = numpy.ones((I,J))
    
    lambdaU = 2*numpy.ones((I,K))
    lambdaV = 3*numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    
    # Initialisation with expectation
    init = 'exp'
    BNMF = bnmf_vb_optimised(R,M,K,priors)
    BNMF.initialise(init)
    
    assert BNMF.alpha_s == alpha + 15./2.
    #assert BNMF.alpha_s == alpha 
    assert BNMF.beta_s == beta + BNMF.exp_square_diff()/2.
    #assert BNMF.beta_s == beta 
    
    for i,k in itertools.product(xrange(0,I),xrange(0,K)):
        assert BNMF.tauU[i,k] == 1.
        assert BNMF.muU[i,k] == 1./lambdaU[i,k]
    for j,k in itertools.product(xrange(0,J),xrange(0,K)):
        assert BNMF.tauV[j,k] == 1.
        assert BNMF.muV[j,k] == 1./lambdaV[j,k]
        
    assert BNMF.exptau == (alpha + 15./2.) / (beta + BNMF.exp_square_diff()/2.)
    #assert BNMF.exptau == alpha / beta
    
    for i,k in itertools.product(xrange(0,I),xrange(0,K)):
        assert abs(BNMF.expU[i,k] - (0.5 + 0.352065 / (1-0.3085))) < 0.0001
    for j,k in itertools.product(xrange(0,J),xrange(0,K)):
        assert abs(BNMF.expV[j,k] - (1./3. + 0.377383 / (1-0.3694))) < 0.0001
        
    # Initialise tauU, tauV using predefined values
    tauUV = {
        'tauU' : 2*numpy.ones((I,K)),
        'tauV' : 3*numpy.ones((J,K))
    }
    init = 'exp'
    
    BNMF = bnmf_vb_optimised(R,M,K,priors)
    BNMF.initialise(init,tauUV)
    for i,k in itertools.product(xrange(0,I),xrange(0,K)):
        assert BNMF.tauU[i,k] == 2.
    for j,k in itertools.product(xrange(0,J),xrange(0,K)):
        assert BNMF.tauV[j,k] == 3.
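
A note on the hard-coded constants in the asserts above: after initialise('exp'), expU is the mean of a zero-truncated normal with muU = 1/lambdaU = 0.5 and tauU = 1, i.e. mu + sigma*lambda(alpha) with alpha = -mu/sigma and lambda(a) = pdf(a)/(1-cdf(a)). A minimal standalone sketch reproducing 0.5 + 0.352065/(1-0.3085), assuming scipy is available:

from scipy.stats import norm

mu, tau = 0.5, 1.        # muU = 1/lambdaU, tauU = 1 after initialise('exp')
sigma = tau ** -0.5
a = -mu / sigma          # -0.5
lam = norm.pdf(a) / (1. - norm.cdf(a))
print(mu + sigma * lam)  # ~1.00916, i.e. 0.5 + 0.352065/(1-0.3085)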
Example #2
import math, numpy, pytest
from BNMTF.code.bnmf_vb_optimised import bnmf_vb_optimised

def test_log_likelihood():
    R = numpy.array([[1,2],[3,4]],dtype=float)
    M = numpy.array([[1,1],[0,1]])
    I, J, K = 2, 2, 3
    lambdaU = 2*numpy.ones((I,K))
    lambdaV = 3*numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    
    BNMF = bnmf_vb_optimised(R,M,K,priors)
    BNMF.expU = numpy.ones((I,K))
    BNMF.expV = 2*numpy.ones((J,K))
    BNMF.explogtau = 5.
    BNMF.exptau = 3.
    # expU*expV.T = [[6.]]
    
    log_likelihood = 3./2.*(5.-math.log(2*math.pi)) - 3./2. * (5**2 + 4**2 + 2**2)
    AIC = -2*log_likelihood + 2*(2*3+2*3)
    BIC = -2*log_likelihood + (2*3+2*3)*math.log(3)
    MSE = (5**2+4**2+2**2)/3.
    
    assert log_likelihood == BNMF.quality('loglikelihood')
    assert AIC == BNMF.quality('AIC')
    assert BIC == BNMF.quality('BIC')
    assert MSE == BNMF.quality('MSE')
    with pytest.raises(AssertionError) as error:
        BNMF.quality('FAIL')
    assert str(error.value) == "Unrecognised metric for model quality: FAIL."
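
The expected values above can be reproduced with plain arithmetic: the three observed entries (R = 1, 2, 4 where M = 1) are all predicted as 6, giving squared errors 25, 16 and 4. A small standalone check:

import math

n, explogtau, exptau = 3, 5., 3.
sq_err = (1.-6.)**2 + (2.-6.)**2 + (4.-6.)**2                # 45
loglik = n/2.*(explogtau - math.log(2*math.pi)) - exptau/2.*sq_err
params = 2*3 + 2*3                                           # I*K + J*K
print(loglik)                                                # log likelihood
print(-2.*loglik + 2.*params)                                # AIC
print(-2.*loglik + params*math.log(n))                       # BIC
print(sq_err / 3.)                                           # MSE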
Example #3
import math, numpy
from BNMTF.code.bnmf_vb_optimised import bnmf_vb_optimised

def test_predict():
    (I,J,K) = (5,3,2)
    R = numpy.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12],[13,14,15]],dtype=float)
    M = numpy.ones((I,J))
    K = 3
    lambdaU = 2*numpy.ones((I,K))
    lambdaV = 3*numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    
    expU = numpy.array([[125.,126.],[126.,126.],[126.,126.],[126.,126.],[126.,126.]])
    expV = numpy.array([[84.,84.],[84.,84.],[84.,84.]])
    
    M_test = numpy.array([[0,0,1],[0,1,0],[0,0,0],[1,1,0],[0,0,0]]) #R->3,5,10,11, P_pred->21084,21168,21168,21168
    MSE = (444408561. + 447872569. + 447660964. + 447618649) / 4.
    R2 = 1. - (444408561. + 447872569. + 447660964. + 447618649) / (4.25**2+2.25**2+2.75**2+3.75**2) #mean=7.25
    Rp = 357. / ( math.sqrt(44.75) * math.sqrt(5292.) ) #mean=7.25,var=44.75, mean_pred=21147,var_pred=5292, corr=(-4.25*-63 + -2.25*21 + 2.75*21 + 3.75*21)
    
    BNMF = bnmf_vb_optimised(R,M,K,priors)
    BNMF.expU = expU
    BNMF.expV = expV
    performances = BNMF.predict(M_test)
    
    assert performances['MSE'] == MSE
    assert performances['R^2'] == R2
    assert performances['Rp'] == Rp
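
The commented numbers can be cross-checked directly from the four test entries and their predictions (a standalone check, not part of the original test):

import numpy

R_obs  = numpy.array([3., 5., 10., 11.])
R_pred = numpy.array([21084., 21168., 21168., 21168.])
print(((R_obs - R_pred)**2).mean())                          # MSE
print(1. - ((R_obs - R_pred)**2).sum()
         / ((R_obs - R_obs.mean())**2).sum())                # R^2
print(numpy.corrcoef(R_obs, R_pred)[0, 1])                   # Rp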
Example #4
import itertools, math, numpy
from BNMTF.code.bnmf_vb_optimised import bnmf_vb_optimised

def test_run():
    I,J,K = 10,5,2
    R = numpy.ones((I,J))
    M = numpy.ones((I,J))
    M[0,0], M[2,2], M[3,1] = 0, 0, 0
    R[0,1], R[0,2] = 2., 3.
    
    lambdaU = 2*numpy.ones((I,K))
    lambdaV = 3*numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    
    iterations = 2
    
    BNMF = bnmf_vb_optimised(R,M,K,priors)
    BNMF.initialise()
    BNMF.run(iterations)
    
    for i,k in itertools.product(xrange(0,I),xrange(0,K)):
        assert BNMF.muU[i,k] != 1./lambdaU[i,k]
        assert BNMF.tauU[i,k] != 1.
        assert BNMF.expU[i,k] != numpy.inf and not math.isnan(BNMF.expU[i,k])
        assert BNMF.tauU[i,k] != numpy.inf and not math.isnan(BNMF.tauU[i,k])
    for j,k in itertools.product(xrange(0,J),xrange(0,K)):
        assert BNMF.muV[j,k] != 1./lambdaV[j,k]
        assert BNMF.tauV[j,k] != 1.
        assert BNMF.expV[j,k] != numpy.inf and not math.isnan(BNMF.expV[j,k])
        assert BNMF.tauV[j,k] != numpy.inf and not math.isnan(BNMF.tauV[j,k])
    assert BNMF.alpha_s != alpha
    assert BNMF.beta_s != beta
    assert BNMF.exptau != numpy.inf and not math.isnan(BNMF.exptau)
    assert BNMF.explogtau != numpy.inf and not math.isnan(BNMF.explogtau)
Example #5
def test_update_exp_V():
    # R, M, J, K and priors are module-level fixtures of the test file.
    for k in range(0,K):
        BNMF = bnmf_vb_optimised(R,M,K,priors)
        BNMF.initialise() 
        BNMF.tauV = 4*numpy.ones((J,K)) # muV = [[1./3.]], tauV = [[4.]]
        BNMF.update_exp_V(k) #-mu*sqrt(tau) = -2./3., lambda(-2./3.) = 0.319448 / (1-0.2525) = 0.4273551839464883, gamma = 0.4675359092102624
        for j in range(0,J):        
            assert abs(BNMF.expV[j,k] - (1./3. + 1./2. * 0.4273551839464883)) < 0.00001
            assert abs(BNMF.varV[j,k] - 1./4.*(1. - 0.4675359092102624)) < 0.00001
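
The gamma constant in the varV assert is the truncated-normal variance correction: Var[x] = sigma^2 * (1 - delta) with delta = lambda(alpha)*(lambda(alpha) - alpha), here with alpha = -mu*sqrt(tau) = -2/3. A sketch, assuming scipy:

from scipy.stats import norm

a = -2./3.
lam = norm.pdf(a) / (1. - norm.cdf(a))
print(lam)              # 0.4273551839464883
print(lam * (lam - a))  # 0.4675359092102624, the value used for varV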
Example #6
def test_update_tau():
    # R, M, I, J, K, lambdaU, lambdaV, alpha, beta and priors are module-level fixtures.
    BNMF = bnmf_vb_optimised(R,M,K,priors)
    BNMF.expU = 1./lambdaU #[[1./2.]]
    BNMF.expV = 1./lambdaV #[[1./3.]]
    BNMF.varU = numpy.ones((I,K))*2 #[[2.]]
    BNMF.varV = numpy.ones((J,K))*3 #[[3.]]
    BNMF.update_tau()
    assert BNMF.alpha_s == alpha + 12./2.
    assert BNMF.beta_s == beta + 172.66666666666666/2.
Example #7
def test_exp_square_diff():
    # R, M, I, J, K, lambdaU, lambdaV and priors are module-level fixtures.
    BNMF = bnmf_vb_optimised(R,M,K,priors)
    BNMF.expU = 1./lambdaU #[[1./2.]]
    BNMF.expV = 1./lambdaV #[[1./3.]]
    BNMF.varU = numpy.ones((I,K))*2 #[[2.]]
    BNMF.varV = numpy.ones((J,K))*3 #[[3.]]
    # expU * expV.T = [[1./3.]]. (varU+expU^2)=2.25, (varV+expV^2)=3.+1./9.
    exp_square_diff = 172.66666666666666 #12.*(4./9.) + 12.*(2*(2.25*(3.+1./9.)-0.25/9.)) 
    assert BNMF.exp_square_diff() == exp_square_diff
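
The constant 172.666... decomposes per observed entry (12 of them, each with R_ij = 1 and K = 2) into a squared bias plus a variance term, exactly as the comment sketches:

E_UV = 2. * (1./2.) * (1./3.)                          # dot(E[U_i], E[V_j]) = 1/3
bias = (1. - E_UV)**2                                  # 4/9
var  = 2. * ((2. + 0.25)*(3. + 1./9.) - 0.25*(1./9.))  # Var[dot(U_i, V_j)]
print(12. * (bias + var))                              # 172.66666666666666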
Example #8
import math, numpy
from BNMTF.code.bnmf_vb_optimised import bnmf_vb_optimised

def test_elbo():
    I,J,K = 5,3,2
    R = numpy.ones((I,J))
    M = numpy.ones((I,J))
    M[0,0], M[2,2], M[3,1] = 0, 0, 0 # size Omega = 12
    
    lambdaU = 2*numpy.ones((I,K))
    lambdaV = 3*numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    
    expU = 5*numpy.ones((I,K))
    expV = 6*numpy.ones((J,K))
    varU = 11*numpy.ones((I,K))
    varV = 12*numpy.ones((J,K))
    exptau = 8.
    explogtau = 9.
    
    muU = 14*numpy.ones((I,K))
    muV = 15*numpy.ones((J,K))
    tauU = numpy.ones((I,K))/100.
    tauV = numpy.ones((J,K))/101.
    alpha_s = 20.
    beta_s = 21.
    
    # expU * expV = [[60]]
    # (R - expU*expV)^2 = 12*59^2 = 41772
    # Var[U*V] = 12*K*((11+5^2)*(12+6^2)-5^2*6^2) = 12*2*828 = 19872
    
    # -muU*sqrt(tauU) = -14.*math.sqrt(1./100.) = -1.4
    # -muV*sqrt(tauV) = -15.*math.sqrt(1./101.) = -1.4925557853149838
    # cdf(-1.4) = 0.080756659233771066
    # cdf(-1.4925557853149838) = 0.067776752211548219
    
    ELBO = 12./2.*(explogtau - math.log(2*math.pi)) - 8./2.*(41772+19872) \
         + 5*2*(math.log(2.) - 2.*5.) + 3*2*(math.log(3.) - 3.*6.) \
         + 3.*numpy.log(1.) - numpy.log(math.gamma(3.)) + 2.*9. - 1.*8. \
         - 20.*numpy.log(21.) + numpy.log(math.gamma(20.)) - 19.*9. + 21.*8. \
         - 0.5*5*2*math.log(1./100.) + 0.5*5*2*math.log(2*math.pi) + 5*2*math.log(1.-0.080756659233771066) \
         + 0.5*5*2*1./100.*(11.+81.) \
         - 0.5*3*2*math.log(1./101.) + 0.5*3*2*math.log(2*math.pi) + 3*2*math.log(1.-0.067776752211548219) \
         + 0.5*3*2*1./101.*(12.+81.)
         
    BNMF = bnmf_vb_optimised(R,M,K,priors)
    BNMF.expU = expU
    BNMF.expV = expV
    BNMF.varU = varU
    BNMF.varV = varV
    BNMF.exptau = exptau
    BNMF.explogtau = explogtau
    BNMF.muU = muU
    BNMF.muV = muV
    BNMF.tauU = tauU
    BNMF.tauV = tauV
    BNMF.alpha_s = alpha_s
    BNMF.beta_s = beta_s
    assert BNMF.elbo() == ELBO
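
The two cdf constants in the comments come from the standard normal; a quick check, assuming scipy is available:

import math
from scipy.stats import norm

print(norm.cdf(-14. * math.sqrt(1./100.)))   # 0.080756659233771066
print(norm.cdf(-15. * math.sqrt(1./101.)))   # 0.067776752211548219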
Example #9
def test_update_exp_U():
    # R, M, I, K and priors are module-level fixtures of the test file.
    for k in range(0,K):
        BNMF = bnmf_vb_optimised(R,M,K,priors)
        BNMF.initialise()
        BNMF.tauU = 4*numpy.ones((I,K)) # muU = [[0.5]], tauU = [[4.]]
        BNMF.update_exp_U(k) #-mu*sqrt(tau) = -0.5*2 = -1. lambda(-1.) = 0.241971 / (1-0.1587) = 0.2876155949126352. gamma = 0.37033832534958433
        for i in range(0,I):        
            assert abs(BNMF.expU[i,k] - (0.5 + 1./2. * 0.2876155949126352)) < 0.00001
            assert abs(BNMF.varU[i,k] - 1./4.*(1.-0.37033832534958433)) < 0.00001
Example #10
def test_update_V():
    # R, M, I, J, K, lambdaU, lambdaV and priors are module-level fixtures.
    for k in range(0,K):
        BNMF = bnmf_vb_optimised(R,M,K,priors)
        BNMF.muV = numpy.zeros((J,K))
        BNMF.tauV = numpy.zeros((J,K))
        BNMF.expU = 1./lambdaU #[[1./2.]]
        BNMF.expV = 1./lambdaV #[[1./3.]]
        BNMF.varU = numpy.ones((I,K))*2 #[[2.]]
        BNMF.varV = numpy.ones((J,K))*3 #[[3.]]
        BNMF.exptau = 3.
        BNMF.update_V(k)
        for j in range(0,J):
            assert BNMF.tauV[j,k] == 3. * (M[:,j] * ( BNMF.expU[:,k]*BNMF.expU[:,k] + BNMF.varU[:,k] )).sum()
            assert BNMF.muV[j,k] == (1./(3. * (M[:,j] * ( BNMF.expU[:,k]*BNMF.expU[:,k] + BNMF.varU[:,k] )).sum())) * \
                                    ( -3. + BNMF.exptau * (M[:,j]*( (BNMF.R[:,j] - numpy.dot(BNMF.expU,BNMF.expV[j]) + BNMF.expU[:,k]*BNMF.expV[j,k])*BNMF.expU[:,k] )).sum() )
Example #11
def test_update_U():
    # R, M, I, J, K, lambdaU, lambdaV and priors are module-level fixtures.
    for k in range(0,K):
        BNMF = bnmf_vb_optimised(R,M,K,priors)
        BNMF.muU = numpy.zeros((I,K))
        BNMF.tauU = numpy.zeros((I,K))
        BNMF.expU = 1./lambdaU #[[1./2.]]
        BNMF.expV = 1./lambdaV #[[1./3.]]
        BNMF.varU = numpy.ones((I,K))*2 #[[2.]]
        BNMF.varV = numpy.ones((J,K))*3 #[[3.]]
        BNMF.exptau = 3.
        BNMF.update_U(k)
        for i in range(0,I):
            assert BNMF.tauU[i,k] == 3. * (M[i] * ( BNMF.expV[:,k]*BNMF.expV[:,k] + BNMF.varV[:,k] )).sum()
            assert BNMF.muU[i,k] == (1./(3. * (M[i] * ( BNMF.expV[:,k]*BNMF.expV[:,k] + BNMF.varV[:,k] )).sum())) * \
                                    ( -2. + BNMF.exptau * (M[i]*( (BNMF.R[i] - numpy.dot(BNMF.expU[i],BNMF.expV.T) + BNMF.expU[i,k]*BNMF.expV[:,k])*BNMF.expV[:,k] )).sum() )
Example #12
import math, numpy
from BNMTF.code.bnmf_vb_optimised import bnmf_vb_optimised

def test_compute_statistics():
    R = numpy.array([[1,2],[3,4]],dtype=float)
    M = numpy.array([[1,1],[0,1]])
    I, J, K = 2, 2, 3
    lambdaU = 2*numpy.ones((I,K))
    lambdaV = 3*numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    
    BNMF = bnmf_vb_optimised(R,M,K,priors)
    
    R_pred = numpy.array([[500,550],[1220,1342]],dtype=float)
    M_pred = numpy.array([[0,0],[1,1]])
    
    MSE_pred = (1217**2 + 1338**2) / 2.0
    R2_pred = 1. - (1217**2+1338**2)/(0.5**2+0.5**2) #mean=3.5
    Rp_pred = 61. / ( math.sqrt(.5) * math.sqrt(7442.) ) #mean=3.5,var=0.5,mean_pred=1281,var_pred=7442,cov=61
    
    assert MSE_pred == BNMF.compute_MSE(M_pred,R,R_pred)
    assert R2_pred == BNMF.compute_R2(M_pred,R,R_pred)
    assert Rp_pred == BNMF.compute_Rp(M_pred,R,R_pred)
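
Note that with only two predicted entries Rp_pred is exactly 1: 0.5 * 7442 = 61^2, so 61./(math.sqrt(.5)*math.sqrt(7442.)) = 1. A one-line cross-check with numpy:

import numpy
print(numpy.corrcoef([3., 4.], [1220., 1342.])[0, 1])   # 1.0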
Example #13
def test_update_exp_tau():
    # R, M, K and priors are module-level fixtures of the test file.
    BNMF = bnmf_vb_optimised(R,M,K,priors)
    BNMF.initialise()  
    assert abs(BNMF.exptau - (3+12./2.)/(1+35.4113198623/2.)) < 0.000000000001
    #assert abs(BNMF.exptau - 3./1.) < 0.000000000001
    assert abs(BNMF.explogtau - (2.1406414779556 - math.log(1+35.4113198623/2.))) < 0.000000000001
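
The constant 2.1406414779556 is the digamma function at alpha_s = 3 + 12/2 = 9, since explogtau = psi(alpha_s) - log(beta_s) for a Gamma posterior. A check, assuming scipy:

from scipy.special import psi
print(psi(9.))   # 2.1406414779556...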
Example #14
from BNMTF.code.bnmf_vb_optimised import bnmf_vb_optimised
from BNMTF.drug_sensitivity.experiments_gdsc.load_data import load_Sanger

import numpy, matplotlib.pyplot as plt

standardised = False  # standardised Sanger or unstandardised
no_folds = 5

iterations = 1000
init_UV = 'random'
I, J, K = 622, 139, 10

alpha, beta = 1., 1.
lambdaU = numpy.ones((I, K)) / 10.
lambdaV = numpy.ones((J, K)) / 10.
priors = {'alpha': alpha, 'beta': beta, 'lambdaU': lambdaU, 'lambdaV': lambdaV}

tauUV = {'tauU': numpy.ones((I, K)) * 1., 'tauV': numpy.ones((J, K)) * 1.}

# Load in data
(_, X_min, M, _, _, _, _) = load_Sanger(standardised=standardised)

# Run the VB algorithm
BNMF = bnmf_vb_optimised(X_min, M, K, priors)
BNMF.initialise(init_UV, tauUV=tauUV)
BNMF.run(iterations)

# Plot the tau expectation values to check convergence
plt.plot(BNMF.all_exp_tau)

# Print the performances across iterations (MSE)
print "all_performances = %s" % BNMF.all_performances['MSE']
'''
all_performances = [1776.7981612790977, 14.329927985632544, 3.0969563097499564, 3.0259470842729761, 2.9764629132181586, 2.9323817364734799, 2.8880781484273643, 2.8360910876208782, 2.7663596616581909, 2.6710272824953436, 2.5654623144929043, 2.4771123476189278, 2.4157245166427876, 2.3738284830943055, 2.3421778012568231, 2.3150833014458492, 2.2897410718171933, 2.2651466079403222, 2.2416777297673365, 2.2208062127846193, 2.2024936769294916, 2.1860562538008836, 2.1705868442090579, 2.155326018678104, 2.1400626344771951, 2.1248433345961533, 2.1098252734696197, 2.0951892704174933, 2.0810904812094075, 2.0676412048595476, 2.0549138668774125, 2.0429501126052467, 2.0317672635342778, 2.0213601421757059, 2.0117009518818603, 2.0027420477755706, 1.9944229181141191, 1.9866782510299879, 1.9794440570072849, 1.9726607215973608, 1.9662737210538157, 1.9602336924777901, 1.9544967333204868, 1.949024458989933, 1.9437827305884094, 1.9387398452462148, 1.9338661063776958, 1.9291343336263911, 1.9245202366871186, 1.9200025156181899, 1.9155628007583561, 1.9111855032998366, 1.9068576836341515, 1.9025689950361149, 1.8983116565469169, 1.894080389472486, 1.8898722908769876, 1.8856866496540909, 1.8815247234888706, 1.8773894924854249, 1.8732853912970906, 1.8692180095352346, 1.8651937606169986, 1.861219548846903, 1.8573024738382551, 1.8534495663127335, 1.8496674840323688, 1.8459621408244991, 1.8423384037439174, 1.8388000245238423, 1.8353497291242626, 1.8319891709269145, 1.8287187682463593, 1.8255377426104398, 1.8224443622343232, 1.8194361939572157, 1.8165102880102717, 1.8136633123974903, 1.8108916554327632, 1.8081915090961838, 1.8055589622706629, 1.8029901386596092, 1.8004813589240072, 1.7980292485874663, 1.7956307728629608, 1.7932832215869492, 1.7909841019167518, 1.7887309822282218, 1.7865214390850721, 1.7843531301757054, 1.7822238837020075, 1.7801317075221625, 1.7780746951358293, 1.7760508890582503, 1.7740582012394399, 1.7720944375093366, 1.7701574092387846, 1.7682450989116085, 1.7663558128577119, 1.7644882346599864, 1.7626413700632639, 1.7608144481126118, 1.7590068326700967, 1.757217971600449, 1.7554473860378226, 1.7536946736662631, 1.7519595002880317, 1.7502415798196882, 1.7485406583973639, 1.7468565165435852, 1.7451890021843339, 1.7435381033407173, 1.7419040223118303, 1.7402871338902501, 1.7386877630105337, 1.7371059518640226, 1.7355414485497513, 1.7339938816397535, 1.7324629539026888, 1.7309486293225049, 1.7294513452137719, 1.7279721080867798, 1.7265121307617137, 1.7250720834831563, 1.7236517660903723, 1.7222504548690254, 1.7208673534949201, 1.7195018047722692, 1.7181533051362317, 1.7168214365359593, 1.7155057957044588, 1.7142059575829875, 1.7129214722879136, 1.7116518793052977, 1.7103967248977492, 1.7091555758683366, 1.7079280280453542, 1.7067137101944823, 1.7055122845371418, 1.704323445123195, 1.7031469171983571, 1.701982467816797, 1.7008299454950411, 1.6996893378905424, 1.6985607656219928, 1.6974443638989212, 1.6963401557233546, 1.695248022600361, 1.6941677480515935, 1.6930990677314024, 1.6920416985344562, 1.6909953486774394, 1.6899597178003412, 1.6889344946880265, 1.687919356607271, 1.6869139709505665, 1.6859179988195072, 1.6849311015120589, 1.6839529526082142, 1.6829832569766692, 1.682021771102286, 1.6810683112583844, 1.6801227392622387, 1.6791849322239008, 1.6782547550024591, 1.6773320483337932, 1.6764166317244829, 1.6755083131169612, 1.6746068988484175, 1.6737122021545192, 1.6728240533578271, 1.6719423193380065, 1.6710669395116413, 1.6701979687835815, 1.6693355841538495, 1.668480004155672, 1.6676313396846592, 
1.6667894881268193, 1.6659541609556405, 1.6651249985598486, 1.6643016669407555, 1.6634838951969351, 1.6626714729031271, 1.6618642342351408, 1.6610620428039671, 1.6602647810728732, 1.6594723440386245, 1.6586846358165885, 1.6579015678176028, 1.6571230575157103, 1.6563490271773598, 1.6555794023124371, 1.6548141099878553, 1.6540530774681883, 1.6532962318820807, 1.652543501677266, 1.6517948203916966, 1.6510501325805875, 1.6503094006537682, 1.6495726103682362, 1.6488397725789465, 1.6481109200175745, 1.6473860998794942, 1.6466653648916036, 1.6459487672624034, 1.6452363635315634, 1.6445282475270497, 1.6438246351445287, 1.6431259577116146, 1.6424327583572464, 1.6417453644357376, 1.6410637749858672, 1.6403878635444531, 1.6397175455374853, 1.6390528050933872, 1.6383936683349378, 1.6377401787496844, 1.6370923868170029, 1.6364503489562618, 1.6358141287184791, 1.6351837968069529, 1.6345594296717105, 1.6339411075035113, 1.6333289123429053, 1.6327229266251408, 1.6321232321817927, 1.6315299095637763, 1.6309430375006417, 1.6303626923308152, 1.6297889473054692, 1.6292218717608236, 1.628661530241565, 1.6281079817118638, 1.6275612789909042, 1.6270214684995925, 1.6264885903204882, 1.6259626784707113, 1.6254437611974661, 1.624931861059145, 1.6244269945891148, 1.6239291714759436, 1.6234383934327183, 1.6229546532091041, 1.6224779343891413, 1.6220082125234359, 1.6215454576343735, 1.6210896373313848, 1.6206407191483385, 1.6201986708209672, 1.619763458189672, 1.6193350416483561, 1.61891337269254, 1.6184983918086242, 1.6180900281019912, 1.6176882003272322, 1.617292818677587, 1.6169037867511062, 1.6165210033214428, 1.6161443637460762, 1.6157737609801266, 1.6154090862388353, 1.6150502293828446, 1.6146970791145596, 1.614349523069649, 1.6140074478780622, 1.6136707392475504, 1.613339282099179, 1.6130129607611543, 1.61269165921199, 1.612375261357667, 1.6120636513286555, 1.6117567137903421, 1.6114543342664627, 1.6111563994819684, 1.6108627977288821, 1.6105734192531413, 1.6102881566453906, 1.6100069052046051, 1.6097295632287008, 1.6094560321860372, 1.6091862167324085, 1.6089200245650519, 1.6086573661361367, 1.60839815427515, 1.6081423037784532, 1.6078897310170168, 1.6076403535939114, 1.607394090064342, 1.6071508597271433, 1.6069105825107375, 1.6066731790063229, 1.6064385707263584, 1.6062066806589141, 1.6059774341182935, 1.605750759759982, 1.6055265904855955, 1.6053048639062018, 1.6050855221349805, 1.6048685109194947, 1.6046537783738981, 1.604441273700556, 1.6042309462531894, 1.6040227451441085, 1.603816619426863, 1.6036125187338102, 1.6034103941299567, 1.6032101988747118, 1.6030118888025269, 1.6028154221824091, 1.6026207591426203, 1.6024278609361167, 1.6022366893553115, 1.6020472064975289, 1.601859374909157, 1.6016731579994805, 1.601488520543676, 1.6013054290882234, 1.6011238521233238, 1.6009437599687983, 1.600765124417495, 1.600587918248108, 1.600412114745845, 1.600237687342547, 1.6000646094337461, 1.5998928543694797, 1.5997223955732627, 1.5995532067265354, 1.5993852619563937, 1.5992185359827389, 1.5990530041994384, 1.5988886426849995, 1.598725428155608, 1.5985633378828199, 1.5984023496040805, 1.5982424414511158, 1.598083591914079, 1.5979257798497808, 1.597768984531118, 1.5976131857274221, 1.5974583637991868, 1.5973044997893109, 1.5971515754949956, 1.5969995735072411, 1.5968484772144274, 1.5966982707716468, 1.5965489390486343, 1.5964004675733368, 1.5962528424920748, 1.5961060505624483, 1.5959600791880031, 1.5958149164865851, 1.595670551371642, 1.595526973614078, 1.595384173853843, 1.5952421435431665, 1.5951008748238411, 
1.5949603603604443, 1.5948205931607662, 1.5946815664109995, 1.5945432733404674, 1.5944057071198638, 1.5942688607895521, 1.5941327272139905, 1.5939972990617635, 1.5938625688083319, 1.5937285287569243, 1.5935951710665901, 1.5934624877736809, 1.5933304707947857, 1.5931991119048208, 1.5930684026939028, 1.5929383345135324, 1.5928088984258191, 1.5926800851683485, 1.592551885141688, 1.5924242884255631, 1.5922972848323282, 1.5921708640135168, 1.5920450156395547, 1.5919197296691072, 1.5917949966989553, 1.5916708083458713, 1.5915471575672613, 1.5914240387948146, 1.5913014477604488, 1.5911793809417727, 1.5910578346487199, 1.5909368039041851, 1.5908162814342381, 1.5906962572447001, 1.5905767193281468, 1.5904576558273671, 1.590339058306999, 1.5902209248147632, 1.5901032608727699, 1.589986077264478, 1.589869385334036, 1.5897531920785268, 1.589637497264476, 1.589522293343846, 1.5894075674152492, 1.5892933038715924, 1.5891794866711311, 1.5890661007982401, 1.5889531329708282, 1.5888405718627785, 1.5887284080985562, 1.5886166341630801, 1.5885052442503784, 1.5883942340097645, 1.5882836001651885, 1.5881733400546618, 1.5880634512036378, 1.5879539310522306, 1.5878447768935087, 1.5877359859919005, 1.5876275557903763, 1.5875194841059697, 1.5874117692472887, 1.5873044100333595, 1.5871974057294038, 1.5870907559352565, 1.5869844604630412, 1.5868785192338235, 1.5867729322097441, 1.5866676993667967, 1.5865628207031683, 1.5864582962693035, 1.5863541262041962, 1.5862503107600423, 1.586146850303819, 1.5860437452899756, 1.5859409962090032, 1.5858386035236616, 1.5857365676082231, 1.5856348887047482, 1.5855335669045476, 1.5854326021564171, 1.5853319942957742, 1.5852317430866114, 1.5851318482671599, 1.5850323095914856, 1.5849331268634863, 1.5848342999612863, 1.58473582885443, 1.5846377136165106, 1.5845399544372365, 1.5844425516382423, 1.5843455056954434, 1.584248817270965, 1.5841524872570407, 1.5840565168341125, 1.5839609075462771, 1.5838656613966271, 1.5837707809618664, 1.5836762695222057, 1.5835821311921641, 1.5834883710307113, 1.5833949950994608, 1.5833020104360858, 1.5832094249132096, 1.5831172469672836, 1.5830254852010053, 1.5829341478845578, 1.5828432424025329, 1.5827527747077239, 1.5826627488563887, 1.5825731667100575, 1.5824840278947232, 1.5823953301002023, 1.5823070697578088, 1.5822192430452766, 1.5821318470447168, 1.5820448807784186, 1.5819583458374582, 1.581872246432068, 1.5817865888832265, 1.5817013807485014, 1.5816166298465908, 1.5815323434062762, 1.5814485274638361, 1.5813651865318217, 1.5812823234980398, 1.5811999396911065, 1.5811180350562815, 1.581036608401946, 1.5809556576952177, 1.5808751803917904, 1.5807951737847385, 1.5807156353459784, 1.5806365630242847, 1.5805579554575224, 1.5804798120655776, 1.5804021330130666, 1.5803249190620481, 1.5802481713636543, 1.580171891248231, 1.5800960800662665, 1.5800207391081529, 1.579945869601824, 1.5798714727661847, 1.5797975498866745, 1.5797241023817703, 1.5796511318351338, 1.5795786399809852, 1.5795066286398445, 1.5794350996105631, 1.5793640545322107, 1.5792934947319928, 1.5792234210759251, 1.579153833837101, 1.579084732592408, 1.5790161161558918, 1.5789479825546673, 1.5788803290475097, 1.5788131521863722, 1.5787464479151663, 1.5786802117026997, 1.578614438704627, 1.5785491239421627, 1.5784842624607356, 1.5784198493969852, 1.5783558798601023, 1.5782923485656575, 1.5782292492642411, 1.5781665741427147, 1.5781043134484636, 1.5780424555367676, 1.5779809873813311, 1.5779198954192624, 1.5778591665090269, 1.5777987887880474, 1.5777387522946162, 1.5776790493062562, 
1.5776196744178317, 1.5775606244170375, 1.5775018980248694, 1.5774434955625354, 1.5773854185955003, 1.5773276695917318, 1.5772702516214137, 1.5772131681142938, 1.5771564226834371, 1.5771000190143163, 1.5770439608111106, 1.5769882517838218, 1.5769328956534312, 1.5768778961472407, 1.5768232569580805, 1.5767689816467081, 1.576715073481231, 1.5766615352261475, 1.5766083689101789, 1.5765555756134859, 1.576503155313123, 1.5764511068145985, 1.5763994277771982, 1.5763481148227858, 1.5762971637043495, 1.576246569504034, 1.5761963268341166, 1.5761464300196566, 1.5760968732512926, 1.5760476507038939, 1.5759987566217424, 1.5759501853755566, 1.575901931496664, 1.575853989694094, 1.5758063548599595, 1.5757590220667306, 1.575711986559613, 1.5756652437459671, 1.5756187891833959, 1.5755726185675374, 1.5755267277200908, 1.5754811125778867, 1.575435769183505, 1.5753906936776145, 1.575345882293846, 1.5753013313557378, 1.5752570372768153, 1.5752129965635762, 1.5751692058206417, 1.5751256617587968, 1.5750823612039517, 1.5750393011077777, 1.5749964785573694, 1.5749538907839986, 1.5749115351691589, 1.574869409247609, 1.5748275107062994, 1.5747858373796171, 1.5747443872408045, 1.5747031583908735, 1.5746621490455173, 1.5746213575215087, 1.5745807822234448, 1.5745404216319421, 1.5745002742934946, 1.574460338812002, 1.5744206138421195, 1.5743810980835717, 1.5743417902756087, 1.5743026891917824, 1.5742637936334107, 1.5742251024221108, 1.5741866143911687, 1.5741483283756488, 1.5741102432018594, 1.5740723576765703, 1.5740346705764741, 1.5739971806391353, 1.5739598865555127, 1.573922786965108, 1.5738858804542586, 1.5738491655576774, 1.5738126407635233, 1.5737763045220363, 1.5737401552564563, 1.5737041913759617, 1.5736684112890038, 1.5736328134160307, 1.5735973961999599, 1.5735621581141368, 1.5735270976682658, 1.5734922134132145, 1.5734575039473693, 1.5734229679264335, 1.573388604078874, 1.5733544112245799, 1.5733203882933424, 1.5732865343348363, 1.5732528485120407, 1.5732193300721429, 1.5731859782964568, 1.5731527924380544, 1.5731197716633085, 1.5730869150159696, 1.5730542214180645, 1.5730216897131439, 1.5729893187468138, 1.5729571074693529, 1.5729250550396818, 1.5728931609096712, 1.5728614248708266, 1.5728298470552007, 1.5727984278901284, 1.5727671680148676, 1.5727360681732181, 1.5727051290972929, 1.5726743513965313, 1.5726437354623692, 1.5726132813953169, 1.5725829889577236, 1.5725528575517969, 1.5725228862216052, 1.5724930736752711, 1.5724634183225197, 1.5724339183233635, 1.5724045716419097, 1.5723753761010799, 1.572346329434831, 1.5723174293339408, 1.5722886734850914, 1.572260059601178, 1.5722315854437738, 1.5722032488382105, 1.5721750476822391, 1.5721469799497751, 1.5721190436908556, 1.5720912370294164, 1.5720635581594549, 1.572036005341312, 1.5720085768984342, 1.5719812712152252, 1.5719540867366384, 1.571927021969431, 1.5719000754853476, 1.5718732459255491, 1.5718465320063371, 1.5718199325248858, 1.5717934463647909, 1.5717670724998569, 1.5717408099958283, 1.571714658009024, 1.5716886157812027, 1.5716626826309334, 1.5716368579413766, 1.5716111411447204, 1.5715855317049887, 1.5715600290991829, 1.5715346327989703, 1.5715093422535176, 1.5714841568745495, 1.5714590760247311, 1.5714340990100133, 1.5714092250759828, 1.5713844534090151, 1.5713597831413602, 1.5713352133604994, 1.5713107431218631, 1.5712863714639578, 1.5712620974252016, 1.5712379200613493, 1.5712138384614014, 1.5711898517619731, 1.5711659591578397, 1.5711421599086512, 1.5711184533406892, 1.5710948388443478, 1.571071315867123, 1.5710478839036328, 
1.5710245424831497, 1.571001291156469, 1.5709781294827108, 1.5709550570173929, 1.570932073302421, 1.5709091778581137, 1.570886370177788, 1.5708636497244497, 1.5708410159296946, 1.5708184681938486, 1.5707960058878869, 1.5707736283558251, 1.5707513349181019, 1.5707291248748527, 1.5707069975095436, 1.5706849520927724, 1.5706629878853, 1.5706411041416193, 1.5706193001126478, 1.5705975750485577, 1.5705759282011369, 1.5705543588261366, 1.5705328661849558, 1.5705114495467316, 1.5704901081899039, 1.5704688414037102, 1.5704476484895624, 1.5704265287625496, 1.5704054815524811, 1.5703845062053576, 1.5703636020843641, 1.5703427685710121, 1.5703220050663089, 1.5703013109914932, 1.570280685789001, 1.5702601289231093, 1.5702396398805176, 1.5702192181705708, 1.5701988633254256, 1.570178574899985, 1.5701583524713949, 1.5701381956385012, 1.5701181040209105, 1.5700980772580271, 1.5700781150075396, 1.5700582169441251, 1.570038382757698, 1.5700186121517856, 1.5699989048416758, 1.5699792605526846, 1.5699596790183237, 1.5699401599786327, 1.5699207031786591, 1.569901308366785, 1.5698819752935722, 1.5698627037105675, 1.5698434933692527, 1.5698243440206217, 1.5698052554144328, 1.5697862272991934, 1.5697672594223238, 1.5697483515305086, 1.5697295033706264, 1.5697107146907703, 1.5696919852418325, 1.5696733147790407, 1.5696547030645223, 1.5696361498691409, 1.569617654975056, 1.569599218178513, 1.5695808392920441, 1.5695625181469302, 1.5695442545952245, 1.5695260485115703, 1.5695078997940468, 1.5694898083650521, 1.5694717741710169, 1.569453797181745, 1.5694358773889221, 1.5694180148039483, 1.5694002094554362, 1.5693824613860343, 1.5693647706489191, 1.5693471373043004, 1.5693295614155955, 1.569312043045948, 1.56929458225473, 1.5692771790944842, 1.569259833608291, 1.5692425458276107, 1.569225315770326, 1.569208143439796, 1.5691910288239188, 1.569173971895079, 1.5691569726101133, 1.5691400309109429, 1.5691231467253153, 1.5691063199679383, 1.5690895505418436, 1.569072838339342, 1.5690561832440211, 1.5690395851315764, 1.5690230438717372, 1.5690065593291973, 1.5689901313650543, 1.5689737598379645, 1.5689574446049688, 1.5689411855225914, 1.5689249824472686, 1.5689088352362817, 1.5688927437478102, 1.5688767078415704, 1.5688607273789614, 1.568844802223005, 1.5688289322385474, 1.5688131172920221, 1.5687973572514708, 1.5687816519861486, 1.5687660013664633, 1.568750405263543, 1.5687348635490468, 1.5687193760948359, 1.5687039427725586, 1.5686885634535577, 1.5686732380085147, 1.5686579663072737, 1.5686427482187537, 1.5686275836110197, 1.568612472351171, 1.5685974143058758, 1.5685824093417531, 1.5685674573260442, 1.5685525581275603, 1.5685377116180972, 1.5685229176740243, 1.5685081761781807, 1.5684934870225489, 1.5684788501111373, 1.568464265363307, 1.5684497327179863, 1.5684352521380274, 1.5684208236155093, 1.5684064471776611, 1.5683921228933078, 1.568377850880567, 1.5683636313150651, 1.5683494644392955, 1.5683353505733748, 1.5683212901266737, 1.5683072836109768, 1.5682933316549017, 1.568279435019295, 1.5682655946141608, 1.5682518115155979, 1.5682380869832939, 1.5682244224772741, 1.5682108196729128, 1.568197280473971, 1.5681838070208507, 1.5681704016947409, 1.5681570671143303, 1.5681438061257145, 1.5681306217830644, 1.5681175173206461, 1.568104496115216, 1.5680915616402333, 1.5680787174121396, 1.5680659669316408, 1.5680533136209975, 1.5680407607616804, 1.568028311433429, 1.5680159684592907, 1.5680037343574926, 1.5679916113030656, 1.5679796011001081, 1.5679677051648027, 1.5679559245195174, 1.567944259797204, 1.5679327112542651, 
1.5679212787914385, 1.5679099619800336, 1.5678987600929477, 1.5678876721383612, 1.5678766968950644, 1.5678658329485293, 1.5678550787263541, 1.5678444325327814, 1.5678338925812183, 1.5678234570246812, 1.567813123983073, 1.5678028915671449, 1.5677927578987345, 1.5677827211270097, 1.5677727794404599, 1.5677629310750365, 1.5677531743182136, 1.5677435075101684, 1.5677339290422396, 1.5677244373534898, 1.5677150309265879, 1.5677057082830272, 1.5676964679790899, 1.5676873086025345, 1.5676782287703166, 1.567669227127477, 1.5676603023473747, 1.5676514531326753, 1.5676426782172246, 1.5676339763682836, 1.5676253463889394, 1.56761678712073, 1.5676082974456016, 1.5675998762880177, 1.5675915226162467, 1.5675832354432031, 1.5675750138271201, 1.5675668568712384, 1.5675587637235062, 1.5675507335757799, 1.5675427656626721, 1.567534859260209, 1.567527013684483, 1.5675192282900214, 1.5675115024681954, 1.5675038356455635, 1.5674962272823627, 1.5674886768708209, 1.5674811839336946, 1.5674737480228627, 1.5674663687176624, 1.5674590456236985, 1.5674517783713673, 1.5674445666146448, 1.5674374100296808, 1.5674303083134835, 1.567423261182894, 1.5674162683731254, 1.5674093296366953, 1.5674024447421575, 1.5673956134730593, 1.5673888356268337, 1.5673821110134656, 1.5673754394548041, 1.5673688207832177, 1.5673622548408652, 1.5673557414785726, 1.567349280554982, 1.5673428719357569, 1.5673365154927208, 1.5673302111030118, 1.5673239586484973, 1.5673177580149191, 1.5673116090914601, 1.5673055117699848, 1.5672994659444746, 1.5672934715107025, 1.5672875283655696, 1.5672816364067896, 1.567275795532576, 1.5672700056410303, 1.5672642666303531, 1.5672585783979858, 1.5672529408409472, 1.5672473538552558, 1.5672418173360918, 1.567236331177392, 1.5672308952720813, 1.5672255095117522, 1.5672201737867757, 1.5672148879862531, 1.5672096519980323, 1.5672044657086766, 1.5671993290036024, 1.5671942417669604, 1.5671892038818336, 1.5671842152301358, 1.5671792756927716, 1.5671743851496192, 1.5671695434796016, 1.5671647505605655, 1.5671600062695197, 1.5671553104825351, 1.5671506630746956, 1.5671460639201216, 1.5671415128920885, 1.5671370098627719, 1.5671325547032997, 1.5671281472837599]
'''
Example #15
    # (The preceding, omitted loop generates a dataset R for each noise ratio.)
    all_R.append(R)

# We now run the VB algorithm on each of the M's for each noise ratio    
all_performances = {metric:[] for metric in metrics} 
average_performances = {metric:[] for metric in metrics} # averaged over repeats
for (noise,R,Ms,Ms_test) in zip(noise_ratios,all_R,all_Ms,all_Ms_test):
    print "Trying noise ratio %s." % noise
    
    # Run the algorithm <repeats> times and store all the performances
    for metric in metrics:
        all_performances[metric].append([])
    for (repeat,M,M_test) in zip(range(0,repeats),Ms,Ms_test):
        print "Repeat %s of noise ratio %s." % (repeat+1, noise)
    
        BNMF = bnmf_vb_optimised(R,M,K,priors)
        BNMF.initialise(init_UV)
        BNMF.run(iterations)
    
        # Measure the performances
        performances = BNMF.predict(M_test)
        for metric in metrics:
            # Add this metric's performance to the list of <repeat> performances for this noise ratio
            all_performances[metric][-1].append(performances[metric])
            
    # Compute the average across attempts
    for metric in metrics:
        average_performances[metric].append(sum(all_performances[metric][-1])/repeats)
    

    
Example #16
import numpy, pytest
from BNMTF.code.bnmf_vb_optimised import bnmf_vb_optimised

def test_init():
    # Test getting an exception when R and M are different sizes, and when R is not a 2D array.
    R1 = numpy.ones(3)
    M = numpy.ones((2,3))
    I,J,K = 5,3,1
    lambdaU = numpy.ones((I,K))
    lambdaV = numpy.ones((J,K))
    alpha, beta = 3, 1    
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    
    with pytest.raises(AssertionError) as error:
        bnmf_vb_optimised(R1,M,K,priors)
    assert str(error.value) == "Input matrix R is not a two-dimensional array, but instead 1-dimensional."
    
    R2 = numpy.ones((4,3,2))
    with pytest.raises(AssertionError) as error:
        bnmf_vb_optimised(R2,M,K,priors)
    assert str(error.value) == "Input matrix R is not a two-dimensional array, but instead 3-dimensional."
    
    R3 = numpy.ones((3,2))
    with pytest.raises(AssertionError) as error:
        bnmf_vb_optimised(R3,M,K,priors)
    assert str(error.value) == "Input matrix R is not of the same size as the indicator matrix M: (3, 2) and (2, 3) respectively."
    
    # Similarly for lambdaU, lambdaV
    R4 = numpy.ones((2,3))
    lambdaU = numpy.ones((2+1,1))
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    with pytest.raises(AssertionError) as error:
        bnmf_vb_optimised(R4,M,K,priors)
    assert str(error.value) == "Prior matrix lambdaU has the wrong shape: (3, 1) instead of (2, 1)."
    
    lambdaU = numpy.ones((2,1))
    lambdaV = numpy.ones((3+1,1))
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    with pytest.raises(AssertionError) as error:
        bnmf_vb_optimised(R4,M,K,priors)
    assert str(error.value) == "Prior matrix lambdaV has the wrong shape: (4, 1) instead of (3, 1)."
    
    # Test getting an exception if a row or column is entirely unknown
    lambdaU = numpy.ones((2,1))
    lambdaV = numpy.ones((3,1))
    M1 = [[1,1,1],[0,0,0]]
    M2 = [[1,1,0],[1,0,0]]
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    
    with pytest.raises(AssertionError) as error:
        bnmf_vb_optimised(R4,M1,K,priors)
    assert str(error.value) == "Fully unobserved row in R, row 1."
    with pytest.raises(AssertionError) as error:
        bnmf_vb_optimised(R4,M2,K,priors)
    assert str(error.value) == "Fully unobserved column in R, column 2."
    
    # Finally, a successful case
    I,J,K = 3,2,2
    R5 = 2*numpy.ones((I,J))
    lambdaU = numpy.ones((I,K))
    lambdaV = numpy.ones((J,K))
    M = numpy.ones((I,J))
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    BNMF = bnmf_vb_optimised(R5,M,K,priors)
    
    assert numpy.array_equal(BNMF.R,R5)
    assert numpy.array_equal(BNMF.M,M)
    assert BNMF.I == I
    assert BNMF.J == J
    assert BNMF.K == K
    assert BNMF.size_Omega == I*J
    assert BNMF.alpha == alpha
    assert BNMF.beta == beta
    assert numpy.array_equal(BNMF.lambdaU,lambdaU)
    assert numpy.array_equal(BNMF.lambdaV,lambdaV)
    
    # And when lambdaU and lambdaV are scalars
    I,J,K = 3,2,2
    R5 = 2*numpy.ones((I,J))
    lambdaU = 3.
    lambdaV = 4.
    M = numpy.ones((I,J))
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    BNMF = bnmf_vb_optimised(R5,M,K,priors)
    
    assert numpy.array_equal(BNMF.R,R5)
    assert numpy.array_equal(BNMF.M,M)
    assert BNMF.I == I
    assert BNMF.J == J
    assert BNMF.K == K
    assert BNMF.size_Omega == I*J
    assert BNMF.alpha == alpha
    assert BNMF.beta == beta
    assert numpy.array_equal(BNMF.lambdaU,lambdaU*numpy.ones((I,K)))
    assert numpy.array_equal(BNMF.lambdaV,lambdaV*numpy.ones((J,K)))
Example #17
from BNMTF.code.bnmf_vb_optimised import bnmf_vb_optimised
from BNMTF.drug_sensitivity.experiments_gdsc.load_data import load_Sanger

import numpy, matplotlib.pyplot as plt

##########

standardised = False #standardised Sanger or unstandardised

iterations = 1000
init_UV = 'random'
I, J, K = 622,138,10

alpha, beta = 1., 1.
lambdaU = numpy.ones((I,K))/10.
lambdaV = numpy.ones((J,K))/10.
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }

# Load in data
(_,X_min,M,_,_,_,_) = load_Sanger(standardised=standardised)

# Run the VB algorithm
BNMF = bnmf_vb_optimised(X_min,M,K,priors)
BNMF.initialise(init_UV)
BNMF.run(iterations)

# Plot the tau expectation values to check convergence
plt.plot(BNMF.all_exp_tau)

# Print the performances across iterations (MSE)
print "all_performances = %s" % BNMF.all_performances['MSE']
Example #18
# (Snippet starts mid-script: standardised, repeats, iterations, init_UV, I, J, K,
# alpha, beta and lambdaU are defined in the omitted header; cf. Example #17.)
lambdaV = numpy.ones((J,K))/10.
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }

# Load in data
(_,R,M,_,_,_,_) = load_Sanger(standardised=standardised)


# Run the VB algorithm, <repeats> times
times_repeats = []
performances_repeats = []
for i in range(0,repeats):
    # Set all the seeds
    numpy.random.seed(0)
    
    # Run the classifier
    BNMF = bnmf_vb_optimised(R,M,K,priors) 
    BNMF.initialise(init_UV)
    BNMF.run(iterations)

    # Extract the performances and timestamps across all iterations
    times_repeats.append(BNMF.all_times)
    performances_repeats.append(BNMF.all_performances)

# Check whether seed worked: all performances should be the same
assert all(numpy.array_equal(performances, performances_repeats[0]) for performances in performances_repeats), \
    "Seed went wrong - performances not the same across repeats!"

# Print out the performances, and the average times
vb_all_times_average = list(numpy.average(times_repeats, axis=0))
vb_all_performances = performances_repeats[0]
print "vb_all_times_average = %s" % vb_all_times_average