Example #1
# Assumed imports for this example: numpy, scipy.stats and the helper module
# alb_MM_functions (aliased as alb), which provides the moment-matching helpers
# (alphaGm/betaGm, alphaIG/betaIG, a_beta_distr/b_beta_distr) and the pdfs
# gam_self/invgam used below.
import numpy as np
import scipy.stats
import alb_MM_functions as alb


def MM_E_step(x, K, opts, tmp_mu, tmp_v, tmp_PI, xpos, xneg):
    PS = np.zeros([K, x.shape[0]])  # probability of each sample under each component
    D = np.zeros([K, x.shape[0]])   # mixing-weighted probabilities

    # shape and scale parameters; remain zero for non-(inverse-)gamma components
    tmp_a = np.zeros(K)
    tmp_b = np.zeros(K)
    for k in range(K):
        if opts['Components_Model'][k] == 'Gauss':
            Nobj = scipy.stats.norm(tmp_mu[k], np.power(tmp_v[k], 0.5))
            PS[k, :] = Nobj.pdf(x)
        elif opts['Components_Model'][k] == 'Gamma':
            tmp_a[k] = alb.alphaGm(tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.betaGm(tmp_mu[k], tmp_v[k])
            PS[k, :] = alb.gam_self(x, tmp_a[k], tmp_b[k])
            PS[k, xneg] = 0
        elif opts['Components_Model'][k] == '-Gamma':
            tmp_a[k] = alb.alphaGm(-1 * tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.betaGm(-1 * tmp_mu[k], tmp_v[k])
            PS[k, :] = alb.gam_self(-1 * x, tmp_a[k], tmp_b[k])
            PS[k, xpos] = 0
        elif opts['Components_Model'][k] == 'InvGamma':
            tmp_a[k] = alb.alphaIG(tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.betaIG(tmp_mu[k], tmp_v[k])
            PS[k, :] = alb.invgam(x, tmp_a[k], tmp_b[k])
            PS[k, xneg] = 0
        elif opts['Components_Model'][k] == '-InvGamma':
            tmp_a[k] = alb.alphaIG(-1 * tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.betaIG(-1 * tmp_mu[k], tmp_v[k])
            PS[k, :] = alb.invgam(-1 * x, tmp_a[k], tmp_b[k])
            PS[k, xpos] = 0
        elif opts['Components_Model'][k] == 'Beta':
            tmp_a[k] = alb.a_beta_distr(tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.b_beta_distr(tmp_mu[k], tmp_v[k])
            PS[k, :] = scipy.stats.beta.pdf(x, tmp_a[k], tmp_b[k])

    PS[np.isnan(PS)] = 0
    PS[np.isinf(PS)] = 0
    D = np.multiply(PS, np.matrix(tmp_PI).T)
    resp = np.divide(D, np.matrix(np.sum(D, 0)))
    N = np.sum(resp, 1)
    tmp_PI = np.divide(N, np.sum(resp)).T
    # Compute the log-likelihood used for convergence monitoring
    if 0:
        dum = np.add(np.log(PS), np.log(tmp_PI).T)
        dum[np.isinf(dum)] = 0
        Exp_lik = np.sum(np.multiply(resp, dum))
    else:
        dum = np.multiply(tmp_PI.T, PS)
        dum[np.isinf(dum)] = 1
        dum[dum == 0] = 1
        Exp_lik = np.sum(np.log(dum))

    return PS, resp, tmp_PI, N, Exp_lik
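
# A minimal usage sketch for MM_E_step (hypothetical data and settings, assuming
# the imports above, in particular the alb_MM_functions helpers):
rng = np.random.default_rng(0)
x = np.concatenate([rng.normal(0, 1, 500), rng.gamma(6.0, 0.5, 100)])
opts = {'Components_Model': ['Gauss', 'Gamma']}
xpos = np.where(x > 1e-14)[0]
xneg = np.where(x < 1e-14)[0]
PS, resp, PI, N, Exp_lik = MM_E_step(x, K=2, opts=opts,
                                     tmp_mu=np.array([0.0, 3.0]),
                                     tmp_v=np.array([1.0, 2.0]),
                                     tmp_PI=np.array([0.8, 0.2]),
                                     xpos=xpos, xneg=xneg)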


# Plot the resulting fit on a histogram of the data
import numpy as np
from alb_MM_functions import invgam
from alb_MM_functions import gam
from scipy.stats import norm

my_range = np.linspace(0.001, 10, 10000)

dist_plt = np.zeros([Number_of_Components, 10000])
for k in range(Number_of_Components):
    if Components_Model[k] == 'InvGamma':
        dist_plt[k, :] = np.multiply(Model['Mixing Prop.'][k],
                                     invgam(my_range, Model['shapes'][k], Model['scales'][k]))
    elif Components_Model[k] == 'Gamma':
        dist_plt[k, :] = np.multiply(Model['Mixing Prop.'][k],
                                     gam(my_range, Model['shapes'][k], np.divide(1, Model['rates'][k])))

full_fit = np.sum(dist_plt, 0)
        
    


import matplotlib.pyplot as plt
plt.hist(data_vector,bins=50,density=True,alpha=1, color='g')
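# Overlay the per-component curves and their sum (dist_plt and full_fit, computed
# above) on the histogram; a minimal sketch assuming that snippet has been run.
for k in range(Number_of_Components):
    plt.plot(my_range, dist_plt[k, :], 'k', linewidth=2)
plt.plot(my_range, full_fit, 'r', linewidth=2)
plt.show()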
import numpy as np
from alb_MM_functions import invgam
from alb_MM_functions import gam
from scipy.stats import norm

my_range = np.linspace(-10, 10, 10000)

plt0 = np.multiply(
    Model['Mixing Prop.'][0],
    norm.pdf(my_range, Model['mu1'][0],
             np.sqrt(np.divide(1, Model['taus1'][0]))))

if Components_Model[1] == 'InvGamma':
    plt1 = np.multiply(
        Model['Mixing Prop.'][1],
        invgam(my_range, Model['shapes'][1], Model['scales'][1]))
elif Components_Model[1] == 'Gamma':
    plt1 = np.multiply(
        Model['Mixing Prop.'][1],
        gam(my_range, Model['shapes'][1], np.divide(1, Model['rates'][1])))

plt1[my_range < 0] = 0

if Components_Model[2] == '-InvGamma':
    plt2 = np.multiply(
        Model['Mixing Prop.'][2],
        invgam(-my_range, Model['shapes'][2], Model['scales'][2]))
elif Components_Model[2] == '-Gamma':
    plt2 = np.multiply(
        Model['Mixing Prop.'][2],
        gam(-my_range, Model['shapes'][2], np.divide(1, Model['rates'][2])))

# Generalized version: evaluate every fitted component on a common grid
T = 10000
my_range = np.linspace(-10, 10, T)
PLTS = np.zeros([Number_of_Components, T])

for k in range(Number_of_Components):
    if Components_Model[k] == 'Gauss':
        PLTS[k, :] = np.multiply(
            Model['Mixing Prop.'][k],
            norm.pdf(my_range, Model['mu1'][k],
                     np.sqrt(np.divide(1, Model['taus1'][k]))))

    elif Components_Model[k] == 'InvGamma':
        PLTS[k, :] = np.multiply(
            Model['Mixing Prop.'][k],
            invgam(my_range, Model['shapes'][k], Model['scales'][k]))
        PLTS[k, my_range < 0] = 0

    elif Components_Model[k] == 'Gamma':
        PLTS[k, :] = np.multiply(
            Model['Mixing Prop.'][k],
            gam(my_range, Model['shapes'][k], np.divide(1, Model['rates'][k])))
        PLTS[k, my_range < 0] = 0

    elif Components_Model[k] == '-InvGamma':
        PLTS[k, :] = np.multiply(
            Model['Mixing Prop.'][k],
            invgam(-my_range, Model['shapes'][k], Model['scales'][k]))
        PLTS[k, my_range > 0] = 0

    elif Components_Model[k] == '-Gamma':
        PLTS[k, :] = np.multiply(
            Model['Mixing Prop.'][k],
            gam(-my_range, Model['shapes'][k], np.divide(1, Model['rates'][k])))
        PLTS[k, my_range > 0] = 0
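
# A minimal plotting sketch for the generalized loop above: overlay each
# component and their sum on the data histogram (assumes data_vector and the
# fitted Model from the snippets above are in scope).
import matplotlib.pyplot as plt
plt.hist(data_vector, bins=50, density=True, alpha=1, color='g')
for k in range(Number_of_Components):
    plt.plot(my_range, PLTS[k, :], 'k', linewidth=2)
plt.plot(my_range, np.sum(PLTS, 0), 'r', linewidth=2)
plt.show()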
Example #5
# Assumed imports for this example: sys, os, numpy, scipy.stats.norm and the
# helper pdfs invgam/gam from alb_MM_functions.
import os
import sys
import numpy as np
from scipy.stats import norm
from alb_MM_functions import invgam, gam


def GaussGammas_Connectome_thresholding_pFDR(input_file, toolbox_path):

    #Add the toolbox to path
    sys.path.append(os.path.join(os.path.abspath(toolbox_path)))
    from Mixture_Model_1Dim import Mixture_Model_1Dim

    #load input connectivity matrix
    connectivity_matrix = np.genfromtxt(input_file, delimiter=',')

    #get upper-diagonal terms
    updiag_idx = np.triu_indices_from(connectivity_matrix, k=1)
    orig_data_vector = connectivity_matrix[updiag_idx]
    orig_data_vector = orig_data_vector[~np.isnan(orig_data_vector)]

    #demean and divide by the std to allow easy initialization
    mean_factor = np.mean(orig_data_vector)
    scaling_factor = 1.  #np.std(orig_data_vector)
    data_vector = np.divide(orig_data_vector - mean_factor, scaling_factor)

    #Define options for the mixture model fit
    Inference = 'Variational Bayes'  #'Method of moments' OR 'Maximum Likelihood' OR 'Variational Bayes' (ML not included yet)
    Number_of_Components = 3
    #Each component can be Gauss, Gamma, InvGamma, -Gamma or -InvGamma
    Components_Model = ['Gauss', 'InvGamma', '-InvGamma']
    maxits = 500
    tol = 0.00001
    #init_params = [0, 1, 6, 2, -6, 2]  #fixed default, overridden by the data-driven values below
    init_params = [
        0, 1,
        np.percentile(data_vector, 99), 2,
        np.percentile(data_vector, 1), 2
    ]
    opts = {
        'Inference': Inference,
        'Number_of_Components': Number_of_Components,
        'Components_Model': Components_Model,
        'init_params': init_params,
        'maxits': maxits,
        'tol': tol
    }
    # CALL TO FIT MIXTURE MODEL
    Model = Mixture_Model_1Dim(data_vector, opts)
    #if Model['Mixing Prop.'][0]<.95:
    #good_model=1

    # Visualize fit
    visualize_model_fit = 1

    if visualize_model_fit == 1:

        my_range = np.linspace(-10, 10, 10000)

        plt0 = np.multiply(
            Model['Mixing Prop.'][0],
            norm.pdf(my_range, Model['mu1'][0],
                     np.sqrt(np.divide(1, Model['taus1'][0]))))
        #plt0=np.multiply( Model['Mixing Prop.'][0],norm.pdf(my_range,Model['mu1'][0],np.sqrt(Model['taus1'][0])  ) )
        #plt0=np.multiply( Model['Mixing Prop.'][0],norm.pdf(my_range,Model['mu1'][0],Model['taus1'][0])  )

        if Components_Model[1] == 'InvGamma':
            plt1 = np.multiply(
                Model['Mixing Prop.'][1],
                invgam(my_range, Model['shapes'][1], Model['scales'][1]))
        elif Components_Model[1] == 'Gamma':
            plt1 = np.multiply(
                Model['Mixing Prop.'][1],
                gam(my_range, Model['shapes'][1],
                    np.divide(1, Model['rates'][1])))

        plt1[my_range < 0] = 0

        if Components_Model[2] == '-InvGamma':
            plt2 = np.multiply(
                Model['Mixing Prop.'][2],
                invgam(-my_range, Model['shapes'][2], Model['scales'][2]))
        elif Components_Model[2] == '-Gamma':
            plt2 = np.multiply(
                Model['Mixing Prop.'][2],
                gam(-my_range, Model['shapes'][2],
                    np.divide(1, Model['rates'][2])))

        plt2[my_range > 0] = 0

        import matplotlib.pyplot as plt
        fig = plt.figure()
        #plt.plot(range(10))
        plt.hist(data_vector, bins=50, density=True, alpha=1, color='g')
        plt.plot(my_range, plt0, 'k', linewidth=2)
        plt.plot(my_range, plt1, 'k', linewidth=2)
        plt.plot(my_range, plt2, 'k', linewidth=2)
        plt.plot(my_range, plt0 + plt1 + plt2, 'r', linewidth=2)
        fig.savefig(os.path.expanduser('~/Desktop/temp.png'), dpi=fig.dpi)
        #plt.show()
        # Plot the resulting fit on a histogram of the data

    #Compute local FDR at the positive and negative tails
    #f0 is the null (Gaussian) component: norm(Model['mu1'][0], sqrt(1/Model['taus1'][0]))
    p0 = Model['Mixing Prop.'][0]
    rho = data_vector.shape[0]

    #FDR at positive side
    sorted_data_vector = -np.sort(-data_vector)
    all_localFDR = np.ones(rho)
    flag = 0
    k = -1
    while flag == 0:
        k = k + 1
        point = sorted_data_vector[k]
        cdf = norm.cdf(point, Model['mu1'][0],
                       np.sqrt(np.divide(1, Model['taus1'][0])))
        numerator = np.multiply(float(p0), 1 - cdf)
        denominator = np.divide(float(k + 1), float(rho))
        all_localFDR[k] = np.divide(numerator, denominator)
        pFDR = all_localFDR[k]
        if pFDR > 0.001:
            if k == 0:
                threshold1 = sorted_data_vector[k]
            else:
                threshold1 = sorted_data_vector[k - 1]
                # np.multiply(sorted_data_vector[k-1],scaling_factor)

            flag = 1

            #print threshold1

    #FDR at negative side
    sorted_data_vector = -np.sort(data_vector)
    all_localFDR = np.ones(rho)
    flag = 0
    k = -1
    while flag == 0:
        k = k + 1
        point = sorted_data_vector[k]
        cdf = norm.cdf(-point, Model['mu1'][0],
                       np.sqrt(np.divide(1, Model['taus1'][0])))
        numerator = np.multiply(float(p0), 1 - cdf)
        denominator = np.divide(float(k + 1), float(rho))
        all_localFDR[k] = np.divide(numerator, denominator)
        pFDR = all_localFDR[k]
        if pFDR > 0.001:
            if k == 0:
                threshold2 = -sorted_data_vector[k]
            else:
                threshold2 = -sorted_data_vector[k - 1]
                # np.multiply(sorted_data_vector[k-1],scaling_factor)

            flag = 1

    #Rescale the thresholds using the data mean and std
    threshold1 = np.multiply(threshold1, scaling_factor) + mean_factor
    threshold2 = np.multiply(threshold2, scaling_factor) + mean_factor
    print(threshold1)
    print(threshold2)

    return threshold1, threshold2, Model
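
# A minimal usage sketch (hypothetical file paths; assumes a CSV connectivity
# matrix and that the Mixture_Model_1Dim toolbox lives at toolbox_path):
thr_pos, thr_neg, Model = GaussGammas_Connectome_thresholding_pFDR(
    'connectome.csv', '/path/to/toolbox')
print(thr_pos, thr_neg)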
Example #6
# Assumed imports for this example: numpy, scipy (with scipy.stats) as sc,
# size/log from numpy, and the helper module alb_MM_functions as alb.
import numpy as np
import scipy as sc
import scipy.stats
import alb_MM_functions as alb
from numpy import size, log


def mmfit2(x, maxiters, tol, MM):
    print(MM)
    all_params = [0, 1, 2, 1]
    init_mu1 = all_params[0]
    init_v1 = all_params[1]
    init_mu2 = all_params[2]
    init_v2 = all_params[3]
    init_PI = np.zeros(2)
    init_PI[0] = 0.5
    init_PI[1] = 0.5
    #First estimate the initial parameters for the (inverse) gammas: alphas and betas

    #if MM==1:
    #	1#fix for gmm
    #el
    if MM == 2:
        init_a1 = alb.alphaGm(init_mu2, init_v2)
        init_b1 = alb.betaGm(init_mu2, init_v2)
    elif MM == 3:
        init_a1 = alb.alphaIG(init_mu2, init_v2)
        init_b1 = alb.betaIG(init_mu2, init_v2)

    #rename parameters for iteration
    tmp_mu1 = init_mu1
    tmp_v1 = init_v1
    tmp_mu2 = init_mu2
    tmp_v2 = init_v2

    tmp_a1 = init_a1
    tmp_b1 = init_b1
    tmp_PI = init_PI
    #make structures to save the parameters estimates at each iteration
    mu1 = np.zeros(maxiters + 2)
    v1 = np.zeros(maxiters + 2)
    mu2 = np.zeros(maxiters + 2)
    v2 = np.zeros(maxiters + 2)

    a1 = np.zeros(maxiters + 2)
    b1 = np.zeros(maxiters + 2)
    tmp_lik = np.zeros(maxiters + 2)
    real_lik = np.zeros(maxiters + 2)
    PI = np.zeros(2 * (maxiters + 2))
    #2 because we fit 2 components
    PI = np.reshape(PI, [maxiters + 2, 2])
    #save first values of this structures as the initialized values
    mu1[0] = tmp_mu1
    v1[0] = tmp_v1
    mu2[0] = tmp_mu2
    v2[0] = tmp_v2

    a1[0] = tmp_a1
    b1[0] = tmp_b1
    PI[0, 0] = tmp_PI[0]
    PI[0, 1] = tmp_PI[1]
    flag = 0
    it = -1
    #indexes of samples to assign 0 prob wrt non-gauss components
    xneg = np.where(x < pow(10, -14))[0]
    while flag == 0:
        it = it + 1
        #print it
        #for it in range (0,maxiters):
        #print 'it1',it

        Nobj = sc.stats.norm(tmp_mu1, np.power(tmp_v1, 0.5))
        pGa = Nobj.pdf(x)
        pGa[pGa == 0] = pow(10, -14)
        if MM == 1:
            1
        elif MM == 2:
            dum2 = alb.gam(x, tmp_a1, tmp_b1)
        elif MM == 3:
            dum2 = alb.invgam(x, tmp_a1, tmp_b1)

        dum2[xneg] = 0
        dum2[np.isnan(dum2)] = 0
        dum2[np.isinf(dum2)] = 0
        dum2[dum2 == 0] = pow(10, -14)
        D1 = np.multiply(np.ones(size(x)) * tmp_PI[0], pGa)
        D2 = np.multiply(np.ones(size(x)) * tmp_PI[1], dum2)
        D2[xneg] = 0
        D = D1 + D2
        R1 = np.divide(D1, np.ones(size(x)) * D)
        R2 = np.divide(D2, np.ones(size(x)) * D)
        resp = np.column_stack([R1, R2])
        #tmp_lik[it]=sum( np.multiply(resp[:,0],(log(tmp_PI[0])+log(pGa))) + np.multiply(resp[:,1],(log(tmp_PI[1])+log(dum2))));#bishop
        real_lik[it] = sum(
            log(np.multiply(tmp_PI[0], pGa) + np.multiply(tmp_PI[1], dum2)))
        #N=np.ones(2)
        #N[0]=sum(R1)
        #N[1]=sum(R2)
        #PI[0,0]=N[0]/sum(N)
        #PI[0,1]=N[1]/sum(N)
        #if it < maxiters-1:
        #M step
        N = np.ones(2)
        N[0] = sum(R1)
        N[1] = sum(R2)
        tmp_PI[0] = N[0] / sum(N)
        tmp_PI[1] = N[1] / sum(N)
        #print tmp_PI
        #print tmp_lik[it]
        #update gaussian mean and variance
        tmp_mu1 = []
        tmp_mu1 = sum(np.multiply(resp[:, 0], x)) / N[0]
        tmp_v1 = []
        tmp_v1 = sum(np.multiply(resp[:, 0], pow(x - tmp_mu1, 2))) / N[0]
        #if tmp_v <= 0.5:
        #	tmp_v=0.5
        #UPDATE EACH INVERSE GAMMA.
        tmp_mu2 = []
        tmp_mu2 = sum(np.multiply(resp[:, 1], x)) / N[1]
        tmp_v2 = []
        tmp_v2 = sum(np.multiply(resp[:, 1], pow(x - tmp_mu2, 2))) / N[1]
        if tmp_v2 < 0.2:
            tmp_v2 = 0.2

        if MM == 2:
            tmp_a1 = alb.alphaGm(tmp_mu2, tmp_v2)
            tmp_b1 = alb.betaGm(tmp_mu2, tmp_v2)
        elif MM == 3:
            tmp_a1 = alb.alphaIG(tmp_mu2, tmp_v2)
            tmp_b1 = alb.betaIG(tmp_mu2, tmp_v2)

        if it > 20:
            if abs(real_lik[it] - real_lik[it - 1]) < tol:
                flag = 1
                print(it)

        if it > (maxiters - 1):
            flag = 1
            #print it

        if flag == 0:
            mu1[it + 1] = tmp_mu1
            v1[it + 1] = tmp_v1
            mu2[it + 1] = tmp_mu2
            v2[it + 1] = tmp_v2
            a1[it + 1] = alb.alphaIG(tmp_mu2, tmp_v2)
            b1[it + 1] = alb.betaIG(tmp_mu2, tmp_v2)
            PI[it + 1, :] = tmp_PI

    stmu1 = mu1[it]
    stv1 = v1[it]
    stmu2 = mu2[it]
    stv2 = v2[it]
    stPI = PI[it, :]
    lik = real_lik[it]  #tmp_lik is never filled, so return the observed-data log-likelihood

    return stmu1, stv1, stmu2, stv2, stPI, lik, it, resp
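
# A minimal usage sketch (hypothetical synthetic data; MM=3 selects the
# Gauss + inverse-gamma pair, assuming alb_MM_functions is importable):
rng = np.random.default_rng(0)
x = np.concatenate([rng.normal(0, 1, 2000), 1.0 / rng.gamma(4.0, 1.0 / 6.0, 500)])
stmu1, stv1, stmu2, stv2, stPI, lik, its, resp = mmfit2(x, maxiters=200, tol=1e-5, MM=3)
print(stmu1, stv1, stmu2, stv2, stPI)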
Example #7
# Assumed context, as in the previous example: scipy (with scipy.stats) as sc,
# size/log from numpy, and the helper module alb_MM_functions as alb.
def mmfit3(x, maxiters, tol, MM):
    import copy
    import numpy as np
    all_params = [0, 1, 3, 1, -3, 1]
    init_mu1 = all_params[0]
    init_v1 = all_params[1]
    init_mu2 = all_params[2]
    init_v2 = all_params[3]
    init_mu3 = all_params[4]
    init_v3 = all_params[5]

    init_PI = np.zeros(3)
    init_PI[0] = np.true_divide(1, 3)
    init_PI[1] = np.true_divide(1, 3)
    init_PI[2] = np.true_divide(1, 3)
    #First estimate the initial parameters for the (inverse) gammas: alphas and betas

    #if MM==1:
    #	1#fix for gmm
    #el
    if MM == 2:
        init_a1 = alb.alphaGm(init_mu2, init_v2)
        init_b1 = alb.betaGm(init_mu2, init_v2)
        init_a2 = alb.alphaGm(-1 * init_mu3, init_v3)
        init_b2 = alb.betaGm(-1 * init_mu3, init_v3)
    elif MM == 3:
        init_a1 = alb.alphaIG(init_mu2, init_v2)
        init_b1 = alb.betaIG(init_mu2, init_v2)
        init_a2 = alb.alphaIG(-1 * init_mu3, init_v3)
        init_b2 = alb.betaIG(-1 * init_mu3, init_v3)

    #rename parameters for iteration
    tmp_mu1 = copy.deepcopy(init_mu1)
    tmp_v1 = copy.deepcopy(init_v1)
    tmp_mu2 = copy.deepcopy(init_mu2)
    tmp_v2 = copy.deepcopy(init_v2)
    tmp_mu3 = copy.deepcopy(init_mu3)
    tmp_v3 = copy.deepcopy(init_v3)

    tmp_a1 = copy.deepcopy(init_a1)
    tmp_b1 = copy.deepcopy(init_b1)
    tmp_a2 = copy.deepcopy(init_a2)
    tmp_b2 = copy.deepcopy(init_b2)
    tmp_PI = copy.deepcopy(init_PI)
    #make structures to save the parameters estimates at each iteration
    mu1 = np.zeros(maxiters + 2)
    v1 = np.zeros(maxiters + 2)
    mu2 = np.zeros(maxiters + 2)
    v2 = np.zeros(maxiters + 2)
    mu3 = np.zeros(maxiters + 2)
    v3 = np.zeros(maxiters + 2)

    a1 = np.zeros(maxiters + 2)
    b1 = np.zeros(maxiters + 2)
    a2 = np.zeros(maxiters + 2)
    b2 = np.zeros(maxiters + 2)
    tmp_lik = np.zeros(maxiters + 2)
    real_lik = np.zeros(maxiters + 2)
    PI = np.zeros(3 * (maxiters + 2))
    #3 because we fit 3 components
    PI = np.reshape(PI, [maxiters + 2, 3])
    #save first values of this structures as the initialized values
    mu1[0] = tmp_mu1
    v1[0] = tmp_v1
    mu2[0] = tmp_mu2
    v2[0] = tmp_v2
    mu3[0] = tmp_mu3
    v3[0] = tmp_v3
    a1[0] = tmp_a1
    b1[0] = tmp_b1
    a2[0] = tmp_a2
    b2[0] = tmp_b2

    #indexes of samples to assign 0 prob wrt each inv gamma
    xneg = np.where(x < pow(10, -14))[0]
    xpos = np.where(x > -pow(10, -14))[0]
    eps = np.finfo(float).eps

    #First Expectation step to evaluate the initialization, it=0
    it = 0
    Nobj = sc.stats.norm(tmp_mu1, np.power(tmp_v1, 0.5))
    pGa = Nobj.pdf(x)
    pGa[pGa == 0] = 10**-14
    if MM == 1:
        1
    elif MM == 2:
        dum2 = alb.gam(x, tmp_a1, tmp_b1)
        dum3 = alb.gam(-1 * x, tmp_a2, tmp_b2)
    elif MM == 3:
        dum2 = alb.invgam(x, tmp_a1, tmp_b1)
        dum3 = alb.invgam(-1 * x, tmp_a2, tmp_b2)

    dum2[xneg] = 0
    dum3[xpos] = 0
    D1 = np.multiply(np.ones(size(x)) * tmp_PI[0], pGa)
    D1[np.where(D1 < 10**-14)] = eps
    D2 = np.multiply(np.ones(size(x)) * tmp_PI[1], dum2)
    D2[np.where(D2 < 10**-14)] = eps
    D3 = np.multiply(np.ones(size(x)) * tmp_PI[2], dum3)
    D3[np.where(D3 < 10**-14)] = eps
    D = D1 + D2 + D3
    R1 = np.divide(D1, np.ones(size(x)) * D)
    R2 = np.divide(D2, np.ones(size(x)) * D)
    R3 = np.divide(D3, np.ones(size(x)) * D)
    resp = np.column_stack([R1, R2, R3])
    #M step
    N = np.ones(3)
    N[0] = sum(R1)
    N[1] = sum(R2)
    N[2] = sum(R3)
    tmp_PI[0] = N[0] / sum(N)
    tmp_PI[1] = N[1] / sum(N)
    tmp_PI[2] = N[2] / sum(N)
    #tmp_lik[it]=sum( np.multiply(resp[:,0],(log(tmp_PI[0])+log(pGa))) + np.multiply(resp[:,1],(log(tmp_PI[1])+log(dum2)))  + np.multiply(resp[:,2],(log(tmp_PI[2])+log(dum3)))       );#bishop
    real_lik[it] = sum(
        log(
            np.multiply(tmp_PI[0], pGa) + np.multiply(tmp_PI[1], dum2) +
            np.multiply(tmp_PI[2], dum3)))
    #equivalent computation from the clipped densities (D1, D2, D3 already include tmp_PI)
    trol = np.zeros([3, x.shape[0]])
    trol[0, :] = D1
    trol[1, :] = D2
    trol[2, :] = D3
    real_lik[it] = np.sum(np.log(trol.sum(0)))

    #ITERATE
    flag = 0
    while flag == 0:
        it = it + 1

        #update gaussian mean and variance
        tmp_mu1 = []
        tmp_mu1 = sum(np.multiply(resp[:, 0], x)) / N[0]
        tmp_v1 = []
        tmp_v1 = sum(np.multiply(resp[:, 0], pow(x - tmp_mu1, 2))) / N[0]
        #if tmp_v <= 0.5:
        #	tmp_v=0.5
        #UPDATE EACH INVERSE GAMMA.
        tmp_mu2 = []
        tmp_mu2 = sum(np.multiply(resp[:, 1], x)) / N[1]
        tmp_v2 = []
        tmp_v2 = sum(np.multiply(resp[:, 1], pow(x - tmp_mu2, 2))) / N[1]
        #if tmp_v2< 0.1:#pow(10,-1):
        #tmp_v2=0.1
        tmp_mu3 = []
        tmp_mu3 = sum(np.multiply(resp[:, 2], x)) / N[2]
        tmp_v3 = []
        tmp_v3 = sum(np.multiply(resp[:, 2], pow(x - tmp_mu3, 2))) / N[2]
        #if tmp_v3< 0.1:
        #tmp_v3=0.1
        if MM == 2:
            tmp_a1 = alb.alphaGm(tmp_mu2, tmp_v2)
            tmp_b1 = alb.betaGm(tmp_mu2, tmp_v2)
            tmp_a2 = alb.alphaGm(-1 * tmp_mu3, tmp_v3)
            tmp_b2 = alb.betaGm(-1 * tmp_mu3, tmp_v3)
        elif MM == 3:
            tmp_a1 = alb.alphaIG(tmp_mu2, tmp_v2)
            tmp_b1 = alb.betaIG(tmp_mu2, tmp_v2)
            tmp_a2 = alb.alphaIG(-1 * tmp_mu3, tmp_v3)
            tmp_b2 = alb.betaIG(-1 * tmp_mu3, tmp_v3)

        #print 'it_num',it
        Nobj = sc.stats.norm(tmp_mu1, np.power(tmp_v1, 0.5))
        pGa = Nobj.pdf(x)
        pGa[pGa == 0] = 10**-14
        if MM == 1:
            1
        elif MM == 2:
            dum2 = alb.gam(x, tmp_a1, tmp_b1)
            dum3 = alb.gam(-1 * x, tmp_a2, tmp_b2)
        elif MM == 3:
            dum2 = alb.invgam(x, tmp_a1, tmp_b1)
            dum3 = alb.invgam(-1 * x, tmp_a2, tmp_b2)

        dum2[xneg] = 0
        dum3[xpos] = 0

        dum2[np.isnan(dum2)] = 0
        dum2[np.isinf(dum2)] = 0
        dum2[dum2 == 0] = 10**-14
        dum3[np.isnan(dum3)] = 0
        dum3[np.isinf(dum3)] = 0
        dum3[dum3 == 0] = 10**-14

        D1 = np.multiply(np.ones(size(x)) * tmp_PI[0], pGa)
        D1[np.where(D1 < 10**-14)] = eps
        D2 = np.multiply(np.ones(size(x)) * tmp_PI[1], dum2)
        D2[np.where(D2 < 10**-14)] = eps
        D3 = np.multiply(np.ones(size(x)) * tmp_PI[2], dum3)
        D3[np.where(D3 < 10**-14)] = eps
        #D3[xpos]=0;
        #D2[xneg]=0;

        D = D1 + D2 + D3
        R1 = np.divide(D1, np.ones(size(x)) * D)
        R2 = np.divide(D2, np.ones(size(x)) * D)
        R3 = np.divide(D3, np.ones(size(x)) * D)
        resp = np.column_stack([R1, R2, R3])
        #M step
        N = np.ones(3)
        N[0] = sum(R1)
        N[1] = sum(R2)
        N[2] = sum(R3)
        tmp_PI[0] = N[0] / sum(N)
        tmp_PI[1] = N[1] / sum(N)
        tmp_PI[2] = N[2] / sum(N)
        tmp_PI[np.where(tmp_PI < 10**-14)] = eps
        #tmp_lik[it]=sum( np.multiply(resp[:,0],(log(tmp_PI[0])+log(pGa))) + np.multiply(resp[:,1],(log(tmp_PI[1])+log(dum2)))  + np.multiply(resp[:,2],(log(tmp_PI[2])+log(dum3)))       );#bishop
        real_lik[it] = sum(
            log(
                np.multiply(tmp_PI[0], pGa) + np.multiply(tmp_PI[1], dum2) +
                np.multiply(tmp_PI[2], dum3)))
        #equivalent computation from the clipped densities (D1, D2, D3 already include tmp_PI)
        trol = np.zeros([3, x.shape[0]])
        trol[0, :] = D1
        trol[1, :] = D2
        trol[2, :] = D3
        real_lik[it] = np.sum(np.log(trol.sum(0)))

        if (abs((real_lik[it] - real_lik[it - 1]) / real_lik[it - 1]) <
                tol) | (it > maxiters):
            flag = 1

    stmu1 = tmp_mu1
    stv1 = tmp_v1
    stmu2 = tmp_mu2
    stv2 = tmp_v2
    stmu3 = tmp_mu3
    stv3 = tmp_v3
    stPI = tmp_PI
    lik = real_lik[0:it]

    return stmu1, stv1, stmu2, stv2, stmu3, stv3, stPI, lik, it, resp
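
# A minimal usage sketch (hypothetical synthetic data with a Gaussian null plus
# symmetric positive and negative tails; MM=3 selects the inverse-gamma pair):
rng = np.random.default_rng(1)
tails = 1.0 / rng.gamma(4.0, 1.0 / 9.0, 400)
x = np.concatenate([rng.normal(0, 1, 3000), tails, -tails])
stmu1, stv1, stmu2, stv2, stmu3, stv3, stPI, lik, its, resp = mmfit3(x, maxiters=300, tol=1e-5, MM=3)
print(stPI, its)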