Example No. 1
def potency(niftipathtask,
            niftipathrest,
            maskpathtask,
            maskpathrest,
            atlas4dpath,
            potency='indiv',
            savepath='',
            savelevel=1):
    '''author: Roselyne Chauvin
        niftipathtask : list of paths, or a single path, to the 4D NIfTI file(s) of the preprocessed task acquisition
        niftipathrest : list of paths, or a single path, to the 4D NIfTI file(s) of the preprocessed resting-state acquisition of the same subject, or of the population of interest
        potency : 'indiv' for 'individual potency', i.e. subtraction of the rest connectivity (**niftipathrest is then a path to a single file**), or 'population' for 'population potency', i.e. standardization by the resting distribution (**niftipathrest is then a list of paths**)
        atlas4dpath : path to the NIfTI file with the atlas (one area per volume)
        savepath : folder location to save a .npy file with the potency matrix
        savelevel : if == 2, save/return the task and rest normalized matrices instead of the potency
        '''
    import sys
    import os, glob, random
    import numpy as np
    import subprocess
    import nibabel
    ##parameters for mixture modelling
    sys.path.append('/home/mrstats/roscha/Scripts/TEN/scripts/')
    import alb_MM_functions as alb
    maxiters = 100
    tol = 0.000001  #relative tolerance for convergence
    MM = 2  #2 is 'GGM', 3 is 'GIM'

    ##verification
    if potency == 'indiv' and type(niftipathtask) == list:
        if len(niftipathtask) != len(niftipathrest):
            raise ValueError('mismatch between the lengths of the task and rest lists')
    if type(niftipathtask) != list:
        niftipathtask = np.array([niftipathtask])
    if type(maskpathtask) != list:
        maskpathtask = np.array([maskpathtask])
    if type(maskpathrest) != list:
        maskpathrest = np.array([maskpathrest])
    if type(niftipathrest) != list:
        niftipathrest = np.array([niftipathrest])
    if type(atlas4dpath) != list:
        atlas_path = atlas4dpath
        area = nibabel.load(atlas_path).shape
        indivatlas = False
    else:
        indivatlas = True
        area = nibabel.load(atlas4dpath[0]).shape

    #for each nifti task file if more than one
    taskmat = np.zeros((len(niftipathtask), area[3], area[3]))
    for i, f in enumerate(niftipathtask):

        #read data
        if indivatlas:
            atlas_path = atlas4dpath[i]
        a = nibabel.load(atlas_path).get_data()
        number = a.T.shape[0]
        tri = np.zeros((number, number))
        tri[np.triu_indices(number, 1)] = 1
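        #tri: upper-triangle mask; used below to rebuild a symmetric matrix from the normalized upper triangle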
        d = nibabel.load(niftipathtask[i]).get_data()
        m = nibabel.load(maskpathtask[i]).get_data()

        #extract time series from atlas (regression-weighted)
        try:
            prov = regression(d, a, m)

            #z-score (demean and scale to unit variance)
            ts = (prov - np.repeat([np.mean(prov, 1)], len(prov[0]), axis=0).T
                  ) / (np.repeat([np.std(prov, 1)], len(prov[0]), axis=0).T)
            #compute ledoit wolf covariance
            #compute partial correlation
            #compute Zpartial correlation
            [corM, ZparCor, arcparCor, covM, parCor,
             lw] = makemat(ts, 'ledoit_wolf')

            #mixture modelling on it
            prov = ZparCor

            prov2 = prov[np.triu_indices(number, 1)]
            prov2_norm = (prov2 - np.mean(prov2)) / np.std(prov2)
            output = alb.mmfit3(prov2_norm, maxiters, tol, MM)
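            #mmfit3 returns [mu1, v1, mu2, v2, mu3, v3, pi, lik, numits, resp] (see Example No. 4),
            #so output[0] and output[1] are the mean and variance of the central Gaussian component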
            m_gaus = np.std(prov2) * output[0] + np.mean(prov2)
            var_gaus = (np.std(prov2)**2) * output[1]

            #normalize matrix by main gaussian
            prov[np.triu_indices(
                number, 1)] = ((prov[np.triu_indices(number, 1)] - m_gaus) /
                               np.sqrt(var_gaus))

            prov = prov * tri + (prov * tri).T
            #clean up
            taskmat[i] = prov
        except Exception:
            print('fail')

    #redo that for each resting state (or just one)
    restmat = np.zeros((len(niftipathrest), area[3], area[3]))
    if niftipathrest != '':
        for i, f in enumerate(niftipathrest):

            #read data
            if indivatlas:
                atlas_path = atlas4dpath[i]
            a = nibabel.load(atlas_path).get_data()
            number = a.T.shape[0]
            tri = np.zeros((number, number))
            tri[np.triu_indices(number, 1)] = 1
            d = nibabel.load(niftipathrest[i]).get_data()
            m = nibabel.load(maskpathrest[i]).get_data()

            #extract time series from atlas (regression-weighted)
            prov = regression(d, a, m)

            #z-score (demean and scale to unit variance)
            ts = (prov - np.repeat([np.mean(prov, 1)], len(prov[0]), axis=0).T
                  ) / (np.repeat([np.std(prov, 1)], len(prov[0]), axis=0).T)
            #compute ledoit wolf covariance
            #compute partial correlation
            #compute Zpartial correlation
            [corM, ZparCor, arcparCor, covM, parCor,
             lw] = makemat(ts, 'ledoit_wolf')

            #mixture modelling on it
            prov = ZparCor
            prov2 = prov[np.triu_indices(number, 1)]
            prov2_norm = (prov2 - np.mean(prov2)) / np.std(prov2)
            output = alb.mmfit3(prov2_norm, maxiters, tol, MM)
            m_gaus = np.std(prov2) * output[0] + np.mean(prov2)
            var_gaus = (np.std(prov2)**2) * output[1]

            #normalize matrix by main gaussian
            prov[np.triu_indices(
                number, 1)] = ((prov[np.triu_indices(number, 1)] - m_gaus) /
                               np.sqrt(var_gaus))
            prov = prov * tri + (prov * tri).T
            #clean up
            restmat[i] = prov

    if savelevel == 2:
        if savepath != '':
            np.save(savepath + '/rest_normalizedMat.npy', restmat)
            np.save(savepath + '/task_normalizedMat.npy', taskmat)
        return [taskmat, restmat]
    else:
        #potency
        if potency == 'indiv':  #subtract rest from task, one by one
            if savepath != '':
                np.save(savepath + '/indivStandardized_potency_mat.npy',
                        [taskmat[i] - restmat[i] for i in range(len(taskmat))])
            return [taskmat[i] - restmat[i] for i in range(len(taskmat))]
        else:
            #compute mean and standard deviation per edge and normalize the task matrices one by one

            [m, st] = [np.mean(restmat, 0), np.std(restmat, 0)]
            if savepath != '':
                np.save(savepath + '/groupStandardized_potency_mat.npy',
                        [(taskmat[i] - m) / st for i in range(len(taskmat))])
            return [(taskmat[i] - m) / st for i in range(len(taskmat))]
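
The helpers regression() and makemat() are defined elsewhere in the TEN scripts and are not shown on this page. As orientation only, here is a minimal sketch of what makemat(ts, 'ledoit_wolf') plausibly computes, inferred from its return signature and the comments above (Ledoit-Wolf covariance, partial correlation, Fisher z-transform); the real implementation may differ:

def makemat_sketch(ts):
    #ts: (n_regions, n_timepoints) array of z-scored time series (assumption)
    import numpy as np
    from sklearn.covariance import LedoitWolf
    est = LedoitWolf().fit(ts.T)  #sklearn expects (samples, features)
    covM = est.covariance_
    corM = np.corrcoef(ts)
    prec = est.precision_  #inverse of the shrunk covariance
    d = np.sqrt(np.diag(prec))
    parCor = -prec / np.outer(d, d)  #partial correlation from the precision matrix
    np.fill_diagonal(parCor, 1.0)
    arcparCor = np.arctanh(np.clip(parCor, -0.999, 0.999))  #Fisher z-transform
    ZparCor = arcparCor  #assumption: the z-transformed partial correlations (may be further standardized in the real code)
    lw = est.shrinkage_  #assumption: 'lw' is the Ledoit-Wolf shrinkage coefficient
    return [corM, ZparCor, arcparCor, covM, parCor, lw]

A hypothetical call of potency() with placeholder file names:

mats = potency(niftipathtask='sub01_task_preproc.nii.gz',
               niftipathrest=['sub01_rest.nii.gz', 'sub02_rest.nii.gz', 'sub03_rest.nii.gz'],
               maskpathtask='sub01_task_mask.nii.gz',
               maskpathrest=['sub01_rest_mask.nii.gz', 'sub02_rest_mask.nii.gz', 'sub03_rest_mask.nii.gz'],
               atlas4dpath='atlas_4d.nii.gz',
               potency='population',
               savepath='')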
Example No. 2
def dynamic_potency(niftipathtask,
                    niftipathrest,
                    maskpathtask,
                    maskpathrest,
                    atlas4dpath,
                    TRrest,
                    TRtask,
                    name='dynamic_potency',
                    task='indep',
                    savepath='',
                    toreturn=0,
                    savecontent=[1,2,3,4]):
    '''author: Roselyne Chauvin
        potency is calculated for a single subject; the function needs to be run for each subject, but a list of tasks/blocks can be computed at once
        task : 'indep' for an independent list of tasks, or 'related' to compute one single fixed matrix from the concatenation of the task list, with dynamics calculated for each item of the task list (example use: block design)
        niftipathtask : list of paths, or a single path, to the 4D NIfTI file(s) of the preprocessed task acquisition
        niftipathrest : path to the 4D NIfTI file of the preprocessed resting-state acquisition of the subject

        atlas4dpath : path to the NIfTI file with the atlas (one area per volume)
        TRrest : TR of the rest acquisition in ms
        TRtask : TR of the task acquisition in ms; if the tasks have different TRs, provide a list of TRs
        savepath : folder location to save .txt and .npy files
        toreturn : 0 save files, 1 return results, 2 both save and return
        savecontent : which outputs to save: 1 task dynamic potency, 2 task and rest dynamics, 3 task and rest fixed matrices, 4 ledoit_wolf convergence parameter
        '''
    import sys
    import os, glob, random
    import numpy as np
    import subprocess
    import nibabel
    ##parameters for mixture modelling
    sys.path.append('/home/mrstats/roscha/Scripts/TEN/scripts/')
    import alb_MM_functions as alb
    maxiters=100
    tol=0.000001 #relative tolerance for convergence
    MM=2 #2 is 'GGM', 3 is 'GIM'


    ##verification
    if type(niftipathtask)!=list:
        niftipathtask=np.array([niftipathtask])
    if type(maskpathtask)!=list:
        maskpathtask=np.array([maskpathtask])
    if len(maskpathtask)==1:
        maskpathtask=np.array(np.repeat(maskpathtask,len(niftipathtask)))
    else:
        if len(niftipathtask)!=len(maskpathtask):
            raise ValueError('mismatch between len of task list and len of mask list')
    #if type(maskpathrest)!=list:
    #    maskpathrest=np.array([maskpathrest])#necessary?
    
        
    if type(TRtask)!=list:
        TRtask=np.array(np.repeat(TRtask,len(niftipathtask)))
    else:
        if len(niftipathtask)!=len(TRtask):
            raise ValueError('mismatch between len of task list and len of TR list')
    
            
    area=nibabel.load(atlas4dpath).shape
                
    #for each nifti task file if more than one
    a= nibabel.load(atlas4dpath).get_data()
    number=a.T.shape[0]
    tri=np.zeros((number,number))
    tri[np.triu_indices(number,1)]=1
        
    ####### compute Time Series and fixed connectivity matrices
    ts=[[] for i in range(len(niftipathtask))]
    for i,f in enumerate(niftipathtask):
        #read data
        d= nibabel.load(niftipathtask[i]).get_data()
        m= nibabel.load(maskpathtask[i]).get_data()
        #extract time series from atlas (regression-weighted)
        prov=regression(d, a, m)
        #z-score (demean and scale to unit variance)
        ts[i]=(prov-np.repeat([np.mean(prov,1)],len(prov[0]),axis=0).T)/(np.repeat([np.std(prov,1)],len(prov[0]),axis=0).T)
    #compute ledoit wolf covariance
    #compute partial correlation
    #compute Zpartial correlation
    
    if task=='indep':
        taskmat=np.zeros((len(niftipathtask),number,number))
        taskmatfordyn=np.zeros((len(niftipathtask),number,number))
        lwALL=np.zeros(len(niftipathtask))        
        for i,f in enumerate(niftipathtask):
            [corM,ZparCor,arcparCor,covM,parCor,lw]=makemat(ts[i],'ledoit_wolf')
            taskmatfordyn[i]=parCor
            #mixture modelling on it
            prov=ZparCor
            prov2=prov[np.triu_indices(number,1)]
            prov2_norm=(prov2-np.mean(prov2))/np.std(prov2)
            output=alb.mmfit3(prov2_norm, maxiters,tol,MM)
            m_gaus=np.std(prov2)*output[0]+np.mean(prov2)
            var_gaus=(np.std(prov2)**2)*output[1]
            #normalize matrix by main gaussian
            prov[np.triu_indices(number,1)]=((prov[np.triu_indices(number,1)]-m_gaus)/np.sqrt(var_gaus))
            prov=prov*tri+(prov*tri).T
            #clean up
            taskmat[i]=prov
            lwALL[i]=lw
        
    if task=='related':
        [corM,ZparCor,arcparCor,covM,parCor,lw]=makemat(np.concatenate(ts,axis=1),'ledoit_wolf')
        taskmatfordyn=parCor
        #mixture modelling on it
        prov=ZparCor
        prov2=prov[np.triu_indices(number,1)]
        prov2_norm=(prov2-np.mean(prov2))/np.std(prov2)
        output=alb.mmfit3(prov2_norm, maxiters,tol,MM)
        m_gaus=np.std(prov2)*output[0]+np.mean(prov2)
        var_gaus=(np.std(prov2)**2)*output[1]
        #normalize matrix by main gaussian
        prov[np.triu_indices(number,1)]=((prov[np.triu_indices(number,1)]-m_gaus)/np.sqrt(var_gaus))
        prov=prov*tri+(prov*tri).T
        #clean up
        taskmat=prov
        lwALL=lw

    ######redo that for the resting state
    #read data
    d= nibabel.load(niftipathrest).get_data()
    m= nibabel.load(maskpathrest).get_data()
    #extract time series from atlas (regression-weighted)
    prov=regression(d, a, m)
    #z-score (demean and scale to unit variance)
    tsrest=(prov-np.repeat([np.mean(prov,1)],len(prov[0]),axis=0).T)/(np.repeat([np.std(prov,1)],len(prov[0]),axis=0).T)
    #compute ledoit wolf covariance
    #compute partial correlation
    #compute Zpartial correlation
    [corM,ZparCor,arcparCor,covM,parCor,lw]=makemat(tsrest,'ledoit_wolf')
    restmatfordyn=parCor
    #mixture modelling on it
    prov=ZparCor
    prov2=prov[np.triu_indices(number,1)]
    prov2_norm=(prov2-np.mean(prov2))/np.std(prov2)
    output=alb.mmfit3(prov2_norm, maxiters,tol,MM)
    m_gaus=np.std(prov2)*output[0]+np.mean(prov2)
    var_gaus=(np.std(prov2)**2)*output[1]
    #normalize matrix by main gaussian
    prov[np.triu_indices(number,1)]=((prov[np.triu_indices(number,1)]-m_gaus)/np.sqrt(var_gaus))
    prov=prov*tri+(prov*tri).T
    #clean up
    restmat=prov
    lwrest=lw

        
        
    ###### compute Multiplication of Temporal Derivative
    #compute temporal derivative 
    tsderiv=[np.zeros((number,len(ts[i][0])-1)) for i in range(len(ts))]
    tstime=[np.zeros(len(ts[i][0])-1) for i in range(len(ts))]
    for i,f in enumerate(ts):
        
        tsderiv[i]=np.array([ts[i].T[n]-ts[i].T[n-1] for n in range(1,len(ts[i][0]))]).T
        tstime[i]=[(n-0.5)*TRtask[i] for n in range(1,len(ts[i][0]))]
        
    tsderivrest=np.array([tsrest.T[n]-tsrest.T[n-1] for n in range(1,len(tsrest[0]))]).T
    tstimerest=[(n-0.5)*TRrest for n in range(1,len(tsrest[0]))]
        
    
    #compute multiplication of temporal derivative
    mtd=[np.zeros((len(ts[i][0])-1,number,number)) for i in range(len(ts))]
    
    for i,f in enumerate(tsderiv):    
        for n in range(len(tsderiv[i][0])):

            prov=np.outer(tsderiv[i].T[n],tsderiv[i].T[n])
            if task=="indep":
                prov3=corr_to_arc(-taskmatfordyn[i]*(prov)*taskmatfordyn[i],2)
            if task=='related':
                prov3=corr_to_arc(-taskmatfordyn*(prov)*taskmatfordyn,2)

            #MM-normalized
            prov2=prov3[np.triu_indices(number,1)]
            prov2[np.where(np.isnan(prov2))]=0 #if the atlas is well defined, this should not be necessary
            prov2_norm=(prov2-np.mean(prov2))/np.std(prov2)
            output=alb.mmfit3(prov2_norm, maxiters,tol,MM)
            m_gaus=np.std(prov2)*output[0]+np.mean(prov2)
            var_gaus=(np.std(prov2)**2)*output[1]
            #normalize matrix by main gaussian
            prov2=((prov2-m_gaus)/np.sqrt(var_gaus))
            prov3=np.zeros((number,number))
            prov3[np.triu_indices(number,1)]=prov2
            mtd[i][n]=prov3*tri+(prov3*tri).T
    
    mtdrest=np.zeros((len(tsrest[0])-1,number,number))
    
        
    for n in range(len(tsderivrest[0])):
            
        prov=np.outer(tsderivrest.T[n],tsderivrest.T[n])
        prov3=corr_to_arc(-restmatfordyn*(prov)*restmatfordyn,2)
            
        #MM-normalized
        prov2=prov3[np.triu_indices(number,1)]
        prov2[np.where(np.isnan(prov2))]=0 #if the atlas is well defined, this should not be necessary
        prov2_norm=(prov2-np.mean(prov2))/np.std(prov2)
        output=alb.mmfit3(prov2_norm, maxiters,tol,MM)
        m_gaus=np.std(prov2)*output[0]+np.mean(prov2)
        var_gaus=(np.std(prov2)**2)*output[1]
        #normalize matrix by main gaussian
        prov2=((prov2-m_gaus)/np.sqrt(var_gaus))
        prov3=np.zeros((number,number))
        prov3[np.triu_indices(number,1)]=prov2
        mtdrest[n]=prov3*tri+(prov3*tri).T


        
    ## compute the dynamic task potency
    meanrest=np.mean(mtdrest,0)
    stdrest=np.std(mtdrest,0)
    
    dynamictask=[[(i - meanrest)/stdrest for i in mtd[n]] for n in range(len(mtd))]
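    #each task MTD frame is standardized edge-wise against the rest MTD distribution (mean/std over rest frames)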
    
    ##adjust sign: we want to express the change in task connectivity relative to rest
    #define rest connections that are positive, negative, or no signal
    [selection,tmin,tmax]=rcthresh.selectionMM(restmat)###**###
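
The listing is truncated at the marker above. corr_to_arc() is another helper not shown on this page, and rcthresh is presumably the module providing selectionMM(), listed in the next example. A hypothetical call of dynamic_potency() with placeholder file names (TRs in ms, per the docstring):

out = dynamic_potency(niftipathtask=['sub01_block1.nii.gz', 'sub01_block2.nii.gz'],
                      niftipathrest='sub01_rest.nii.gz',
                      maskpathtask='sub01_task_mask.nii.gz',
                      maskpathrest='sub01_rest_mask.nii.gz',
                      atlas4dpath='atlas_4d.nii.gz',
                      TRrest=1500,
                      TRtask=2000,
                      task='related',
                      toreturn=1)

Example No. 3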
def selectionMM(mat,step=50,iteration=3,method='weighted_pFDR'):
    import sys, copy
    import numpy as np
    import scipy.integrate
    import scipy.stats
    sys.path.append('/home/mrstats/roscha/Scripts/TEN/scripts/')
    import alb_MM_functions as alb
    number=len(mat)
    def norm2(x,stmu1,stv1):
        return scipy.integrate.quad(norminit,x,np.inf,args=(stv1,stmu1))[0]
    def norminit(x,stv1,stmu1):
        return scipy.stats.norm(stmu1,stv1).pdf(x)
    vecnorm2=np.vectorize(norm2)
    pmin=0.025
    maxiters=100
    tol=0.000001 #relative tolerance for convergence
    MM=2 #2 is 'GGM', 3 is 'GIM'
    #initial normalization
    try:
        data=mat[np.triu_indices(number,1)]
        tri=1
    except:
        data=copy.deepcopy(mat)
        tri=0
    mini,stini=[np.mean(data),np.std(data)]
    data=(data-mini)/stini
    #run mm            
    output=alb.mmfit3(data, maxiters,tol,MM)
    stmu1,stv1=[output[0],output[1]]
    if method=='symmetric':
        init=stmu1+2*stv1
        last=np.max(np.abs(data))
        for i in range(iteration):
            t2=np.arange(init,last+(last-init)/step,(last-init)/step)
            t=t2[:step]
            #find the values closest to the threshold (above and below 0.05)
            test=np.divide(2*vecnorm2(t,stmu1,stv1) , (np.mean(np.repeat([np.abs(data- stmu1)],step,0).T>=t-stmu1,0))  ) < pmin
            
            limthr=np.max(np.where(test==False))
            init=t2[limthr]
            last=t2[limthr+1]
        

        selectionfromtriuIndex=[np.where(np.abs(data-stmu1)>init-stmu1)]
        tmax=np.min(mat[np.triu_indices(number,1)][np.where(mat[np.triu_indices(number,1)]>(init*stini+mini))])
        tmin=np.max(mat[np.triu_indices(number,1)][np.where(mat[np.triu_indices(number,1)]<((-init+2*stmu1)*stini+mini))])
    else:
        if method=='equal_pFDR':
            pmin1=0.025
            pmin2=0.025
        elif method=='weighted_pFDR':
            pmin1=0.05*output[6][1]/(output[6][1]+output[6][2])
            pmin2=0.05*output[6][2]/(output[6][1]+output[6][2])
        #up
        init=stmu1+2*stv1
        last=np.max(data)
        for i in range(iteration):
            t2=np.arange(init,last+(last-init)/step,(last-init)/step)
            t=t2[:step]
            #find the values closest to the threshold
            test=np.divide(vecnorm2(t,stmu1,stv1) , (np.mean(np.repeat([(data- stmu1)],step,0).T>=t-stmu1,0))  ) < pmin1
            
            if test[len(test)-1]==False:
                limthrMax=len(test)-1
            else:
                limthrMax=np.max(np.where(test==False))
            init=t2[limthrMax]
            last=t2[limthrMax+1]
        #down
        
        init2=-(stmu1-2*stv1)
        last2=np.max(-data)
        for i in range(iteration):
            t2=np.arange(init2,last2+(last2-init2)/step,(last2-init2)/step)
            t=t2[:step]
            #find the values closest to the threshold

            test=np.divide(vecnorm2(t,stmu1,stv1) , (np.mean(np.repeat([-(data- stmu1)],step,0).T>=t+stmu1,0))  ) < pmin2
            
            if test[len(test)-1]==False:
                limthrMin=len(test)-1
            else:
                limthrMin=np.max(np.where(test==False))
            
            init2=t2[limthrMin]
            last2=t2[limthrMin]+(last2-init2)/step
        

        selectionfromtriuIndex=[np.where((data>init)+(data<-init2)==1)]
        try:
            if tri==1:
                tmax=np.min(mat[np.triu_indices(number,1)][np.where(mat[np.triu_indices(number,1)]>(init*stini+mini))])
            else:
                tmax=np.min(mat[np.where(mat>(init*stini+mini))])
        except:
            tmax=np.inf
        try:
            if tri==1:
                tmin=np.max(mat[np.triu_indices(number,1)][np.where(mat[np.triu_indices(number,1)]<((-init2+2*stmu1)*stini+mini))])
            else:
                tmin=np.max(mat[np.where(mat<((-init2+2*stmu1)*stini+mini))])
        except:
            tmin=-np.inf
    
    return selectionfromtriuIndex,tmin,tmax
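
A minimal synthetic exercise of selectionMM() (made-up data; a real input would be the normalized rest connectivity matrix, as in Example No. 2):

import numpy as np
rng = np.random.default_rng(0)
sym = rng.normal(0, 1, (60, 60))
sym = (sym + sym.T) / 2  #symmetric toy "connectivity" matrix
sym[0, 1] = sym[1, 0] = 8.0  #plant one strong positive edge
selection, tmin, tmax = selectionMM(sym)
#selection: indices into the upper triangle of the edges kept as signal
#tmin, tmax: data-scale thresholds bounding the central (noise) Gaussian component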
Example No. 4
def SIN_init_VB_MM(data, opts):
    import sys, copy
    import numpy as np
    import scipy.special as sp
    sys.path.append('/home/mrstats/roscha/Scripts/TEN/scripts/')
    import alb_MM_functions as alb
    #SIN_init_VB_MM does:
    #              - fit a mixture model using ML (EM + MM algorithms (mmfit.m))
    #              - initialize the VB parameters of the mixture model using the EM fit as
    #                initial posteriors
    #inputs: -data : vector normalized to mean zero and unit std
    #        -opts : list with options and values
    #                -MM = GIM or GGM (default = GIM)
    #                -MLMMits = max number of iterations allowed to the ML algorithm before initializing VB (default = 1)
    #                -MLMMtol = tolerance for convergence of the ML algorithm before initializing VB
    #output: mix1 is a dict containing the initialized priors and the posterior estimations given the ML initialization
    #example: opts=[]; opts.append({'MM': 'GIM', 'MLMMits': 1, 'MLMMtol': 1e-5})
    #         mix=SIN_init_VB_MM(data,opts)
    #From matlab...IN PROGRESS

    if 'MM' not in opts[0]:
        MM = 'GIM'
    else:
        MM = opts[0]['MM']

    if 'MLMMits' not in opts[0]:
        MLMMits = 1
    else:
        MLMMits = opts[0]['MLMMits']

    if 'MLMMtol' not in opts[0]:
        MLMMtol = 0.00001
    else:
        MLMMtol = opts[0]['MLMMtol']
    #SET PRIORS
    #set mixing priors
    prior = []
    mmm = 10  #(the mean of the component)
    vvv = 10  #(the variance of the component)
    if MM == 'GGM':
        #set GAMMA prior on rates (shape and rate)
        Erate = np.true_divide(1, alb.betaGm(mmm, vvv))
        d_0 = copy.deepcopy(Erate)
        e_0 = 1
        Erate = np.true_divide(d_0, e_0)
        #set shapes conditional prior (fancy)
        Eshape = alb.alphaGm(mmm, vvv)
        dum_v = np.copy(Eshape)
        #allow variance on shape to be of size of mean shape
        dum_p = np.true_divide(1, dum_v)
        #from laplace approx b=prec/psi'(map(s))
        b_0 = np.true_divide(dum_p, sp.polygamma(1, Eshape))
        c_0 = copy.deepcopy(b_0)
        loga_0 = ((b_0 * sp.polygamma(0, Eshape)) - (c_0 * np.log(Erate)))
    elif MM == 'GIM':
        #set GAMMA prior on scale (shape d and rate e)
        Escale = alb.betaIG(mmm, vvv)
        d_0 = copy.deepcopy(Escale)
        #shape
        e_0 = 1
        #rate
        Escale = np.true_divide(d_0, e_0)

        #set component 2 and 3 shape conditional prior (fancy)
        Eshape = alb.alphaIG(mmm, vvv)
        dum_v = np.copy(Eshape)
        #allow variance on shape to be of size of mean shape
        dum_p = np.true_divide(1, dum_v)
        b_0 = np.true_divide(dum_p, sp.polygamma(1, Eshape))
        #from laplace approx b=prec/psi'(map(s))
        c_0 = copy.deepcopy(b_0)
        loga_0 = (-(b_0 * sp.polygamma(0, Eshape)) + (c_0 * np.log(Escale)))

    prior.append({
        'lambda_0': 5,
        'm_0': 0,
        'tau_0': 100,
        'c0': 0.001,
        'b0': 100,
        'd_0': d_0,
        'e_0': e_0,
        'loga_0': loga_0,
        'b_0': b_0,
        'c_0': c_0
    })
    prior = prior[0]

    #SET POSTERIORS initializations using ML mixture models
    if MM == 'GGM':
        mmtype = 2
    elif MM == 'GIM':
        mmtype = 3
    else:
        mmtype = 1  #dummy, never used (plain GMM)
    ML = []
    [mu1, v1, mu2, v2, mu3, v3, pipi, lik, numits,
     resp] = alb.mmfit3(data, MLMMits, MLMMtol, mmtype)
    ML_param = [mu1, v1, mu2, v2, mu3, v3]
    ML.append({'init': ML_param, 'pi': pipi, 'LIK': lik})
    ML = ML[0]

    #mix1.ML

    #INIT POSTERIORS BASED IN ML MIX MODEL
    post = []
    #[dum; b]=max(resp)
    q = resp.argmax(1)
    gammas = np.copy(resp)
    #lambda=sum(resp,2)'

    lambdap = resp.sum(0)
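    #lambdap: expected number of samples assigned to each mixture component (column sums of the responsibilities)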

    #COMPONENT 1: Gaussian component
    #hyperparam. on mean
    m0 = ML_param[0]
    tau0 = np.true_divide(
        1,
        np.true_divide(ML_param[0] + ML_param[2] + np.absolute(ML_param[4]),
                       3))

    #hyperparam. on precision
    init_prec = np.true_divide(1, ML_param[1])
    init_var_prec = np.var([
        np.true_divide(1, ML_param[1]),
        np.true_divide(1, ML_param[3]),
        np.true_divide(1, ML_param[5])
    ],
                           ddof=1)
    c0 = alb.alphaGm(init_prec, init_var_prec)
    #shape
    b0 = alb.betaGm(init_prec, init_var_prec)
    #scale

    #COMPONENTS 2 AND 3: gamma or inverse gamma
    if MM == 'GGM':
        #hyperparam. on rates
        init_rates = [
            np.true_divide(1, alb.betaGm(np.absolute(ML_param[2]),
                                         ML_param[3])),
            np.true_divide(1, alb.betaGm(np.absolute(ML_param[4]),
                                         ML_param[5]))
        ]
        dum_var_r = np.multiply(
            0.1, init_rates)  #(init_rates)* 0.1;#    var(init_rates);
        d_0 = alb.alphaGm(init_rates, dum_var_r)
        #shape
        e_0 = np.true_divide(1, alb.betaGm(init_rates, dum_var_r))
        #rate
        Erates = np.true_divide(d_0, e_0)  # == init_rates

        #hyperparam. on shapes
        init_shapes = [
            alb.alphaGm(np.absolute(ML_param[2]), ML_param[3]),
            alb.alphaGm(np.absolute(ML_param[4]), ML_param[5])
        ]
        #b_0=[1 1];c_0=b_0;
        #b_0=sum(resp(2:3,:),2)';c_0=b_0;
        b_0 = resp.sum(0)[1:3]
        c_0 = b_0
        #loga_0=((b_0* sp.polygamma(0,init_shapes)-(c_0*log(Erates)));
        loga_0 = np.multiply(b_0, sp.polygamma(0, init_shapes)) - (np.multiply(
            c_0, np.log(Erates)))
        #MAP_shapes=invpsi((loga_0+ (c_0 .* log(Erates))) ./ b_0) # == init_shapes

    elif MM == 'GIM':
        #hyperparam. on scales (inverse gamma) --> scale is r in the text,
        #r ~ inv gamma distr
        init_scales = [
            alb.betaIG(np.absolute(ML_param[2]), ML_param[3]),
            alb.betaIG(np.absolute(ML_param[4]), ML_param[5])
        ]
        dum_var_sc = np.multiply(0.1, init_scales)
        #var(init_scales);
        d_0 = alb.alphaGm(init_scales, dum_var_sc)
        #gamma shape
        e_0 = np.divide(1, alb.betaGm(init_scales, dum_var_sc))
        #gamma rate
        Escales = np.divide(d_0, e_0)
        # == init_scales

        #hyperparam. on shapes
        init_shapes = [
            alb.alphaIG(np.absolute(ML_param[2]), ML_param[3]),
            alb.alphaIG(np.absolute(ML_param[4]), ML_param[5])
        ]
        #b_0=[1 1];c_0=b_0;
        sumgam = resp.sum(0)
        b_0 = sumgam[1:3]
        c_0 = b_0
        loga_0 = -np.multiply(b_0, sp.polygamma(0, init_shapes)) + (
            np.multiply(c_0, np.log(Escales)))
        #MAP_shapes=invpsi((-loga_0+ (c_0 .* log(Escales))) ./ b_0) # == init_shapes

    post.append({
        'lambda': lambdap,
        'm_0': m0,
        'tau_0': tau0,
        'c0': c0,
        'b0': b0,
        'd_0': d_0,
        'e_0': e_0,
        'loga_0': loga_0,
        'b_0': b_0,
        'c_0': c_0
    })
    post = post[0]
    #Save posterior expectations for initialization of VB mixModel

    mix1 = []
    if MM == 'GGM':
        shapes = [0, 0]
        rates = [0, 0]
        #shapes=alphaGm(ML_param[2:3], ML_param(2:3,2))';
        #rates= 1./  betaGm(ML_param(2:3,1), ML_param(2:3,2))' ;
        shapes[0] = alb.alphaGm(abs(ML_param[2]), ML_param[3])
        shapes[1] = alb.alphaGm(abs(ML_param[4]), ML_param[5])
        rates[0] = np.divide(1, alb.betaGm(abs(ML_param[2]), ML_param[3]))
        rates[1] = np.divide(1, alb.betaGm(abs(ML_param[4]), ML_param[5]))
        mix1.append({
            'gammas': resp,
            'lambda': lambdap,
            'pi': pipi,
            'mu1': ML_param[0],
            'tau1': np.true_divide(1, ML_param[1]),
            'shapes': shapes,
            'rates': rates,
            'q': q,
            'prior': prior,
            'post': post,
            'ML': ML,
            'opts': opts
        })
    elif MM == 'GIM':
        shapes = [0, 0]
        scales = [0, 0]
        shapes[0] = alb.alphaIG(abs(ML_param[2]), ML_param[3])
        shapes[1] = alb.alphaIG(abs(ML_param[4]), ML_param[5])
        scales[0] = alb.betaIG(abs(ML_param[2]), ML_param[3])
        #   betaIG(ML_param(3,1), ML_param(3,2))  ];
        scales[1] = alb.betaIG(abs(ML_param[4]), ML_param[5])
        mix1.append({
            'gammas': resp,
            'lambda': lambdap,
            'pi': pipi,
            'mu1': ML_param[0],
            'tau1': np.true_divide(1, ML_param[1]),
            'shapes': shapes,
            'scales': scales,
            'q': q,
            'prior': prior,
            'post': post,
            'ML': ML,
            'opts': opts
        })

    mix1 = mix1[0]

    return mix1
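
A hypothetical call, mirroring the example in the function's own comments (data must be a 1-D vector with zero mean and unit std):

opts = []
opts.append({'MM': 'GIM', 'MLMMits': 1, 'MLMMtol': 1e-5})
mix = SIN_init_VB_MM(data, opts)  #data: e.g. z-scored upper-triangle edge weights
print(mix['ML']['LIK'])  #likelihood reached by the ML initialization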