def MM_E_step(x, K, opts, tmp_mu, tmp_v, tmp_PI, xpos, xneg):
    """One E-step of the method-of-moments mixture-model EM loop.

    Parameters
    ----------
    x : 1-D array of samples.
    K : number of mixture components.
    opts : dict whose 'Components_Model' entry lists, per component, one of
        'Gauss', 'Gamma', '-Gamma', 'InvGamma', '-InvGamma', 'Beta'.
    tmp_mu, tmp_v : current per-component means and variances.
    tmp_PI : current mixing proportions (length K).
    xpos, xneg : index arrays of the (approximately) positive / negative
        samples, used to zero probabilities outside a one-sided support.

    Returns
    -------
    PS : (K, len(x)) ndarray of per-component pdf values at each sample.
    resp : (K, len(x)) matrix of responsibilities (columns sum to 1).
    tmp_PI : updated mixing proportions as a (1, K) matrix.
    N : (K, 1) matrix, effective number of samples per component.
    Exp_lik : scalar likelihood score of the current model (see below).
    """
    PS = np.zeros([K, x.shape[0]])  # pdf of every sample under every component
    # Shape/scale parameters; remain zero for components that do not use them
    # (e.g. pure Gaussian components).
    tmp_a = np.zeros(K)
    tmp_b = np.zeros(K)
    for k in range(K):
        model = opts['Components_Model'][k]
        if model == 'Gauss':
            Nobj = scipy.stats.norm(tmp_mu[k], np.power(tmp_v[k], 0.5))
            PS[k, :] = Nobj.pdf(x)
        elif model == 'Gamma':
            tmp_a[k] = alb.alphaGm(tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.betaGm(tmp_mu[k], tmp_v[k])
            PS[k, :] = alb.gam_self(x, tmp_a[k], tmp_b[k])
            PS[k, xneg] = 0  # Gamma support is the positive half-line
        elif model == '-Gamma':
            # Mirrored Gamma: negate both the mean and the data.
            tmp_a[k] = alb.alphaGm(-1 * tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.betaGm(-1 * tmp_mu[k], tmp_v[k])
            PS[k, :] = alb.gam_self(-1 * x, tmp_a[k], tmp_b[k])
            PS[k, xpos] = 0
        elif model == 'InvGamma':
            tmp_a[k] = alb.alphaIG(tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.betaIG(tmp_mu[k], tmp_v[k])
            PS[k, :] = alb.invgam(x, tmp_a[k], tmp_b[k])
            PS[k, xneg] = 0
        elif model == '-InvGamma':
            # Mirrored inverse-Gamma, analogous to '-Gamma'.
            tmp_a[k] = alb.alphaIG(-1 * tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.betaIG(-1 * tmp_mu[k], tmp_v[k])
            PS[k, :] = alb.invgam(-1 * x, tmp_a[k], tmp_b[k])
            PS[k, xpos] = 0
        elif model == 'Beta':
            tmp_a[k] = alb.a_beta_distr(tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.b_beta_distr(tmp_mu[k], tmp_v[k])
            PS[k, :] = scipy.stats.beta.pdf(x, tmp_a[k], tmp_b[k])
    # Guard against numerical blow-ups in the pdf evaluations.
    PS[np.isnan(PS)] = 0
    PS[np.isinf(PS)] = 0
    # Weighted densities -> responsibilities -> updated mixing proportions.
    # np.matrix is kept deliberately: callers index the result as a 2-D
    # matrix (e.g. np.asarray(tmp_PI)[0]).
    D = np.multiply(PS, np.matrix(tmp_PI).T)
    resp = np.divide(D, np.matrix(np.sum(D, 0)))
    N = np.sum(resp, 1)
    tmp_PI = np.divide(N, np.sum(resp)).T
    # Likelihood score: sum over components and samples of
    # log(pi_k * p_k(x_i)); zero or non-finite entries are replaced by 1 so
    # they contribute log(1) == 0 to the sum.
    dum = np.multiply(tmp_PI.T, PS)
    dum[np.isinf(dum)] = 1
    dum[dum == 0] = 1
    Exp_lik = np.sum(np.log(dum))
    return PS, resp, tmp_PI, N, Exp_lik
def init_ML(x, opts):
    """Initialise maximum-likelihood fitting from a method-of-moments pre-fit.

    Runs alb.Mix_Mod_MethodOfMoments on a deep copy of *opts* and converts
    the fitted per-component moments (mean, variance) into the natural
    parameters of each component's distribution family.

    Returns
    -------
    param1, param2 : per-component distribution parameters (family-specific).
    maxiters, tol : iteration budget and convergence tolerance from opts.
    K : number of components.
    tmp_PI : mixing proportions from the moments fit.
    Exp_lik : likelihood trace of the moments fit.
    """
    tol = opts['tol']
    maxiters = opts['maxits']
    K = opts['Number_of_Components']
    opts_MM = copy.deepcopy(opts)
    Model = alb.Mix_Mod_MethodOfMoments(x, opts_MM)
    Exp_lik = Model['Likelihood']
    tmp_mu = Model['mu1']
    tmp_v = Model['variances']
    tmp_PI = Model['Mixing Prop.']

    # Dispatch table: family name -> (param1, param2) from (mean, variance).
    # Note the Gamma families store 1/beta as the second parameter, while the
    # inverse-Gamma and Beta families store the second parameter directly.
    to_params = {
        'Gauss': lambda m, v: (m, v),
        'Gamma': lambda m, v: (alb.alphaGm(m, v),
                               np.divide(1., alb.betaGm(m, v))),
        '-Gamma': lambda m, v: (alb.alphaGm(-1 * m, v),
                                np.divide(1., alb.betaGm(-1 * m, v))),
        'InvGamma': lambda m, v: (alb.alphaIG(m, v), alb.betaIG(m, v)),
        '-InvGamma': lambda m, v: (alb.alphaIG(-1 * m, v),
                                   alb.betaIG(-1 * m, v)),
        'Beta': lambda m, v: (alb.a_beta_distr(m, v),
                              alb.b_beta_distr(m, v)),
    }
    param1 = np.zeros(K)
    param2 = np.zeros(K)
    for k in range(K):
        convert = to_params.get(opts['Components_Model'][k])
        if convert is not None:  # unknown families keep the zero defaults
            param1[k], param2[k] = convert(tmp_mu[k], tmp_v[k])
    return param1, param2, maxiters, tol, K, tmp_PI, Exp_lik
def Mix_Mod_MethodOfMoments(x, opts=None):
    """Fit a K-component mixture model to *x* by method-of-moments EM.

    Parameters
    ----------
    x : 1-D array of samples.
    opts : dict of fitting options; when omitted, defaults to a 3-component
        Gauss / Gamma / -Gamma mixture. Built per call to avoid the shared
        mutable-default-argument pitfall.

    Returns
    -------
    dict with the fitted means, variances, mixing proportions, likelihood
    trace, iteration count, final responsibilities and the per-component
    shape/scale/rate parameters.
    """
    if opts is None:
        opts = {
            'Number_of_Components': 3,
            'Components_Model': ['Gauss', 'Gamma', '-Gamma'],
            'init_params': [0, 1, 3, 1, -3, 1],
            # np.int was removed from NumPy (1.24+); a plain int is identical.
            'maxits': 100,
            'tol': 0.00001,
            'init_pi': np.true_divide(np.ones(3), 3),
        }
    tmp_mu, tmp_v, maxiters, tol, K, tmp_PI, Exp_lik = init_MM(x, opts)
    # Indices of samples below / above (minus) a small tolerance around zero;
    # used by the E-step to zero out one-sided distributions.
    xneg = np.argwhere(x < pow(10, -14))[:, 0]
    xpos = np.argwhere(x > -pow(10, -14))[:, 0]
    # EM iterations
    flag = 0
    it = 0
    while flag == 0:
        # E-step: responsibilities, mixing proportions and likelihood.
        PS, resp, tmp_PI, N, Exp_lik[it] = MM_E_step(
            x, K, opts, tmp_mu, tmp_v, tmp_PI, xpos, xneg)
        # M-step: update the component moments.
        # BUG FIX: the variance update was assigned to a misspelled name
        # (tm_v), so tmp_v never changed across iterations.
        tmp_mu, tmp_v = MM_M_step(x, K, tmp_mu, tmp_v, resp, N)
        # Converged when the relative likelihood change drops below tol,
        # or when the iteration budget is exhausted.
        if it > 0:
            if (abs((Exp_lik[it] - Exp_lik[it - 1]) / Exp_lik[it - 1]) < tol) \
                    | (it > maxiters - 1):
                flag = 1
        it = it + 1
    # Gather output: convert the final moments into per-family parameters.
    # tmp_a/tmp_b/tmp_c remain zero for families that do not set them.
    tmp_a = np.zeros(K)
    tmp_b = np.zeros(K)
    tmp_c = np.zeros(K)
    for k in range(K):
        if opts['Components_Model'][k] == 'Gamma':
            tmp_a[k] = alb.alphaGm(tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.betaGm(tmp_mu[k], tmp_v[k])
        elif opts['Components_Model'][k] == '-Gamma':
            tmp_a[k] = alb.alphaGm(-1 * tmp_mu[k], tmp_v[k])
            tmp_b[k] = alb.betaGm(-1 * tmp_mu[k], tmp_v[k])
        elif opts['Components_Model'][k] == 'InvGamma':
            tmp_a[k] = alb.alphaIG(tmp_mu[k], tmp_v[k])
            tmp_c[k] = alb.betaIG(tmp_mu[k], tmp_v[k])
        elif opts['Components_Model'][k] == '-InvGamma':
            tmp_a[k] = alb.alphaIG(-1 * tmp_mu[k], tmp_v[k])
            tmp_c[k] = alb.betaIG(-1 * tmp_mu[k], tmp_v[k])
        elif opts['Components_Model'][k] == 'Beta':
            tmp_a[k] = alb.a_beta_distr(tmp_mu[k], tmp_v[k])
            tmp_c[k] = alb.b_beta_distr(tmp_mu[k], tmp_v[k])
    output_dict = {
        'means': tmp_mu,
        'mu1': tmp_mu,
        'variances': tmp_v,
        'taus1': np.divide(1, tmp_v),
        'Mixing Prop.': np.asarray(tmp_PI)[0],
        'Likelihood': Exp_lik[0:it],
        'its': it,
        'Final responsibilities': resp,
        'opts': opts,
        'shapes': tmp_a,
        # NOTE(review): 'scales' comes from tmp_c (InvGamma/Beta second
        # parameter) and 'rates' from 1/tmp_b (Gamma families); components of
        # other families leave zeros here (so 'rates' can contain inf).
        'scales': tmp_c,
        'rates': np.divide(1., tmp_b)
    }
    return output_dict