import numpy as np
from scipy import stats
# nu refers to a local utility module used throughout these examples; it provides
# log-domain helpers (nu.log, nu.normalize, nu.normalize_across_row,
# nu.normalize_log_across_row). A minimal sketch of it follows the first example.

def E_step(Y_, pi_, lambda_):
    # E step for a mixture of Poisson distributions: compute responsibilities Z_
    # and the variational lower bound from mixing weights pi_ and rates lambda_.
    N_ = Y_.size
    K_ = lambda_.size
    logZ_ = np.zeros((N_, K_))
    logZ_ += nu.log(pi_)
    for k in range(K_):
        logZ_[:,k] += stats.poisson.logpmf(Y_, lambda_[k])
    logZ_ = np.nan_to_num(logZ_)  # replace -inf/NaN (e.g. from log(0) or overflow in the addition) with finite values
    Z_ = nu.normalize_log_across_row(logZ_)
    lower_bound_ = np.sum(Z_ * (logZ_ - nu.log(Z_)))
    return (Z_, lower_bound_)
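# The nu module itself is not included in these snippets. Judging only from how it is
# called above, a minimal stand-in could look like the sketch below; the real module's
# behaviour may differ, so treat this as an inferred placeholder rather than the original.
import numpy as np

def log(x):
    # log() that maps zeros to a very large negative finite number instead of -inf
    with np.errstate(divide='ignore'):
        return np.nan_to_num(np.log(x))

def normalize(x):
    # scale a nonnegative matrix so that every row sums to one
    return x / np.sum(x, axis=1, keepdims=True)

def normalize_across_row(x):
    # same row-wise normalization, kept separate only to mirror the name used above
    return x / np.sum(x, axis=1, keepdims=True)

def normalize_log_across_row(logx):
    # row-wise softmax: convert per-row log weights into probabilities summing to one
    logx = logx - np.max(logx, axis=1, keepdims=True)
    p = np.exp(logx)
    return p / np.sum(p, axis=1, keepdims=True)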
def E_step(Y_, pi_, mu_, s_):
    # E step for a multivariate Gaussian mixture with means mu_ and covariances s_.
    N_ = Y_.shape[0]
    K_ = pi_.size
    logZ_ = np.zeros((N_, K_))
    for k in range(K_):
        logZ_[:,k] = stats.multivariate_normal.logpdf(Y_, mu_[k,:], s_[k,:])
        logZ_[:,k] += nu.log(pi_[k])
    logZ_ = np.nan_to_num(logZ_)  # replace -inf/NaN (e.g. from log(0) or overflow in the addition) with finite values
    Z_ = nu.normalize_log_across_row(logZ_)
    lower_bound_ = np.sum(Z_ * (logZ_ - nu.log(Z_)))
    return (Z_, lower_bound_)
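# A toy call of the Gaussian-mixture E step above. The data and parameters here are
# made up for illustration and assume the nu helpers sketched earlier.
Y = np.random.randn(100, 2)                  # 100 two-dimensional observations
pi = np.full(3, 1.0 / 3)                     # uniform mixing weights over 3 components
mu = np.random.randn(3, 2)                   # component means, shape (3, 2)
s = np.stack([np.eye(2)] * 3)                # identity covariances, shape (3, 2, 2)
Z, lb = E_step(Y, pi, mu, s)                 # responsibilities (100, 3) and lower bound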
def E_step(Y_, pi_, mu_):
    # E step for a mixture of multivariate Bernoulli distributions over binary data Y_.
    N_, D_ = Y_.shape
    K_ = mu_.shape[0]
    logZ_ = np.zeros((N_, K_))
    logZ_ += nu.log(pi_)
    for d in range(D_):
        y_ = Y_[:, d]
        for k in range(K_):
            logZ_[(y_ == 1), k] += nu.log(mu_[k, d])
            logZ_[(y_ == 0), k] += nu.log(1 - mu_[k, d])
    logZ_ = np.nan_to_num(logZ_)  # replace -inf/NaN (e.g. from log(0) or overflow in the addition) with finite values
    Z_ = nu.normalize_log_across_row(logZ_)
    lower_bound_ = np.sum(Z_ * (logZ_ - nu.log(Z_)))
    return (Z_, lower_bound_)
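# The loops over dimensions and components in the Bernoulli-mixture E step above can be
# replaced by two matrix products. The function below is an equivalent reformulation for
# illustration (not the original author's code); it assumes Y_ holds 0/1 values and the
# nu helpers sketched earlier.
def E_step_vectorized(Y_, pi_, mu_):
    logZ_ = nu.log(pi_) + Y_ @ nu.log(mu_).T + (1 - Y_) @ nu.log(1 - mu_).T
    logZ_ = np.nan_to_num(logZ_)
    Z_ = nu.normalize_log_across_row(logZ_)
    lower_bound_ = np.sum(Z_ * (logZ_ - nu.log(Z_)))
    return (Z_, lower_bound_)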
def update_beta(Y_, phi_, K_, D_, V_):
    # M-step update of the topic-word distributions beta (returned in log scale)
    # from the token-level responsibilities phi_.
    count_ = np.zeros((K_, V_))
    for k in range(K_):
        for d in range(D_):
            for i in range(len(Y_[d])):
                count_[k, Y_[d][i]] += phi_[d][i, k]
    beta_ = nu.log(nu.normalize(count_))
    return (beta_)
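# The accumulation in update_beta above can also be done without the inner loops over
# topics and tokens by using np.add.at; the variant below is an alternative formulation
# for illustration, not the original author's code.
def update_beta_vectorized(Y_, phi_, K_, D_, V_):
    count_ = np.zeros((V_, K_))
    for d in range(D_):
        np.add.at(count_, Y_[d], phi_[d])    # count_[Y_[d][i], :] += phi_[d][i, :]
    beta_ = nu.log(nu.normalize(count_.T))   # row-normalize each topic's word counts
    return (beta_)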
Example #5
def update_params(Y_, Q_):
    # M-step update of the emission probabilities for categorical observations,
    # weighted by the state posteriors Q_ (returned in log scale).
    C_ = np.unique(Y_).size
    K_ = Q_.shape[1]
    Bstar_ = np.zeros((C_, K_))
    for c in range(C_):
        Bstar_[c,:] = np.sum(Q_[(Y_==c),:], axis = 0)
    Qsum_ = np.sum(Q_, axis = 0)
    Bstar_ = Bstar_ / Qsum_
    return (nu.log(Bstar_))
def E_step(Y_, theta_, beta_, K_, D_):
    # E step for the topic-model example: per-document token responsibilities phi_
    # from document-topic proportions theta_ and topic-word distributions beta_.
    lower_bound_ = 0
    phi_ = []
    for d in range(D_):
        logphid = np.zeros((Y_[d].size, K_))
        logphid += theta_[d, :]
        for k in range(K_):
            logphid[:, k] += beta_[k, :][Y_[d]]
        phid_ = nu.normalize_log_across_row(logphid)
        lower_bound_ += np.sum(phid_ * (logphid - nu.log(phid_)))
        #lower_bound_ += np.sum(np.logaddexp.reduce(logphid, axis = 1))
        phi_.append(phid_)
    return (phi_, lower_bound_)
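# A possible driver loop for the topic-model example, tying this E step to
# random_initialization(K_, D_, V_), update_theta and update_beta from the other
# snippets in this listing. The name run_em and the parameters max_iter and tol are
# illustrative; Y is assumed to be a list of D integer arrays of word ids in [0, V).
def run_em(Y, K, V, max_iter=100, tol=1e-6):
    D = len(Y)
    theta, beta = random_initialization(K, D, V)
    prev = -np.inf
    for _ in range(max_iter):
        phi, lb = E_step(Y, theta, beta, K, D)   # token-level responsibilities
        theta = update_theta(phi, K, D)          # document-topic proportions (log scale)
        beta = update_beta(Y, phi, K, D, V)      # topic-word distributions (log scale)
        if lb - prev < tol:                      # stop once the bound stops improving
            break
        prev = lb
    return (theta, beta, phi)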
Example #7
def M_step(Y_, Q_, N_):
    # M step for the HMM with categorical emissions: initial distribution,
    # transition matrix and emission probabilities (all in log scale).
    pi0_ = nu.log(Q_[0,:])
    A_ = sync_A(N_)
    Bstar_ = update_params(Y_, Q_)
    return (pi0_, A_, Bstar_)
Example #8
def sync_A(N_):
    # Normalize the expected transition counts N_ into a row-stochastic transition matrix (log scale).
    A_ = nu.normalize_across_row(N_)
    return (nu.log(A_))
Example #9
def random_initialization(Y_, J_):
    # Random initialization for the categorical-emission HMM with J_ hidden states.
    C_ = np.unique(Y_).size
    pi0_ = nu.log(np.random.dirichlet(np.ones(J_)))
    A_ = nu.log(np.random.dirichlet(np.ones(J_), size = J_))
    Bstar_ = nu.log(np.random.dirichlet(np.ones(J_), size = C_))
    return (pi0_, A_, Bstar_)
Example #10
def random_initialization(K_, D_, V_):
    # Random initialization for the topic-model example: document-topic proportions
    # theta_ (D_ x K_) and topic-word distributions beta_ (K_ x V_), both in log scale.
    theta_ = nu.log(np.random.dirichlet(np.ones(K_), size=D_))
    beta_ = nu.log(np.random.dirichlet(np.ones(V_), size=K_))
    return (theta_, beta_)
Example #11
def update_theta(phi_, K_, D_):
    # M-step update of the document-topic proportions theta_ (log scale).
    count_ = np.zeros((D_, K_))
    for d in range(D_):
        count_[d, :] = np.sum(phi_[d], axis=0)
    theta_ = nu.log(nu.normalize(count_))
    return (theta_)
def init_transition(K_):
    # Random initialization of the initial-state distribution and transition matrix (log scale).
    pi0_ = nu.log(np.random.dirichlet(np.ones(K_)))
    A_ = nu.log(np.random.dirichlet(np.ones(K_), size = K_))
    return (pi0_, A_)
def M_step(Y_, Q_, N_):
    # M step for the HMM with Gaussian emissions: initial distribution, transition
    # matrix, and per-state means and covariances.
    pi0_ = nu.log(Q_[0,:])
    A_ = sync_A(N_)
    mu_, s_ = update_params(Y_, Q_, N_)
    return (pi0_, A_, mu_, s_)
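# The update_params called by this Gaussian-emission M_step is not included above (the
# update_params shown earlier handles categorical observations). Under the assumption
# that Q_ holds the state posteriors for observations Y_ (T x D), one possible
# implementation is sketched below; it is an inferred placeholder, not the original code.
def update_params(Y_, Q_, N_):
    K_ = Q_.shape[1]
    D_ = Y_.shape[1]
    Qsum_ = np.sum(Q_, axis=0)                   # expected number of visits per state
    mu_ = (Q_.T @ Y_) / Qsum_[:, None]           # weighted means, shape (K, D)
    s_ = np.zeros((K_, D_, D_))
    for k in range(K_):
        R_ = Y_ - mu_[k]                         # observations centred on state k's mean
        s_[k] = (Q_[:, k, None] * R_).T @ R_ / Qsum_[k]   # weighted covariance
    # N_ (expected transition counts) is accepted only to match the call in M_step above.
    return (mu_, s_)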