import numpy as np
import tensorflow as tf

# HMM, get_signals, and big_init are project-local helpers defined elsewhere
# in this codebase and are assumed to be importable here.

def fake_signal():
    signals = get_signals()
    # assume all signals are same length
    signals = np.array(signals)
    hmm = HMM(5, 3, signals[0].shape[1]) # M, K, D
    hmm.init_random(signals)

    init = tf.global_variables_initializer()
    session = tf.InteractiveSession()
    session.run(init)
    hmm.set_session(session)
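    # Note (added comment): tf.global_variables_initializer and
    # tf.InteractiveSession are TensorFlow 1.x graph-mode APIs; running this
    # under TensorFlow 2.x would require the tf.compat.v1 shims (an
    # assumption about the runtime, not stated by this project).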

    hmm.fit(signals, max_iter=30)
    L = hmm.get_cost_multi(signals).sum()
    print "LL for fitted params:", L

    # test in actual params
    _, _, _, pi, A, R, mu, sigma = big_init()

    # turn these into their "pre-softmax" forms
    pi = np.log(pi)
    A = np.log(A)
    R = np.log(R)
    M, K, D, _ = sigma.shape # need to convert full cov into diag cov
    logSigma = np.zeros((M, K, D))
    for j in range(M):
        for k in range(K):  # loop over the K components, matching logSigma's second axis
            logSigma[j,k] = np.log(np.diag(sigma[j,k]))
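    # Optional sanity check (a sketch added here, not part of the original
    # code): pi now holds log-probabilities, so softmaxing it should recover
    # the original distribution, because softmax(log p) = p / sum(p) = p
    # whenever p sums to 1 (assuming big_init returns a normalized pi).
    # _softmax is a hypothetical local helper used only for this check.
    def _softmax(x):
        e = np.exp(x - x.max())
        return e / e.sum()
    assert np.allclose(_softmax(pi), np.exp(pi)), "softmax(log pi) should recover pi"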

    hmm.set(pi, A, R, mu, logSigma)
    L = hmm.get_cost_multi(signals).sum()
    print "LL for actual params:", L
def fake_signal():
    signals = get_signals()
    hmm = HMM(5, 3)
    hmm.fit(signals)
    L = hmm.log_likelihood_multi(signals).sum()
    print "LL for fitted params:", L

    # test in actual params
    _, _, _, pi, A, R, mu, sigma = big_init()
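    # Unlike the TensorFlow variant above, this HMM.set takes pi, A, R, mu,
    # sigma in their natural probability / full-covariance form, so no
    # pre-softmax log transform is applied here.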
    hmm.set(pi, A, R, mu, sigma)
    L = hmm.log_likelihood_multi(signals).sum()
    print "LL for actual params:", L
def fake_signal():
    signals = get_signals()
    hmm = HMM(5, 3)
    hmm.fit(signals, max_iter=3)
    L = hmm.log_likelihood_multi(signals).sum()
    print("LL for fitted params:", L)

    # test in actual params
    _, _, _, pi, A, R, mu, sigma = big_init()

    # turn these into their "pre-softmax" forms
    pi = np.log(pi)
    A = np.log(A)
    R = np.log(R)

    # decompose sigma using cholesky factorization
    sigma = np.linalg.cholesky(sigma)
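    # np.linalg.cholesky operates on the trailing two axes, so each (D, D)
    # covariance sigma[j, k] is replaced by its lower-triangular factor L
    # with L @ L.T == sigma[j, k] -- presumably the factored form this
    # HMM variant's set() expects, per the comment above.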

    hmm.set(pi, A, R, mu, sigma)
    L = hmm.log_likelihood_multi(signals).sum()
    print("LL for actual params:", L)


def fake_signal():
    signals = get_signals()
    hmm = HMM(5, 3)
    hmm.fit(signals)
    L = hmm.log_likelihood_multi(signals).sum()
    print("LL for fitted params:", L)

    # test in actual params
    _, _, _, pi, A, R, mu, sigma = big_init()
    hmm.set(pi, A, R, mu, sigma)
    L = hmm.log_likelihood_multi(signals).sum()
    print("LL for actual params:", L)

if __name__ == '__main__':
    # real_signal()
    fake_signal()