Example #1
import numpy as np

import common
import em


def run_matrix_completion():
    # X is the incomplete ratings matrix (zeros mark missing entries);
    # it is loaded elsewhere in the original script.
    K = 12
    seed = 1
    mixture, post = common.init(X, K, seed)  # initialize mixture and posteriors from the seed
    (mu, var, p), post, ll = em.run(X, mixture, post)  # run EM to convergence
    # print('Mu:\n' + str(mu))
    # print('Var: ' + str(var))
    # print('P: ' + str(p))
    # print('post:\n' + str(post))
    # print('LL: ' + str(ll))
    X_pred = em.fill_matrix(X, common.GaussianMixture(mu, var, p))  # fill in missing entries
    X_gold = np.loadtxt('netflix_complete.txt')
    print("MAE:", common.mae(X_gold, X_pred))
Example #2
def data():
    # Return the data matrix, the fitted mixture, and the posteriors for reuse elsewhere.
    return X, common.GaussianMixture(mu, var, p), post
Example #3
# print('LL:' + str(ll))
# print()

# print("After first M-step:")
mu, var, p = em.mstep(X, post, mixture)
# print('Mu:\n' + str(mu))
# print('Var: ' + str(var))
# print('P: ' + str(p))
# print()

# print("After a run")
(mu, var, p), post, ll = em.run(X, mixture, post)
# print('Mu:\n' + str(mu))
# print('Var: ' + str(var))
# print('P: ' + str(p))
# print('post:\n' + str(post))
# print('LL: ' + str(ll))
X_pred = em.fill_matrix(X, common.GaussianMixture(mu, var, p))
# error = common.rmse(X_gold, X_pred)
# print("X_gold:\n" + str(X_gold))
# X_pred = np.round(X_pred)
# Write the completed matrix to a text file: a header line with the matrix
# dimensions (n rows, d columns), then one row of predicted ratings per line.
with open(
        '/home/animesh/WTA/movie_recommendation/recommender/trainer/test_file.txt',
        'w') as fil:
    fil.write(str(n) + ' ' + str(d) + '\n')
    for i in X_pred:
        for j in i:
            fil.write(str(round(j, 5)) + " ")
        fil.write("\n")
# print("RMSE: " + str(error))
Example #4
#  Input:
X = np.array([[2., 5., 3., 0., 0.], [3., 5., 0., 4., 3.], [2., 0., 3., 3., 1.],
              [4., 0., 4., 5., 2.], [3., 4., 0., 0., 4.], [1., 0., 4., 5., 5.],
              [2., 5., 0., 0., 1.], [3., 0., 5., 4., 3.], [0., 5., 3., 3., 3.],
              [2., 0., 0., 3., 3.], [3., 4., 3., 3., 3.], [1., 5., 3., 0., 1.],
              [4., 5., 3., 4., 3.], [1., 4., 0., 5., 2.], [1., 5., 3., 3., 5.],
              [3., 5., 3., 4., 3.], [3., 0., 0., 4., 2.], [3., 5., 3., 5., 1.],
              [2., 4., 5., 5., 0.], [2., 5., 4., 4., 2.]])

K = 4

mu = np.array([[2., 4., 5., 5., 0.], [3., 5., 0., 4., 3.],
               [2., 5., 4., 4., 2.], [0., 5., 3., 3., 3.]])
var = np.array([5.93, 4.87, 3.99, 4.51])
p = np.array([0.25, 0.25, 0.25, 0.25])
mixture_initial = common.GaussianMixture(mu, var, p)

#  After first E-step:
post_first_estep = np.array([[0.17713577, 0.12995693, 0.43161668, 0.26129062],
                             [0.08790299, 0.35848927, 0.41566414, 0.13794359],
                             [0.15529703, 0.10542632, 0.5030648, 0.23621184],
                             [0.23290326, 0.10485918, 0.58720619, 0.07503136],
                             [0.09060401, 0.41569201, 0.32452345, 0.16918054],
                             [0.07639077, 0.08473656, 0.41423836, 0.42463432],
                             [0.21838413, 0.20787523, 0.41319756, 0.16054307],
                             [0.16534478, 0.04759109, 0.63399833, 0.1530658],
                             [0.05486073, 0.13290982, 0.37956674, 0.43266271],
                             [0.08779356, 0.28748372, 0.37049225, 0.25423047],
                             [0.07715067, 0.18612696, 0.50647898, 0.23024339],
                             [0.16678427, 0.07789806, 0.45643509, 0.29888258],
                             [0.08544132, 0.24851049, 0.53837544, 0.12767275],
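The rows above are the responsibilities p(j | x_i) after one E-step, computed only over the observed (non-zero) entries of each data point. A minimal sketch of that computation for spherical Gaussians, assuming zeros mark missing ratings (em.estep itself is not shown in this listing):

import numpy as np

def estep_sketch(X, mu, var, p):
    # Responsibilities for a spherical Gaussian mixture when some entries are missing.
    n, K = X.shape[0], mu.shape[0]
    log_weighted = np.zeros((n, K))
    for i in range(n):
        mask = X[i] != 0                      # observed coordinates of row i
        d_obs = mask.sum()
        for j in range(K):
            diff = X[i, mask] - mu[j, mask]
            # log p_j + log N(x_obs | mu_j,obs, var_j * I)
            log_weighted[i, j] = (np.log(p[j])
                                  - 0.5 * d_obs * np.log(2 * np.pi * var[j])
                                  - 0.5 * diff @ diff / var[j])
    # Normalize each row so the K responsibilities sum to one
    weighted = np.exp(log_weighted)
    return weighted / weighted.sum(axis=1, keepdims=True)

# Hypothetical check against the arrays defined above:
# post = estep_sketch(X, mu, var, p)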
Example #5
X_pred = em.fill_matrix(X, mixture)

print(common.rmse(X_gold, X_pred))

print(mixture)
#print(em.fill_matrix(X_test
### Get the best seed and the best K, i.e. those that maximize the log-likelihood

## Best seed
# Get the highest log-likelihood
#optimal_seed_cost = em_total_likelihood_dict[0]
#for k, v in em_total_likelihood_dict.items():
#    if v > optimal_seed_cost:
#        optimal_seed_cost = v
#    else:
#        optimal_seed_cost = optimal_seed_cost
# Get the seed associated with the highest log-likelihood
#for k, v in em_total_likelihood_dict.items():
#    if v == optimal_seed_cost:
#        optimal_seed = k
#print(em_k_dict)
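The commented-out search above keeps the run with the largest log-likelihood. Assuming em_total_likelihood_dict maps each seed to its final log-likelihood, the same selection can be written directly:

optimal_seed = max(em_total_likelihood_dict, key=em_total_likelihood_dict.get)
optimal_seed_cost = em_total_likelihood_dict[optimal_seed]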

### Test case for exam

mixture = common.GaussianMixture(np.array([[1, 1], [1, 1]]),
                                 np.array([0.5, 0.5]), np.array([0.01, 0.99]))
post = np.ones((X_experiment.shape[0], 2)) / 2
mixture, post, loglike = em.run(X_experiment, mixture, post)

common.plot(X_experiment, mixture, post, "Test case")
print(post)
Example #6
import numpy as np
import em
import common

X = np.loadtxt("test_incomplete.txt")
X_gold = np.loadtxt("test_complete.txt")

K = 4
n, d = X.shape
seed = 0

# TODO: Your code here

mu = np.array([[2., 4., 5., 5., 0.], [3., 5., 0., 4., 3.],
               [2., 5., 4., 4., 2.], [0., 5., 3., 3., 3.]])

var = np.array([5.93, 4.87, 3.99, 4.51])
pi = np.array([0.25, 0.25, 0.25, 0.25])

mixture = common.GaussianMixture(mu, var, pi)
post, ll = em.estep(X, mixture)  # one E-step: posteriors and log-likelihood

print(post)
print(ll)

mixture = em.mstep(X, post, mixture)  # one M-step: re-estimated mixture parameters
print(mixture)
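Example #6 stops after a single E-step and M-step. The em.run call used in the earlier examples presumably alternates these two steps until the log-likelihood stops improving; a minimal sketch of such a loop, with the stopping threshold chosen here as an assumption rather than taken from the module:

def run_em_sketch(X, mixture, tol=1e-6):
    # Alternate E- and M-steps until the relative log-likelihood gain is below tol.
    prev_ll = None
    while True:
        post, ll = em.estep(X, mixture)        # responsibilities and log-likelihood
        if prev_ll is not None and ll - prev_ll <= tol * abs(ll):
            break
        mixture = em.mstep(X, post, mixture)   # re-estimate mu, var, p
        prev_ll = ll
    return mixture, post, ll

# mixture, post, ll = run_em_sketch(X, mixture)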