Example #1
0
                [self.mu, self.bu, self.bi, self.p, self.q])

    def load(self):
        """Restore model parameters previously written by ``save``.

        The companion save call writes the list ``[mu, bu, bi, p, q]``,
        so the unpacking order here must match it.  The original code
        unpacked as ``mu, p, q, bu, bi``, which silently swapped the bias
        vectors with the latent-factor matrices.
        """
        # allow_pickle=True is required: a list of differently-shaped arrays
        # is stored as an object array, which NumPy >= 1.16.3 refuses to
        # load by default.
        self.mu, self.bu, self.bi, self.p, self.q = np.load(
            'latent_factor.npy', allow_pickle=True)


# Build dense integer ids for every user name and book ISBN so ratings can be
# stored in an index-based matrix.
user_names = np.genfromtxt('users_name.csv', dtype=str)
n_users = len(user_names)
item_names = np.genfromtxt('books_ISBN.csv', dtype=str)
n_books = len(item_names)
user_name2id = dict(zip(user_names, range(n_users)))
book_ISBN2id = dict(zip(item_names, range(n_books)))

# implicit=False: explicit ratings only.  The second return value is
# discarded here (presumably a validation split -- confirm against my_IO).
R_train, _ = my_IO.read_ratings_train(user_name2id,
                                      book_ISBN2id,
                                      implicit=False)
test_user_ids, test_book_ids = my_IO.read_test(user_name2id, book_ISBN2id)
# NOTE(review): the two blocks below are disabled (triple-quoted strings used
# as block comments).  They reference `ratings_train`, which is not defined
# here -- the live variable is `R_train`; fix that before re-enabling.
'''
model = Naive(N=n_users, M=n_books)
model.fit(ratings_train)
result = model.predict(test_user_ids, test_book_ids)
np.savetxt('naive_wo_implicit.csv', result.astype(int), fmt='%d')
'''
'''
model = Baseline(N=n_users, M=n_books)
model.fit(ratings_train)
result = model.predict(test_user_ids, test_book_ids)
np.savetxt('baseline_wo_implicit.csv', result.astype(int), fmt='%d')
'''
Example #2
0
# Cap TensorFlow's per-process GPU memory usage at 90% of the device.
# (`config` is created earlier, outside this excerpt.)
config.gpu_options.per_process_gpu_memory_fraction = 0.9

# User lookup tables plus a per-user embedding matrix whose rows are ordered
# to match the integer user ids used below.
user_names = np.genfromtxt('users_name.csv', dtype=str)
user_embeds = my_IO.get_user_embeds('data/users.csv')
user_embeds = np.array([user_embeds[name] for name in user_names])
n_users = len(user_names)

book_ISBNs = np.genfromtxt('books_ISBN.csv', dtype=str)
n_books = len(book_ISBNs)

user_name2id = dict(zip(user_names, range(n_users)))
book_ISBN2id = dict(zip(book_ISBNs, range(n_books)))

# split selects which train/validation partition to load -- presumably an
# index into precomputed splits; confirm against my_IO.read_ratings_train.
split = 0
R_train, R_valid = my_IO.read_ratings_train(user_name2id,
                                            book_ISBN2id,
                                            implicit=False,
                                            split=split)

# Global mean rating over the observed entries only (`.nnz` suggests R_train
# is a scipy sparse matrix -- TODO confirm); used as the model's bias baseline.
mu = R_train.sum() / R_train.nnz

with tf.Session(config=config) as sess:

    # Initializing and building the Baseline model graph.
    model = Baseline(
        sess=sess,
        model_name=FLAGS.model_name,
        checkpoint_dirname=FLAGS.checkpoint_dirname,
    )
    model.build(mu=mu, N=n_users, M=n_books)
    model.build_loss()
    model.build_optimizer(optimizer='adam', lr=FLAGS.lr)
Example #3
0
# Command-line flags (the `flags` module is imported outside this excerpt).
FLAGS = flags.FLAGS

# TF 1.x session config: grow GPU memory on demand, capped at 90% of the
# device.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.9

# Map every user name and book ISBN to a dense integer id for index-based
# rating storage.
users_name = np.genfromtxt('users_name.csv', dtype=str)
n_users = len(users_name)
books_ISBN = np.genfromtxt('books_ISBN.csv', dtype=str)
n_books = len(books_ISBN)
users_name2id = dict(zip(users_name, range(n_users)))
books_ISBN2id = dict(zip(books_ISBN, range(n_books)))

# Explicit ratings only; the second return value (presumably the validation
# split -- confirm against my_IO) is discarded.
R_train, _ = my_IO.read_ratings_train(users_name2id,
                                      books_ISBN2id,
                                      implicit=False,
                                      split=0)

# Global mean rating over observed entries only (`.nnz` suggests R_train is a
# scipy sparse matrix -- TODO confirm); used as the model's bias baseline.
mu = R_train.sum() / R_train.nnz

with tf.Session(config=config) as sess:

    # Initializing and building the Baseline model graph.
    model = Baseline(
        sess=sess,
        model_name=FLAGS.model_name,
        checkpoint_dirname=FLAGS.checkpoint_dirname,
    )
    model.build(mu=mu, N=n_users, M=n_books)
    model.build_loss()
    model.build_optimizer(optimizer='adam', lr=FLAGS.lr)