# NOTE(review): this chunk starts MID-FUNCTION — the enclosing `def` (an item-CF
# scoring routine, presumably `_user_item_score`, given the call in __main__ below)
# begins before this view. The first three statements below belong to nested loops
# whose headers are not visible, so they cannot be re-indented to their true nesting
# here; their intended structure is described in the comments instead.

# Innermost statement of a j-loop over candidate items: keep only the nonzero
# similarity weights of item i in the dict Wi — TODO confirm surrounding loop headers.
if W[i][j] != 0: Wi[j] = W[i][j]
# After the j-loop (one level up, inside the i-loop): record the ids of the N items
# most similar to item i, sorted by descending similarity weight.
most_similar_items.append(set(x[0] for x in sorted(Wi.items(), key=lambda x: x[1], reverse=True)[:N]))
# After the i-loop (function level): scores[u][i] is user u's predicted score for item i.
scores = [[0 for _ in range(n_item)] for _ in range(n_user)]
# Accumulate scores: for each item i the user interacted with, boost every
# top-N neighbor j of i that the user has NOT interacted with yet.
for user_id in range(n_user):
    user_item_set = train_user_items[user_id]
    for i in user_item_set:
        for j in most_similar_items[i]:
            if j not in user_item_set:
                scores[user_id][j] += W[i][j]
    # Alternative (equivalent) accumulation order, kept from the original source:
    #for i in set(range(n_item)) - user_item_set:
    #    for j in user_item_set & most_similar_items[i]:
    #        scores[user_id][i] += W[i][j]
# Report elapsed wall-clock time (message text is a runtime string, left untranslated:
# "(耗时 … 秒)" ≈ "(took … seconds)"); start_time is set before this view — TODO confirm.
print('(耗时', time.time() - start_time, '秒)', sep='')
return scores


if __name__ == '__main__':
    # Load MovieLens-1M without negative sampling, build the item-item similarity
    # matrix, score all user/item pairs, then evaluate top-k precision/recall.
    n_user, n_item, train_data, test_data, topk_data = data_process.pack(data_loader.ml1m, negative_sample_ratio=0)
    W = _item_similarity(train_data, n_user, n_item)
    scores = _user_item_score(train_data, n_user, n_item, W, N=10)
    ks = [10, 36, 100]
    precisions, recalls = topk_evaluate(topk_data, lambda uis: [scores[u][i] for u, i in uis], ks)
    for k, precision, recall in zip(ks, precisions, recalls):
        # NOTE(review): the f1 term divides by (precision + recall) — this raises
        # ZeroDivisionError when both metrics are 0 at some k; consider guarding.
        print('[k=%d, precision=%.3f%%, recall=%.3f%%, f1=%.3f%%]' % (k, 100 * precision, 100 * recall, 200 * precision * recall / (precision + recall)))
if __name__ == '__main__':
    # Imports are kept inside the guard so merely importing this module
    # triggers neither TensorFlow GPU configuration nor data loading.
    import Recommender_System.utility.gpu_memory_growth  # side effect: enable GPU memory growth
    from Recommender_System.data import data_loader, data_process
    from Recommender_System.algorithm.MLP.model import MLP_model
    from Recommender_System.algorithm.train import train

    # Prepare MovieLens-100K splits plus the top-k evaluation sample.
    user_count, item_count, train_set, test_set, topk_sample = data_process.pack(data_loader.ml100k)

    # Three hidden layers of width 64, no L2 penalty, 30% dropout.
    mlp = MLP_model(user_count, item_count, dim=32, layers=[64, 64, 64], l2=0, dropout=0.3)

    # Fit for 30 epochs with mini-batches of 512 interactions.
    train(mlp, train_set, test_set, topk_sample, epochs=30, batch=512)
if __name__ == '__main__':
    # Guard-local imports: importing this module must not configure the GPU
    # or pull in the SLIM training stack as a side effect.
    import Recommender_System.utility.gpu_memory_growth  # side effect: enable GPU memory growth
    from Recommender_System.data import data_loader, data_process
    from Recommender_System.algorithm.SLIM.tool import get_user_item_matrix
    from Recommender_System.algorithm.SLIM.model import SLIM
    from Recommender_System.algorithm.SLIM.train import train

    # MovieLens-100K, no negative sampling, 12.5% of data held out for testing.
    user_count, item_count, train_set, test_set, topk_sample = data_process.pack(
        data_loader.ml100k, negative_sample_ratio=0, split_test_ratio=0.125)

    # Build the user-item interaction matrix from the training split,
    # then fit a SLIM model on it for 1000 epochs.
    A = get_user_item_matrix(user_count, item_count, train_set)
    slim_model = SLIM(user_count, item_count, A)
    train(slim_model, topk_sample, epochs=1000)
from Recommender_System.data import data_loader, data_process
from Recommender_System.algorithm.MLP.model import MLP
from Recommender_System.algorithm.train import train


if __name__ == '__main__':
    # MovieLens-1M; topk_sample_user=None keeps every user in the top-k evaluation sample.
    user_count, item_count, train_set, test_set, topk_sample = data_process.pack(
        data_loader.ml1m, topk_sample_user=None)

    # MLP with embedding dim 32, hidden layers 32→16→8 and no L2 regularization,
    # trained for 30 epochs with batches of 512.
    mlp = MLP(user_count, item_count, dim=32, layers=[32, 16, 8], l2=0)
    train(mlp, train_set, test_set, topk_sample, epochs=30, batch=512)
    # To persist trained weights, uncomment:
    #mlp.save_weights('save/ml100k_40,[64,48,32],0.00001.h5')