def learnModel(self, X):
    learner = WARPMFRecommender(self.k, self.alpha, self.lmbda,
                                self.batchSize, self.maxTrials)
    learner.fit(X)
    return learner.U, learner.V
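# A minimal sketch (not part of the original class) of how the factor matrices
# returned by learnModel can be used: U and V are the user and item latent
# factors learned by mrec's WARPMFRecommender; the helper name below is
# hypothetical.
def predictScores(U, V, user_index):
    # predicted score for every item: dot product of the user's latent vector
    # with each item's latent vector
    return U[user_index].dot(V.T)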
def testPredictionMethods(train_filename, eval_item_filename, user_means_filename):
    '''
    Compare the predictions generated by the different approaches:
    computes the pairwise list overlap and the average recall for each method.
    '''
    logging.info('testing predictions with data files {0}; {1}; {2}...'.format(
        train_filename, eval_item_filename, user_means_filename))

    mrec_train_data = load_fast_sparse_matrix('tsv', train_filename)
    mrec_recommender = CosineKNNRecommender(config.NEIGHBOURHOOD_SIZE)
    mrec_recommender.fit(mrec_train_data)
    warp_recommender = WARPMFRecommender(d=50, gamma=0.01, C=100.0)
    warp_recommender.fit(mrec_train_data.X)
    train_data = trainData.TrainData(train_filename, user_means_filename)
    _, _, Q = sparsesvd(train_data.rating_matrix.tocsc(), config.FACTOR_MODEL_SIZE)

    recalls = {}
    overlaps = {}
    top_recs = {}
    user_counter = 0.0
    methods = ['mrec', 'warp', 'mf', 'ub_classic', 'ib_classic',
               'ub_damping', 'ib_damping', 'ub_non', 'ib_non']

    with open(eval_item_filename, 'r') as eval_file:
        for line in eval_file:
            data = line.split('\t')
            user_id = data[0]
            ground_truth_items = data[1].split(',')
            random_unrated_items = data[2].rstrip('\n').split(',')
            evaluation_item_ids = ground_truth_items + random_unrated_items

            # for each prediction method, compute topN recommendations once per user
            predictions1 = mrec_recommender.recommend_items(
                mrec_train_data.X, int(user_id) - config.MREC_INDEX_OFFSET,
                max_items=10000, return_scores=True)
            top_recs['mrec'] = topNLists.getTopNList(
                predictions1, evaluation_item_ids=evaluation_item_ids)
            predictions2 = warp_recommender.recommend_items(
                mrec_train_data.X, int(user_id) - config.MREC_INDEX_OFFSET,
                max_items=10000, return_scores=True)
            top_recs['warp'] = topNLists.getTopNList(
                predictions2, evaluation_item_ids=evaluation_item_ids)
            predictions3 = train_data.getFactorBasedRecommendations(
                user_id, Q, evaluation_item_ids)
            top_recs['mf'] = topNLists.getTopNList(predictions3)
            predictions4 = train_data.getUserBasedRecommendations(
                user_id, evaluation_item_ids, 'classic')
            top_recs['ub_classic'] = topNLists.getTopNList(predictions4)
            predictions5 = train_data.getItemBasedRecommendations(
                user_id, evaluation_item_ids, 'classic')
            top_recs['ib_classic'] = topNLists.getTopNList(predictions5)
            predictions6 = train_data.getUserBasedRecommendations(
                user_id, evaluation_item_ids, 'self_damping')
            top_recs['ub_damping'] = topNLists.getTopNList(predictions6)
            predictions7 = train_data.getItemBasedRecommendations(
                user_id, evaluation_item_ids, 'self_damping')
            top_recs['ib_damping'] = topNLists.getTopNList(predictions7)
            predictions8 = train_data.getUserBasedRecommendations(
                user_id, evaluation_item_ids, 'non_normalized')
            top_recs['ub_non'] = topNLists.getTopNList(predictions8)
            predictions9 = train_data.getItemBasedRecommendations(
                user_id, evaluation_item_ids, 'non_normalized')
            top_recs['ib_non'] = topNLists.getTopNList(predictions9)

            # then, use the computed topN lists to update recall and overlap values
            for method1 in methods:
                if method1 in recalls:
                    recalls[method1] += topNLists.getRecall(ground_truth_items, top_recs[method1])
                else:
                    recalls[method1] = topNLists.getRecall(ground_truth_items, top_recs[method1])
                for method2 in methods:
                    dict_key = method1 + '_' + method2
                    if dict_key in overlaps:
                        overlaps[dict_key] += topNLists.computeRecommendationListOverlap(
                            top_recs[method1], top_recs[method2])
                    else:
                        overlaps[dict_key] = topNLists.computeRecommendationListOverlap(
                            top_recs[method1], top_recs[method2])

            user_counter += 1.0
            logging.info('Tested user {0}. Current recalls: {1}. Current overlaps: {2}'.format(
                user_id,
                [(k, v / user_counter) for k, v in recalls.items()],
                [(k, v / user_counter) for k, v in overlaps.items()]))

    return recalls, overlaps
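# testPredictionMethods returns the accumulated recall and overlap sums rather
# than averages; the log line above divides by user_counter to report running
# means. A small sketch of that final averaging step; the helper name
# averageMetrics is hypothetical.
def averageMetrics(recalls, overlaps, user_counter):
    # turn the accumulated per-user sums into mean recall / mean overlap values
    avg_recalls = {method: total / user_counter for method, total in recalls.items()}
    avg_overlaps = {pair: total / user_counter for pair, total in overlaps.items()}
    return avg_recalls, avg_overlaps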
filenames = dataPreprocessing.loadData(mode='beyond_accuracy')

# 5-fold cross-validation
for iteration, (train_filename, test_filename, user_means_filename,
                eval_item_filename) in enumerate(filenames, 1):
    mrec_train_data = load_fast_sparse_matrix('tsv', train_filename)
    # create the training data and required recommendation models
    train_data = trainData.TrainData(train_filename, user_means_filename)
    for factor_value, C_value, gamma_value in product(
            factor_values, C_values, gamma_values):
        warp_recommender = WARPMFRecommender(d=factor_value, gamma=gamma_value, C=C_value)
        warp_recommender.fit(mrec_train_data.X)
        logging.info('running fold {0} with f={1}, C={2}, g={3}...'.format(
            iteration, factor_value, C_value, gamma_value))
        recall = 0
        evaluation_cases = 0
        with open(eval_item_filename, 'r') as eval_file:
            for line in eval_file:
                data = line.split('\t')
                user_id = data[0]
                ground_truth_items = data[1].split(',')
                random_unrated_items = data[2].rstrip('\n').split(',')
                # THIS IS A FIX FOR THE FEW USERS IN LAST.FM WHO HAVE
                # IDENTICAL RATINGS FOR ALL TRAIN ITEMS
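# A minimal sketch of the per-case recall computation the evaluation loop above
# accumulates into `recall` and `evaluation_cases`, assuming the usual
# "held-out items vs. top-N list" protocol; the helper name is hypothetical and
# topNLists.getRecall may differ in detail.
def recallForCase(ground_truth_items, top_n_list):
    # fraction of the user's held-out items that appear in the top-N recommendations
    hits = sum(1 for item in ground_truth_items if item in top_n_list)
    return float(hits) / len(ground_truth_items)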