import logging

from mrec import load_fast_sparse_matrix
from mrec.item_similarity.knn import CosineKNNRecommender
from mrec.mf.warp import WARPMFRecommender
from sparsesvd import sparsesvd

# Project-local modules.
import config
import topNLists
import trainData


def testPredictionMethods(train_filename, eval_item_filename, user_means_filename):
    '''
    Compare the predictions generated by the different approaches:
    computes the pairwise list overlap and the average recall for each method.
    '''
    logging.info('testing predictions with data files {0}; {1}; {2}...'.format(
        train_filename, eval_item_filename, user_means_filename))

    # Fit the two mrec-based recommenders on the raw training matrix.
    mrec_train_data = load_fast_sparse_matrix('tsv', train_filename)
    mrec_recommender = CosineKNNRecommender(config.NEIGHBOURHOOD_SIZE)
    mrec_recommender.fit(mrec_train_data)
    warp_recommender = WARPMFRecommender(d=50, gamma=0.01, C=100.0)
    warp_recommender.fit(mrec_train_data.X)

    # Build the project's own train data object and a truncated SVD of the
    # rating matrix for the factor-based method.
    train_data = trainData.TrainData(train_filename, user_means_filename)
    _, _, Q = sparsesvd(train_data.rating_matrix.tocsc(), config.FACTOR_MODEL_SIZE)

    recalls = {}
    overlaps = {}
    top_recs = {}
    user_counter = 0.0
    methods = ['mrec', 'warp', 'mf',
               'ub_classic', 'ib_classic',
               'ub_damping', 'ib_damping',
               'ub_non', 'ib_non']

    with open(eval_item_filename, 'r') as eval_file:
        for line in eval_file:
            data = line.split('\t')
            user_id = data[0]
            ground_truth_items = data[1].split(',')
            random_unrated_items = data[2].rstrip('\n').split(',')
            evaluation_item_ids = ground_truth_items + random_unrated_items

            # For each prediction method, compute the topN recommendations
            # once per user.
            predictions = mrec_recommender.recommend_items(
                mrec_train_data.X, int(user_id) - config.MREC_INDEX_OFFSET,
                max_items=10000, return_scores=True)
            top_recs['mrec'] = topNLists.getTopNList(
                predictions, evaluation_item_ids=evaluation_item_ids)

            predictions = warp_recommender.recommend_items(
                mrec_train_data.X, int(user_id) - config.MREC_INDEX_OFFSET,
                max_items=10000, return_scores=True)
            top_recs['warp'] = topNLists.getTopNList(
                predictions, evaluation_item_ids=evaluation_item_ids)

            predictions = train_data.getFactorBasedRecommendations(
                user_id, Q, evaluation_item_ids)
            top_recs['mf'] = topNLists.getTopNList(predictions)

            # The neighbourhood-based methods come in three normalization
            # variants, each computed both user-based and item-based.
            for key, variant in [('classic', 'classic'),
                                 ('damping', 'self_damping'),
                                 ('non', 'non_normalized')]:
                predictions = train_data.getUserBasedRecommendations(
                    user_id, evaluation_item_ids, variant)
                top_recs['ub_' + key] = topNLists.getTopNList(predictions)
                predictions = train_data.getItemBasedRecommendations(
                    user_id, evaluation_item_ids, variant)
                top_recs['ib_' + key] = topNLists.getTopNList(predictions)

            # Then use the computed topN lists to update the running recall
            # and pairwise overlap sums.
            for method1 in methods:
                recalls[method1] = recalls.get(method1, 0.0) + \
                    topNLists.getRecall(ground_truth_items, top_recs[method1])
                for method2 in methods:
                    dict_key = method1 + '_' + method2
                    overlaps[dict_key] = overlaps.get(dict_key, 0.0) + \
                        topNLists.computeRecommendationListOverlap(
                            top_recs[method1], top_recs[method2])

            user_counter += 1.0
            logging.info('Tested user {0}. Current recalls: {1}. Current overlaps: {2}'.format(
                user_id,
                [(k, v / user_counter) for k, v in recalls.items()],
                [(k, v / user_counter) for k, v in overlaps.items()]))

    return recalls, overlaps
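
# --- Illustrative sketches (assumptions, not part of the original module) ---
# topNLists.getRecall and topNLists.computeRecommendationListOverlap are
# project-local helpers whose source is not shown here. The two functions
# below sketch the standard definitions they presumably follow: recall as
# the fraction of ground-truth items that made it into the topN list, and
# overlap as the shared fraction of two equally sized topN lists. The real
# helpers may differ in detail.

def _recall_sketch(ground_truth_items, top_n_list):
    # Fraction of the ground-truth items found in the recommended topN list.
    hits = len(set(ground_truth_items) & set(top_n_list))
    return hits / float(len(ground_truth_items))


def _overlap_sketch(top_n_list_a, top_n_list_b):
    # Fraction of items two topN lists of equal length have in common.
    shared = len(set(top_n_list_a) & set(top_n_list_b))
    return shared / float(len(top_n_list_a))
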
def testWarpRecall(train_filename, eval_item_filename, user_means_filename):
    '''
    Measure the recall of the WARP recommender alone, using
    diversification-sized topN lists, over at most 300 evaluation cases.

    NOTE: the original header of this function was lost; the name, the
    signature, and the setup block below are reconstructed from the loop
    body, mirroring testPredictionMethods() above.
    '''
    mrec_train_data = load_fast_sparse_matrix('tsv', train_filename)
    warp_recommender = WARPMFRecommender(d=50, gamma=0.01, C=100.0)
    warp_recommender.fit(mrec_train_data.X)
    train_data = trainData.TrainData(train_filename, user_means_filename)

    recall = 0.0
    evaluation_cases = 0
    with open(eval_item_filename, 'r') as eval_file:
        for line in eval_file:
            data = line.split('\t')
            user_id = data[0]
            ground_truth_items = data[1].split(',')
            random_unrated_items = data[2].rstrip('\n').split(',')

            # This is a fix for the few Last.fm users who have identical
            # ratings for all train items: we have to skip those users,
            # because they lose all ratings in the mean-centered matrix.
            user_index = train_data.getUserIndex(user_id)
            if len(train_data.getUserProfileByIndex(user_index)) < 1:
                continue

            try:
                evaluation_item_ids = ground_truth_items + random_unrated_items
                rec_list_size = (config.RECOMMENDATION_LIST_SIZE *
                                 config.DIVERSIFICATION_CANDIDATES_FACTOR)
                predictions = warp_recommender.recommend_items(
                    mrec_train_data.X, int(user_id) - config.MREC_INDEX_OFFSET,
                    max_items=10000, return_scores=True)
                top_recs = topNLists.getTopNList(
                    predictions, rec_list_size, evaluation_item_ids)
                recall += topNLists.getRecall(ground_truth_items, top_recs)
                evaluation_cases += 1
            except Exception as e:
                logging.info('prediction failed for user {0}: {1}'.format(user_id, e))

            if evaluation_cases == 300:
                break

    logging.info('...done, recall={0}'.format(recall / evaluation_cases))
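
# --- Hypothetical usage (paths are illustrative assumptions) ---
# The train file must be in mrec's TSV format (user_id <tab> item_id <tab>
# rating); the evaluation file uses the tab-separated layout parsed above.
# Note that testPredictionMethods() returns raw sums; divide by the number
# of evaluated users (as the per-user log line does) to obtain averages.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    recalls, overlaps = testPredictionMethods(
        'data/train.tsv', 'data/eval_items.tsv', 'data/user_means.tsv')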