def _evaluate(self, eval_dataset, num_recommendations, metrics, batch_size=1, num_users=None):
    """Evaluate the trained model on ``eval_dataset``.

    Wraps this trainer in an ``InferenceRecommender`` and runs a
    ``RecommenderEvaluator`` over the dataset.

    Args:
        eval_dataset: dataset to evaluate on (forwarded to the evaluator).
        num_recommendations: number of recommendations to generate per user.
        metrics: list of metric objects understood by ``RecommenderEvaluator``.
        batch_size: evaluation batch size (default 1).
        num_users: optional cap on the number of users evaluated; ``None``
            means evaluate all users.

    Returns:
        The accumulated metric results from ``RecommenderEvaluator.evaluate``.

    Raises:
        RuntimeError: if the model has not been initialized yet.
    """
    if self.model is None:
        # RuntimeError is more specific than the bare Exception raised
        # before; callers catching Exception still catch it (subclass).
        raise RuntimeError('Model not initialized')
    # Switch the model to inference mode (disables dropout / BN updates).
    self.model.eval()
    recommender = InferenceRecommender(self, num_recommendations)
    evaluator = RecommenderEvaluator(recommender, metrics)
    return evaluator.evaluate(eval_dataset, batch_size=batch_size, num_users=num_users)
# Load a prebuilt item-embeddings index and wrap it in an in-memory cache so
# repeated similarity lookups during evaluation are served from RAM.
embeddings_index.load(index_file=index_file)
cache_embeddings_index = MemCacheEmbeddingsIndex(embeddings_index)
# NOTE(review): semantics of scale=1 / n=50 come from SimilarityRecommender —
# presumably n is the number of candidate neighbors considered; confirm
# against its definition.
recommender = SimilarityRecommender(cache_embeddings_index, num_recommendations, scale=1, n=50)

# Load the training split plus the validation fold-in (test_tr) and held-out
# (test_te) splits.
# assumes data_dir ends with a path separator — TODO confirm
train_df = pd.read_csv(data_dir + 'train.csv')
val_te_df = pd.read_csv(data_dir + 'test_te.csv')
val_tr_df = pd.read_csv(data_dir + 'test_tr.csv')

# Build sparse CSR matrices. The train split defines the item-id mapping;
# the validation fold-in split defines the user-id mapping; the held-out
# split reuses both so all three matrices share consistent indices.
# (train_matrix itself is unused here — presumably this call is made for the
# item_id_map side effect; verify against the rest of the file.)
train_matrix, item_id_map, _ = dataframe_to_csr_matrix(train_df, **common_params)
val_tr_matrix, _, user_id_map = dataframe_to_csr_matrix(
    val_tr_df, item_id_map=item_id_map, **common_params)
val_te_matrix, _, _ = dataframe_to_csr_matrix(val_te_df, item_id_map=item_id_map,
                                              user_id_map=user_id_map, **common_params)

# Recommend from the fold-in interactions and score against the held-out
# interactions with standard ranking metrics.
val_tr_dataset = RecommendationDataset(val_tr_matrix, val_te_matrix)
metrics = [Recall(k=20), Recall(k=50), NDCG(k=100)]
evaluator = RecommenderEvaluator(recommender, metrics)
metrics_accumulated = evaluator.evaluate(val_tr_dataset, batch_size=500)
# Each entry accumulates per-user metric values; log the mean of each.
for metric in metrics_accumulated:
    log.info('{}: {}'.format(metric, np.mean(metrics_accumulated[metric])))