def test_recommendation_evaluation_15(model, interactions_ds):
    """Evaluation with invalid number of k (< 0) should raise with a clear message.

    Uses try/except/else instead of `assert False` inside the `try`: the
    original pattern let the AssertionError be caught by the broad
    `except Exception`, turning a "no exception raised" failure into a
    confusing message-mismatch failure.
    """
    try:
        recommendation_evaluation(model, interactions_ds[1], cn_test_users=None, k=-1,
                                  n_pos_interactions=None, novelty=False,
                                  metrics=[NDCG(), HitRatio()], verbose=False)
    except Exception as e:
        # The library is expected to reject non-positive k values.
        assert str(e) == 'k (-1) should be > 0.'
    else:
        assert False, 'Expected an exception for k=-1, but none was raised.'
def test_recommendation_evaluation_16(model, interactions_ds):
    """Invalid metrics value (not a list) should raise with a clear message.

    Uses try/except/else instead of `assert False` inside the `try`: the
    original pattern let the AssertionError be caught by the broad
    `except Exception`, masking the real failure reason.
    """
    try:
        recommendation_evaluation(model, interactions_ds[1], cn_test_users=None, k=5,
                                  n_pos_interactions=None, novelty=False,
                                  metrics={}, verbose=False)
    except Exception as e:
        # The library is expected to reject non-list metrics arguments.
        assert str(e) == 'Expected "metrics" argument to be a list and found <class \'dict\'>. ' \
                         'Should contain instances of RankingMetricABC.'
    else:
        assert False, 'Expected an exception for non-list metrics, but none was raised.'
def test_recommendation_evaluation_13(model, interactions_ds):
    """Evaluation with invalid number of positive interactions (< 0) should raise.

    Uses try/except/else instead of `assert False` inside the `try`: the
    original pattern let the AssertionError be caught by the broad
    `except Exception`, masking the real failure reason.
    """
    try:
        recommendation_evaluation(model, interactions_ds[1], cn_test_users=None, k=[1, 2],
                                  n_pos_interactions=-1, novelty=False,
                                  metrics=[NDCG(), HitRatio()], verbose=False)
    except Exception as e:
        # The library is expected to reject negative n_pos_interactions.
        assert str(e) == 'The number of positive interactions (-1) should be None or an integer > 0.'
    else:
        assert False, 'Expected an exception for n_pos_interactions=-1, but none was raised.'
def test_recommendation_evaluation_18(model, interactions_ds):
    """Evaluation with a custom ignore low predictions threshold."""
    # Expected ranking metrics at k=2 when predictions below 2 are ignored.
    expected = {
        'HitRatio@2': 0.0167,
        'NDCG@2': 0.0189,
        'Precision@2': 0.02,
        'Recall@2': 0.0167,
    }
    result = recommendation_evaluation(model, interactions_ds[1], cn_test_users=None, k=2,
                                       n_pos_interactions=None, novelty=False,
                                       ignore_low_predictions_threshold=2)
    assert result == expected