class TestcaseBase(unittest.TestCase):
    def setUp(self):
        """
        Build the shared fixtures used by every test: a deterministic
        ratings matrix, a patched DataParser, and a trained
        CollaborativeFiltering model.
        """
        # Corpus dimensions: 18 documents rated by 10 users.
        self.documents, self.users = 18, 10
        n_docs, n_users = self.documents, self.users
        self.n_iterations = 15
        self.k_folds = 3
        self.hyperparameters = {'n_factors': 5, '_lambda': 0.01}
        self.options = {'n_iterations': self.n_iterations, 'k_folds': self.k_folds}
        self.initializer = ModelInitializer(self.hyperparameters.copy(), self.n_iterations)
        self.n_recommendations = 1

        def mock_get_ratings_matrix(self=None):
            # Deterministic 0/1 matrix: user rated article iff
            # (article + user) is divisible by 3.
            matrix = []
            for user in range(n_users):
                row = [1 if (article + user) % 3 == 0 else 0
                       for article in range(n_docs)]
                matrix.append(row)
            return matrix

        self.ratings_matrix = numpy.array(mock_get_ratings_matrix())
        # Monkeypatch DataParser before building the model, so the model
        # sees the synthetic ratings instead of hitting real data.
        setattr(DataParser, "get_ratings_matrix", mock_get_ratings_matrix)

        self.evaluator = Evaluator(self.ratings_matrix)
        self.cf = CollaborativeFiltering(self.initializer, self.evaluator, self.hyperparameters,
                                         self.options, load_matrices=True)
        self.cf.train()
        self.cf.evaluator.k_folds = self.k_folds
        self.test_data = self.cf.test_data
        self.predictions = self.cf.get_predictions()
        self.rounded_predictions = self.cf.rounded_predictions()
 def runTest(self):
     cf = CollaborativeFiltering(self.initializer, self.evaluator, self.hyperparameters,
                                 self.options, load_matrices=False)
     self.assertEqual(cf.n_factors, self.n_factors)
     self.assertEqual(cf.n_items, self.documents)
     cf.train()
     self.assertEqual(cf.get_predictions().shape, (self.users, self.documents))
     self.assertTrue(isinstance(cf, AbstractRecommender))
     shape = (self.users, self.documents)
     ratings = cf.get_ratings()
     self.assertLessEqual(numpy.amax(ratings), 1 + 1e-6)
     self.assertGreaterEqual(numpy.amin(ratings), -1e-6)
     self.assertTrue(ratings.shape == shape)
     rounded_predictions = cf.rounded_predictions()
     self.assertLessEqual(numpy.amax(rounded_predictions), 1 + 1e-6)
     self.assertGreaterEqual(numpy.amin(rounded_predictions), -1e-6)
     self.assertTrue(rounded_predictions.shape == shape)
     recall = cf.evaluator.calculate_recall(ratings, cf.get_predictions())
     self.assertTrue(-1e-6 <= recall <= 1 + 1e-6)
     random_user = int(numpy.random.random() * self.users)
     random_item = int(numpy.random.random() * self.documents)
     random_prediction = cf.predict(random_user, random_item)
     self.assertTrue(isinstance(random_prediction, numpy.float64))
    # Example 3
    def run_collaborative(self):
        """
        Train an ALS collaborative-filtering model, print its evaluation
        report, overall recall, and recall@1.
        """
        model = CollaborativeFiltering(self.initializer, self.evaluator,
                                       self.hyperparameters, self.options,
                                       self.verbose, self.load_matrices,
                                       self.dump)

        model.train()
        model.get_evaluation_report()
        rounded = model.rounded_predictions()
        recall = model.evaluator.calculate_recall(model.ratings, rounded)
        print(recall)
        recall_at_1 = model.evaluator.recall_at_x(1, model.get_predictions(),
                                                  model.test_data, rounded)
        print(recall_at_1)