Example #1
    def test_model_performance(self, model):
        train_predictions = model.predict(train.row, train.col)
        test_predictions = model.predict(test.row, test.col)

        # Baseline AUC scores from the in-memory model, used for comparison below.
        trn_pred = roc_auc_score(train.data, train_predictions)
        tst_pred = roc_auc_score(test.data, test_predictions)

        # Performance should be the same after the model is reloaded from disk.
        loaded_model = LightFM.load(TEST_FILE_PATH)

        train_predictions = loaded_model.predict(train.row, train.col)
        test_predictions = loaded_model.predict(test.row, test.col)

        # Use approximate equality because floating-point summation order can make the scores differ slightly.
        assert roc_auc_score(train.data, train_predictions) == pytest.approx(
            trn_pred, 0.0001)
        assert roc_auc_score(test.data, test_predictions) == pytest.approx(
            tst_pred, 0.0001)
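
The assertions above exercise the `LightFM.load` round trip provided by this test suite. As a standalone illustration of the same idea, the sketch below persists a model with pickle (a known-working way to serialize LightFM models) and checks that predictions survive the round trip; the synthetic interaction matrix, the `model.pkl` path, and the tolerance are illustrative assumptions, not part of the original tests.

# A minimal, self-contained sketch of the round-trip check above.
import pickle

import numpy as np
import scipy.sparse as sp
from lightfm import LightFM

rng = np.random.default_rng(0)
# Toy 50-user x 100-item binary interaction matrix (assumption for illustration).
interactions = sp.coo_matrix((rng.random((50, 100)) < 0.1).astype(np.float32))

model = LightFM(loss="warp", random_state=42)
model.fit(interactions, epochs=5)

# Predictions from the in-memory model serve as the baseline.
baseline = model.predict(interactions.row, interactions.col)

# Persist and reload via pickle instead of the save/load API used in the test.
with open("model.pkl", "wb") as fh:
    pickle.dump(model, fh)
with open("model.pkl", "rb") as fh:
    loaded = pickle.load(fh)

# Scores from the reloaded model should match the baseline to within
# floating-point tolerance.
reloaded = loaded.predict(interactions.row, interactions.col)
assert np.allclose(baseline, reloaded, atol=1e-4)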
Example #2
    def test_all_loaded_weights_numpy_arrays(self, model):
        # Load a model from disk without instantiating one first
        loaded_model = LightFM.load(TEST_FILE_PATH)

        for weight_name in model_weights:
            # Each loaded weight should be a numpy array; checking that `.any`
            # is callable is a lightweight proxy for that.
            assert callable(getattr(loaded_model, weight_name).any)
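
`model_weights` is defined elsewhere in the test module. The sketch below assumes it lists LightFM's core fitted weight attributes (the suite's real list may also cover optimizer state) and runs the same check on a freshly fitted model, alongside the more direct isinstance test.

import numpy as np
import scipy.sparse as sp
from lightfm import LightFM

# Assumed weight names; the test suite's own list is defined elsewhere.
model_weights = ("user_embeddings", "user_biases", "item_embeddings", "item_biases")

# Toy 20x20 identity interaction matrix, just enough to fit the model.
interactions = sp.coo_matrix(np.eye(20, dtype=np.float32))
model = LightFM().fit(interactions, epochs=1)

for weight_name in model_weights:
    weight = getattr(model, weight_name)
    # isinstance makes the intent explicit; `.any` callability is what the test uses.
    assert isinstance(weight, np.ndarray)
    assert callable(weight.any)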