Example #1
    def test_excluded(self):
        for value, expected in zip(('', 'old', 'new'),
                                   (set(), {('A', 'B')}, {('B', 'C'),
                                                          ('A', 'C')})):
            lp = linkpred.LinkPred(
                self.config_file(training=True, exclude=value))
            assert_equal({tuple(sorted(p)) for p in lp.excluded}, expected)
        with assert_raises(linkpred.exceptions.LinkPredError):
            lp = linkpred.LinkPred(self.config_file(exclude='bla'))
            lp.excluded  # accessing the property triggers the error
Example #2
    def test_excluded(self):
        for value, expected in zip(("", "old", "new"),
                                   (set(), {("A", "B")}, {("B", "C"),
                                                          ("A", "C")})):
            lp = linkpred.LinkPred(
                self.config_file(training=True, exclude=value))
            assert {tuple(sorted(p)) for p in lp.excluded} == expected
        with pytest.raises(linkpred.exceptions.LinkPredError):
            lp = linkpred.LinkPred(self.config_file(exclude="bla"))
            lp.excluded  # accessing the property triggers the error
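
All of these snippets are methods on a test class from linkpred's test suite, so they lean on a `config_file` helper that is not shown on this page. Based solely on the keys the tests access (`label`, `predictors`, and the `training`/`test`/`exclude`/`output` arguments), a minimal sketch of such a helper might look like the following; the actual fixture in linkpred's repository almost certainly differs (it presumably writes real network files), so treat every key name here as an assumption:

class TestLinkPred:
    # Hypothetical stand-in for the config_file helper the examples rely on;
    # key names below are guesses inferred from the assertions on this page.
    def config_file(self, training=False, test=False, exclude="", output=None):
        config = {"label": "testing", "exclude": exclude,
                  "output": output or [], "predictors": []}
        if training:
            config["training-file"] = "training.net"  # assumed key name
        if test:
            config["test-file"] = "test.net"  # assumed key name
        return config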
Example #3
    def test_init(self):
        lp = linkpred.LinkPred(self.config_file())
        assert_equal(lp.config['label'], 'testing')
        assert lp.training is None

        lp = linkpred.LinkPred(self.config_file(training=True))
        assert_is_instance(lp.training, nx.Graph)
        assert_equal(len(lp.training.nodes()), 3)
        assert_equal(len(lp.training.edges()), 1)
        assert lp.test is None
Example #4
    def test_init(self):
        lp = linkpred.LinkPred(self.config_file())
        assert lp.config["label"] == "testing"
        assert lp.training is None

        lp = linkpred.LinkPred(self.config_file(training=True))
        assert isinstance(lp.training, nx.Graph)
        assert len(lp.training.nodes()) == 3
        assert len(lp.training.edges()) == 1
        assert lp.test is None
Example #5
    def test_process_predictions(self):
        @smokesignal.on('prediction_finished')
        def a(scoresheet, dataset, predictor):
            assert scoresheet.startswith('scoresheet')
            assert predictor.startswith('pred')
            assert_equal(dataset, 'testing')
            a.called = True

        @smokesignal.on('dataset_finished')
        def b(dataset):
            assert_equal(dataset, 'testing')
            b.called = True

        @smokesignal.on('run_finished')
        def c():
            c.called = True

        a.called = b.called = c.called = False
        lp = linkpred.LinkPred(self.config_file())
        lp.predictions = [('pred1', 'scoresheet1'), ('pred2', 'scoresheet2')]
        lp.process_predictions()
        assert a.called
        assert b.called
        assert c.called
        smokesignal.clear_all()
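
The three `@smokesignal.on(...)` decorators above register callbacks for signals that `process_predictions` emits. smokesignal is a small in-process publish/subscribe library: `on(name)` subscribes a callback, `emit(name, *args)` fires every subscriber, and `clear_all()` removes all registrations, which is why the test calls it at the end. A minimal sketch of the mechanism, with the emitting side written out by hand purely for illustration:

import smokesignal


@smokesignal.on('dataset_finished')
def on_done(dataset):
    # runs whenever someone emits 'dataset_finished'
    print('finished', dataset)


# Inside LinkPred.process_predictions() something like this presumably
# fires per dataset; here we emit it ourselves to show the round trip.
smokesignal.emit('dataset_finished', 'testing')  # -> prints "finished testing"
smokesignal.clear_all()  # unsubscribe everything, as the tests do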
Example #6
    def test_predict_all(self):
        # Mock out linkpred.predictors
        class Stub:
            def __init__(self, training, eligible, excluded):
                self.training = training
                self.eligible = eligible
                self.excluded = excluded

            def predict(self, **params):
                self.params = params
                return 'scoresheet'

        linkpred.predictors.A = Stub
        linkpred.predictors.B = Stub

        config = self.config_file(training=True)
        config['predictors'] = [{
            'name': 'A',
            'parameters': {
                'X': 'x'
            },
            'displayname': 'prettyA'
        }, {
            'name': 'B',
            'displayname': 'prettyB'
        }]
        lp = linkpred.LinkPred(config)
        results = list(lp.predict_all())
        assert_equal(results, [('A', 'scoresheet'), ('B', 'scoresheet')])
Example #7
    def test_process_predictions(self):
        @smokesignal.on("prediction_finished")
        def a(scoresheet, dataset, predictor):
            assert scoresheet.startswith("scoresheet")
            assert predictor.startswith("pred")
            assert dataset == "testing"
            a.called = True

        @smokesignal.on("dataset_finished")
        def b(dataset):
            assert dataset == "testing"
            b.called = True

        @smokesignal.on("run_finished")
        def c():
            c.called = True

        a.called = b.called = c.called = False
        lp = linkpred.LinkPred(self.config_file())
        lp.predictions = [("pred1", "scoresheet1"), ("pred2", "scoresheet2")]
        lp.process_predictions()
        assert a.called
        assert b.called
        assert c.called
        smokesignal.clear_all()
Example #8
    def test_predict_all(self):
        # Mock out linkpred.predictors
        class Stub:
            def __init__(self, training, eligible, excluded):
                self.training = training
                self.eligible = eligible
                self.excluded = excluded

            def predict(self, **params):
                self.params = params
                return "scoresheet"

        linkpred.predictors.A = Stub
        linkpred.predictors.B = Stub

        config = self.config_file(training=True)
        config["predictors"] = [
            {
                "name": "A",
                "parameters": {
                    "X": "x"
                },
                "displayname": "prettyA"
            },
            {
                "name": "B",
                "displayname": "prettyB"
            },
        ]
        lp = linkpred.LinkPred(config)
        results = list(lp.predict_all())
        assert results == [("A", "scoresheet"), ("B", "scoresheet")]
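
Note the test-double pattern here: assigning `Stub` to `linkpred.predictors.A` monkey-patches the module for the duration of the test, and nothing restores the original attributes afterwards. A variant using pytest's `monkeypatch` fixture would undo the patch automatically; this is a sketch of that idea, not code from linkpred's suite:

import linkpred


def test_predict_all_isolated(monkeypatch):
    class Stub:  # same stand-in as in Example #8
        def __init__(self, training, eligible, excluded):
            self.training = training

        def predict(self, **params):
            return "scoresheet"

    # monkeypatch restores the original attributes when the test ends
    monkeypatch.setattr(linkpred.predictors, "A", Stub, raising=False)
    monkeypatch.setattr(linkpred.predictors, "B", Stub, raising=False)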
Example #9
    def test_setup_output(self):
        # Make sure this also works if $DISPLAY is not set.
        # Should probably mock this out...

        for name, klass in (
            ('recall-precision', RecallPrecisionPlotter),
            ('f-score', FScorePlotter),
            # Should be able to handle uppercase
            ('ROC', ROCPlotter),
            ('fmax', FMaxListener),
            ('cache-evaluations', CacheEvaluationListener)):
            config = self.config_file(training=True, test=True, output=[name])
            lp = linkpred.LinkPred(config)
            lp.setup_output()
            assert_is_instance(lp.listeners[0], klass)
            smokesignal.clear_all()
        # Has an evaluator been set up?
        assert_equal(len(lp.evaluator.params['relevant']), 1)
        assert_equal(lp.evaluator.params['universe'], 2)
        assert_is_instance(lp.evaluator.params['universe'], int)
Example #10
    def test_setup_output(self):
        # Make sure this also works if $DISPLAY is not set.
        # Should probably mock this out...

        for name, klass in (
            ("recall-precision", RecallPrecisionPlotter),
            ("f-score", FScorePlotter),
            # Should be able to handle uppercase
            ("ROC", ROCPlotter),
            ("fmax", FMaxListener),
            ("cache-evaluations", CacheEvaluationListener),
        ):
            config = self.config_file(training=True, test=True, output=[name])
            lp = linkpred.LinkPred(config)
            lp.setup_output()
            assert isinstance(lp.listeners[0], klass)
            smokesignal.clear_all()
        # Has an evaluator been set up?
        assert len(lp.evaluator.params["relevant"]) == 1
        assert lp.evaluator.params["universe"] == 2
        assert isinstance(lp.evaluator.params["universe"], int)
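
The `$DISPLAY` comment in Examples #9 and #10 exists because the plotter listeners pull in matplotlib, which fails to open an interactive backend on a headless machine. The usual workaround (not shown in the original tests, hence the comment about mocking) is to force a non-interactive backend before anything imports pyplot:

import matplotlib

matplotlib.use("Agg")  # file-only backend; works without $DISPLAY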
Example #11
    def test_setup_output_evaluating_without_test(self):
        lp = linkpred.LinkPred(self.config_file(training=True))
        lp.setup_output()
Example #12
    def test_preprocess_training_and_test(self):
        lp = linkpred.LinkPred(self.config_file(training=True, test=True))
        lp.preprocess()
        assert_equal(set(lp.training.nodes()), {"B"})
        assert_equal(set(lp.test.nodes()), {"B"})
Example #13
    def test_preprocess_only_training(self):
        lp = linkpred.LinkPred(self.config_file(training=True))
        lp.preprocess()
        assert_equal(set(lp.training.nodes()), set("AB"))
Example #14
def test_LinkPred_without_predictors():
    linkpred.LinkPred()
Example #15
    def test_setup_output_evaluating_without_test(self):
        lp = linkpred.LinkPred(self.config_file(training=True))
        with pytest.raises(linkpred.exceptions.LinkPredError):
            lp.setup_output()
Example #16
def test_LinkPred_without_predictors():
    with pytest.raises(linkpred.exceptions.LinkPredError):
        linkpred.LinkPred()
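
Examples #14 and #16 pin down the same behaviour: constructing `LinkPred` with no configuration, and therefore no predictors, raises `LinkPredError`. Judging purely from these tests, a minimal valid configuration needs at least a label and one predictor entry. The sketch below assumes `CommonNeighbours` is an available predictor name and that training data can still be supplied later, as Example #3 suggests:

import linkpred

# Smallest config these tests imply will get past the constructor.
# Predictor name and key layout are assumptions, not documented API.
config = {
    "label": "testing",
    "predictors": [{"name": "CommonNeighbours", "displayname": "CN"}],
}
lp = linkpred.LinkPred(config)  # no training loaded yet; lp.training is None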