Example No. 1
    def test_interpret_fails_when_embedding_layer_not_found(self):
        inputs = {"sentence": "It was the ending that I hated"}
        vocab = Vocabulary()
        vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
        model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
        predictor = TextClassifierPredictor(model, TextClassificationJsonReader())

        interpreter = SmoothGradient(predictor)
        # The fake model's embedding layer cannot be located automatically,
        # so saliency interpretation fails with a RuntimeError.
        with raises(RuntimeError):
            interpreter.saliency_interpret_from_json(inputs)
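
These snippets are methods excerpted from a pytest test class, so their imports and class scaffolding are omitted. A minimal sketch of that scaffolding, assuming the usual AllenNLP module layout (the import location of the Fake* test helpers is test-internal and not reproduced here):

from pytest import raises

from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.data.dataset_readers import TextClassificationJsonReader
from allennlp.interpret.saliency_interpreters import SmoothGradient
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor, TextClassifierPredictor
# FakeModelForTestingInterpret and FakePredictorForTestingInterpret are small
# helpers defined in AllenNLP's own test utilities; their import path is omitted here.


class TestSmoothGradient(AllenNlpTestCase):
    # AllenNlpTestCase provides self.FIXTURES_ROOT, used by the archive-based example below.
    ...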
Example No. 2
    def test_smooth_gradient(self):
        inputs = {"sentence": "It was the ending that I hated"}
        archive = load_archive(self.FIXTURES_ROOT / "basic_classifier" /
                               "serialization" / "model.tar.gz")
        predictor = Predictor.from_archive(archive, "text_classifier")

        interpreter = SmoothGradient(predictor)
        interpretation = interpreter.saliency_interpret_from_json(inputs)
        assert interpretation is not None
        assert "instance_1" in interpretation
        assert "grad_input_1" in interpretation["instance_1"]
        assert len(interpretation["instance_1"]["grad_input_1"]) == 7  # 7 words in input
Example No. 3
    def test_interpret_works_with_custom_embedding_layer(self):
        inputs = {"sentence": "It was the ending that I hated"}
        vocab = Vocabulary()
        vocab.add_tokens_to_namespace([w for w in inputs["sentence"].split(" ")])
        model = FakeModelForTestingInterpret(vocab, max_tokens=len(inputs["sentence"].split(" ")))
        # Unlike Example No. 1, this test predictor handles the model's custom
        # embedding layer, so interpretation succeeds.
        predictor = FakePredictorForTestingInterpret(model, TextClassificationJsonReader())
        interpreter = SmoothGradient(predictor)

        interpretation = interpreter.saliency_interpret_from_json(inputs)

        assert interpretation is not None
        assert "instance_1" in interpretation
        assert "grad_input_1" in interpretation["instance_1"]
        grad_input_1 = interpretation["instance_1"]["grad_input_1"]
        assert len(grad_input_1) == 7  # 7 words in input
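
What makes this variant pass while Example No. 1 fails is that the test predictor tells the interpreter which module to treat as the embedding layer. A minimal sketch of that pattern, assuming the get_interpretable_layer hook exposed by Predictor in AllenNLP 2.x; the class and the custom_embedding_layer attribute are placeholders, not AllenNLP's FakePredictorForTestingInterpret:

import torch

from allennlp.predictors import TextClassifierPredictor


class PredictorWithExplicitEmbeddingLayer(TextClassifierPredictor):
    # Illustrative only: points the saliency interpreters at the module that
    # actually produces the token embeddings.
    def get_interpretable_layer(self) -> torch.nn.Module:
        # "custom_embedding_layer" is a placeholder attribute name.
        return self._model.custom_embedding_layer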