Example #1
 def output_spec(self) -> lit_types.Spec:
     """Return a spec describing model outputs."""
     output_spec_dict = dict(self._output_types)
     if self.attribution_explainer:
         # Expose per-feature attributions only when an explainer is attached.
         output_spec_dict[
             "feature_attribution"] = lit_types.FeatureSalience(signed=True)
     return output_spec_dict
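
For context, output_spec() extends the label spec with a feature_attribution entry whenever an attribution explainer is configured. Below is a minimal sketch of the spec dicts involved, assuming numeric features and a regression label; the Scalar/RegressionScore choices are illustrative assumptions, not taken from the snippet above.

import collections

from lit_nlp.api import types as lit_types

# Illustrative specs; real models define their own feature and label types.
feature_types = collections.OrderedDict(
    [("feature_1", lit_types.Scalar()), ("feature_2", lit_types.Scalar())])
label_types = collections.OrderedDict(
    [("label", lit_types.RegressionScore())])

# With an explainer attached, output_spec() should resolve to:
expected_output_spec = {
    **label_types,
    "feature_attribution": lit_types.FeatureSalience(signed=True),
}
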
Example #2
    def test_set_up_and_open_lit_with_xai(self, set_up_sequential,
                                          set_up_pandas_dataframe_and_columns,
                                          widget_render_mock):
        pd_dataset, lit_columns = set_up_pandas_dataframe_and_columns
        feature_types, label_types, saved_model_path = set_up_sequential
        lit_dataset, lit_model = set_up_and_open_lit(pd_dataset, lit_columns,
                                                     saved_model_path,
                                                     feature_types,
                                                     label_types)

        expected_examples = [
            {
                "feature_1": 1.0,
                "feature_2": 3.0,
                "label": 1.0
            },
        ]
        test_inputs = [
            {
                "feature_1": 1.0,
                "feature_2": 2.0
            },
        ]
        outputs = lit_model.predict_minibatch(test_inputs)

        assert lit_dataset.spec() == dict(lit_columns)
        assert expected_examples == lit_dataset._examples

        assert lit_model.input_spec() == dict(feature_types)
        assert lit_model.output_spec() == {
            **label_types,
            "feature_attribution": lit_types.FeatureSalience(signed=True),
        }
        assert len(outputs) == 1
        for item in outputs:
            assert item.keys() == {"label", "feature_attribution"}
            assert len(item.values()) == 2

        widget_render_mock.assert_called_once()
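
The fixtures behind this test are not shown here; the following is a hedged sketch of inputs with the same shape, where the column names and values mirror expected_examples above and the lit_types choices are assumptions.

import collections

import pandas as pd
from lit_nlp.api import types as lit_types

# Column names and values mirror expected_examples above; the types are assumed.
pd_dataset = pd.DataFrame(
    {"feature_1": [1.0], "feature_2": [3.0], "label": [1.0]})
lit_columns = collections.OrderedDict([
    ("feature_1", lit_types.Scalar()),
    ("feature_2", lit_types.Scalar()),
    ("label", lit_types.RegressionScore()),
])
# These are the first two arguments passed to set_up_and_open_lit above, which
# wraps them in a LIT dataset/model pair and renders the LIT widget.
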
Example #3
    def test_create_lit_model_from_endpoint_name_with_xai_returns_model(
            self, feature_types, label_types, model_id):
        lit_model = create_lit_model_from_endpoint(_TEST_ENDPOINT_NAME,
                                                   feature_types, label_types,
                                                   model_id)
        test_inputs = [
            {
                "feature_1": 1.0,
                "feature_2": 2.0
            },
        ]
        outputs = lit_model.predict_minibatch(test_inputs)

        assert lit_model.input_spec() == dict(feature_types)
        assert lit_model.output_spec() == {
            **label_types,
            "feature_attribution": lit_types.FeatureSalience(signed=True),
        }
        assert len(outputs) == 1
        for item in outputs:
            assert item.keys() == {"label", "feature_attribution"}
            assert len(item.values()) == 2
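
For a deployed model, the LIT wrapper is built from an endpoint resource name rather than a local SavedModel. A hedged usage sketch mirroring the positional call above; the endpoint name and model ID are placeholders, and feature_types/label_types are spec dicts like those sketched after Example #1.

from google.cloud.aiplatform.explain.lit import create_lit_model_from_endpoint

# Placeholder identifiers; substitute your own endpoint resource name and
# deployed model ID.
endpoint_name = "projects/123/locations/us-central1/endpoints/456"
lit_model = create_lit_model_from_endpoint(
    endpoint_name, feature_types, label_types, "my-model-id")
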
Example #4
    def test_create_lit_model_from_tensorflow_with_xai_returns_model(
            self, set_up_sequential):
        feature_types, label_types, saved_model_path = set_up_sequential
        lit_model = create_lit_model(saved_model_path, feature_types,
                                     label_types)
        test_inputs = [
            {
                "feature_1": 1.0,
                "feature_2": 2.0
            },
        ]
        outputs = lit_model.predict_minibatch(test_inputs)

        assert lit_model.input_spec() == dict(feature_types)
        assert lit_model.output_spec() == {
            **label_types,
            "feature_attribution": lit_types.FeatureSalience(signed=True),
        }
        assert len(outputs) == 1
        for item in outputs:
            assert item.keys() == {"label", "feature_attribution"}
            assert len(item.values()) == 2
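
A hedged end-to-end sketch for the local SavedModel path, mirroring the call pattern exercised by this test; saved_model_path is a placeholder, and feature_types/label_types are spec dicts like those sketched after Example #1.

from google.cloud.aiplatform.explain.lit import create_lit_model

# Placeholder path to an exported TensorFlow SavedModel.
saved_model_path = "/tmp/my_saved_model"
lit_model = create_lit_model(saved_model_path, feature_types, label_types)

outputs = lit_model.predict_minibatch([{"feature_1": 1.0, "feature_2": 2.0}])
for item in outputs:
    # Each prediction carries the label plus signed per-feature attributions.
    print(item["label"], item["feature_attribution"])
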