Code Example #1
def compute_explanation(model: Model, x, z):
    """ Generates an explanation of how the model came to its result.

    Parameters
    ----------
    model : Model
        a hassbrain model
    x : np.ndarray
        the raw array containing the sensor values
    z : np.ndarray
        the corresponding labels, used as training labels for the explainer

    Returns
    -------
    fig : matplotlib.figure.Figure
        a LIME explanation of the first instance, x[0], as a pyplot figure
    """
    from matplotlib.pyplot import Figure
    wrapped_model = ModelWrapper(model)
    class_names = model.get_state_lbl_lst()
    feature_names = model.get_obs_lbl_lst()

    # every sensor feature is binary, so mark each feature column as categorical
    cat_idxs = list(range(len(feature_names)))
    categorical_names = {}
    for i in cat_idxs:
        categorical_names[i] = {0: "off", 1: "on"}

    from skater.core.local_interpretation.lime.lime_tabular import LimeTabularExplainer
    exp = LimeTabularExplainer(x,
                               mode='classification',
                               training_labels=z,
                               feature_names=feature_names,
                               categorical_features=cat_idxs,
                               categorical_names=categorical_names,
                               class_names=class_names)

    fig = exp.explain_instance(x[0],
                               wrapped_model.predict_proba).as_pyplot_figure()
    return fig
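The snippet above relies on a ModelWrapper adapter from the surrounding project to expose predict_proba for LIME. A minimal usage sketch, assuming model is an already trained hassbrain Model; the data arrays are placeholders and not part of the original example:

import numpy as np

# placeholder inputs: one row per timestep, one binary column per sensor;
# a real call would pass the recorded sensor matrix and its state labels
X = np.random.randint(0, 2, size=(200, len(model.get_obs_lbl_lst())))
z = np.random.randint(0, len(model.get_state_lbl_lst()), size=200)

fig = compute_explanation(model, X, z)
fig.savefig("explanation.png")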
Code Example #2
File: hmm.py Project: tcsvn/pyadlml
    def __init__(self, controller):
        #self._cm = controller # type: Controller
        # training parameters
        self._training_steps = 500
        self._epsilon = None
        self._use_q_fct = False

        Model.__init__(self, "test", controller)
Code Example #3
    def __init__(self, controller):
        self._activity_dur_dist = None
        self._cum_act_dur_dist = None
        Model.__init__(self, "test", controller)
Code Example #4
def compute_feature_importance(model: Model, X: np.ndarray, z: np.ndarray):
    """ Calculates the feature importance, i.e. each feature's impact on the
    model's predictions over the given dataset.

    Parameters
    ----------
    model : Model
        a hassbrain model
    X : array-like
        the observation array the importance should be calculated on
    z : array-like
        the corresponding labels

    Returns
    -------
    res : pd.DataFrame (1, D)
        the importance score of each of the D features
    """
    from skater.model import InMemoryModel
    from skater.core.explanations import Interpretation
    from matplotlib.pyplot import Figure
    wrapped_model = ModelWrapper(model)
    class_names = model.get_state_lbl_lst()
    feature_names = model.get_obs_lbl_lst()

    # this has to be done in order for skater to recognize the values as categorical and not numerical
    X = _boolean2str(X)

    # create interpretation
    interpreter = Interpretation(
        X,
        #class_names=class_names,
        feature_names=feature_names)

    # create model
    # supports classifiers with or without probability scores
    examples = X[:10]
    skater_model = InMemoryModel(
        wrapped_model.predict,
        #target_names=class_names,
        feature_names=feature_names,
        model_type='classifier',
        unique_values=class_names,
        probability=False,
        examples=examples)

    # only do this for cross_entropy
    #train_z = onehot(train_z, model.K)
    interpreter.load_data(X, training_labels=z, feature_names=feature_names)

    # todo: flag for deletion (3 lines below)
    #    check whether this can safely be deleted
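    # marks every feature as non-numeric in skater's feature_info so the binary
    # sensor values are treated as categorical rather than binned numerically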
    tmp = interpreter.data_set.feature_info
    for key, val in tmp.items():
        val['numeric'] = False
    fig, axes = interpreter.feature_importance.save_plot_feature_importance(
        skater_model,
        ascending=True,
        ax=None,
        progressbar=False,
        # model-scoring: difference in log_loss or MAE of training_labels
        # given perturbations. Note this very rarely makes any significant
        # difference
        method='model-scoring')
    # cross entropy or f1 ('f1', 'cross_entropy')
    #scorer_type='cross_entropy') # type: Figure, axes
    #scorer_type='f1') # type: Figure, axes

    # cross_entropy yields zero

    fig.show()
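
The commented-out cross-entropy path above refers to an onehot helper that is not shown in this excerpt. A minimal sketch of such a helper, assuming model.K is the number of hidden states and z holds integer state indices (the real project may implement it differently):

import numpy as np

def onehot(z, K):
    """ Encodes integer labels z of shape (N,) as a one-hot matrix of shape (N, K). """
    z = np.asarray(z, dtype=int)
    out = np.zeros((len(z), K), dtype=float)
    out[np.arange(len(z)), z] = 1.0
    return out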