Example #1
from azureml.interpret.mimic_wrapper import MimicWrapper

# Initialize the MimicWrapper surrogate explainer for the fitted AutoML model
explainer = MimicWrapper(
    ws, automl_explainer_setup_obj.automl_estimator,
    explainable_model=automl_explainer_setup_obj.surrogate_model,
    init_dataset=automl_explainer_setup_obj.X_transform, run=automl_run,
    features=automl_explainer_setup_obj.engineered_feature_names,
    feature_maps=[automl_explainer_setup_obj.feature_map],
    classes=automl_explainer_setup_obj.classes)

# Compute the engineered explanations
engineered_explanations = explainer.explain(
    ['local', 'global'],
    tag='engineered explanations',
    eval_dataset=automl_explainer_setup_obj.X_test_transform)

# Compute the raw explanations
raw_explanations = explainer.explain(
    ['local', 'global'],
    get_raw=True,
    tag='raw explanations',
    raw_feature_names=automl_explainer_setup_obj.raw_feature_names,
    eval_dataset=automl_explainer_setup_obj.X_test_transform)

print("Engineered and raw explanations computed successfully")

from azureml.interpret.scoring.scoring_explainer import TreeScoringExplainer, save

# Initialize the ScoringExplainer
scoring_explainer = TreeScoringExplainer(
    explainer.explainer, feature_maps=[automl_explainer_setup_obj.feature_map])

# Pickle scoring explainer locally
save(scoring_explainer, exist_ok=True)

# Upload the scoring explainer to the automl run
automl_run.upload_file('outputs/scoring_explainer.pkl',
                       'scoring_explainer.pkl')
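
# A possible next step (sketch): register the uploaded scoring explainer with the
# workspace so it can be deployed together with the AutoML model; the model name
# 'scoring_explainer' below is illustrative, not required by the SDK
scoring_explainer_model = automl_run.register_model(
    model_name='scoring_explainer', model_path='outputs/scoring_explainer.pkl')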

# Register the original model trained on the remote compute run
original_model = run.register_model(model_name='amlcompute_deploy_model',
                                    model_path='original_model.pkl')

import os

from interpret.ext.blackbox import TabularExplainer

# create an explainer to validate or debug the model
tabular_explainer = TabularExplainer(model,
                                     initialization_examples=x_train,
                                     features=attritionXData.columns,
                                     classes=["Not leaving", "leaving"],
                                     transformations=transformations)

# explain overall model predictions (global explanation)
# pass the test dataset as evaluation examples; note that it must be a representative sample of the original data
# using more data (e.g. x_train) will likely produce a more accurate explanation, but at a greater time cost
global_explanation = tabular_explainer.explain_global(x_test)
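
# Optionally inspect the ranked global feature importances; these accessor
# methods are part of the interpret-community global explanation object
sorted_global_importance_names = global_explanation.get_ranked_global_names()
sorted_global_importance_values = global_explanation.get_ranked_global_values()
print(dict(zip(sorted_global_importance_names, sorted_global_importance_values)))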

# upload the model explanation data to the run history for storage and visualization
from azureml.interpret import ExplanationClient

client = ExplanationClient.from_run(run)
comment = 'Global explanation on classification model trained on IBM employee attrition dataset'
client.upload_model_explanation(global_explanation, comment=comment)
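
# The uploaded explanation can later be pulled back from the run history, for
# example in another session (sketch; top_k limits how many features are returned)
downloaded_explanation = client.download_model_explanation(top_k=10)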

from azureml.interpret.scoring.scoring_explainer import LinearScoringExplainer

# also create a lightweight explainer for scoring time
scoring_explainer = LinearScoringExplainer(tabular_explainer)
# pickle scoring explainer locally
save(scoring_explainer, directory=OUTPUT_DIR, exist_ok=True)

# upload and register the scoring explainer
run.upload_file('IBM_attrition_explainer.pkl',
                os.path.join(OUTPUT_DIR, 'scoring_explainer.pkl'))
scoring_explainer_model = run.register_model(
    model_name='IBM_attrition_explainer',
    model_path='IBM_attrition_explainer.pkl')
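
# A minimal sketch of scoring-time use: in a scoring script's init(), the
# registered explainer could be loaded back via Model.get_model_path and joblib
# (names below assume the registration above succeeded; shown for illustration only)
import joblib
from azureml.core.model import Model

scoring_explainer_path = Model.get_model_path('IBM_attrition_explainer')
scoring_explainer = joblib.load(scoring_explainer_path)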