Example #1
def get_model_info():
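    # Resolve the configured model (full model id, or saved model id plus
    # version) into a ModelAccessor and return its target classes, prediction
    # type, input features and evaluation metric as JSON.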
    try:
        logger.info('Retrieving model data...')
        fmi = get_webapp_config().get("trainedModelFullModelId")
        if fmi is None:
            model = Model(get_webapp_config()["modelId"])
            version_id = get_webapp_config().get("versionId")
            original_model_handler = get_model_handler(model, version_id)
        else:
            original_model_handler = PredictionModelInformationHandler.from_full_model_id(
                fmi)
        stressor.model_accessor = ModelAccessor(original_model_handler)

        return jsonify(
            target_classes=stressor.model_accessor.get_target_classes(),
            pred_type=stressor.model_accessor.get_prediction_type(),
            features={
                feature: preprocessing["type"]
                for feature, preprocessing in stressor.model_accessor.get_per_feature().items()
                if preprocessing["role"] == "INPUT"
            },
            metric=stressor.model_accessor.get_evaluation_metric())
    except:
        logger.error(traceback.format_exc())
        return traceback.format_exc(), 500
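Most of the endpoints in these examples repeat the same resolution block: read "trainedModelFullModelId" from the webapp config and, if it is absent, fall back to an explicit saved-model id and version. Below is a minimal sketch of a shared helper for that block, assuming the names already used in these snippets (get_model_handler, ModelAccessor, PredictionModelInformationHandler) are importable in the backend; the name resolve_model_accessor is hypothetical and not part of the original code.

import dataiku
from dataiku.customwebapp import get_webapp_config

# get_model_handler, ModelAccessor and PredictionModelInformationHandler are
# assumed to be imported as in the snippets above; their module paths are not
# shown in these examples.


def resolve_model_accessor(model_id=None, version_id=None):
    # Prefer the full model id stored in the webapp config; otherwise fall
    # back to the saved-model id and version passed by the caller.
    fmi = get_webapp_config().get("trainedModelFullModelId")
    if fmi is None:
        model = dataiku.Model(model_id)
        model_handler = get_model_handler(model, version_id=version_id)
    else:
        model_handler = PredictionModelInformationHandler.from_full_model_id(fmi)
    return ModelAccessor(model_handler)

Each handler could then start with model_accessor = resolve_model_accessor(model_id, version_id) instead of repeating the block.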
Example #2
def get_drift_metrics():
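    # Load the selected test dataset (capped at MAX_NUM_ROW rows), resolve
    # the model into a ModelAccessor, fit a DriftAnalyzer on the new data and
    # return the drift metrics as JSON.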
    try:
        model_id = request.args.get('model_id')
        version_id = request.args.get('version_id')
        test_set = request.args.get('test_set')
        new_test_df = dataiku.Dataset(test_set).get_dataframe(
            bool_as_str=True, limit=ModelDriftConstants.MAX_NUM_ROW)

        fmi = get_webapp_config().get("trainedModelFullModelId")
        if fmi is None:
            model = dataiku.Model(model_id)
            model_handler = get_model_handler(model, version_id=version_id)
            model_accessor = ModelAccessor(model_handler)
        else:
            original_model_handler = PredictionModelInformationHandler.from_full_model_id(
                fmi)
            model_accessor = ModelAccessor(original_model_handler)

        drifter = DriftAnalyzer()
        drifter.fit(new_test_df, model_accessor=model_accessor)
        return json.dumps(drifter.get_drift_metrics_for_webapp(),
                          allow_nan=False,
                          default=convert_numpy_int64_to_int)
    except:
        logger.error(traceback.format_exc())
        return traceback.format_exc(), 500
Example #3
def get_histograms(model_id, version_id, advantageous_outcome,
                   sensitive_column):
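    # Predict on the model's original test set (rows missing the sensitive
    # column are dropped) and build per-group histogram data for the
    # advantageous outcome.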

    fmi = get_webapp_config().get("trainedModelFullModelId")
    if fmi is None:
        model = dataiku.Model(model_id)
        model_handler = get_model_handler(model, version_id=version_id)
        model_accessor = ModelAccessor(model_handler)
    else:
        original_model_handler = PredictionModelInformationHandler.from_full_model_id(
            fmi)
        model_accessor = ModelAccessor(original_model_handler)

    raw_test_df = model_accessor.get_original_test_df()
    test_df = raw_test_df.dropna(subset=[sensitive_column])
    target_variable = model_accessor.get_target_variable()

    y_true = test_df.loc[:, target_variable]
    pred_df = model_accessor.predict(test_df)
    y_pred = pred_df.loc[:, DkuWebappConstants.PREDICTION]

    advantageous_outcome_proba_col = 'proba_{}'.format(advantageous_outcome)
    y_pred_proba = pred_df.loc[:, advantageous_outcome_proba_col]
    sensitive_feature_values = test_df[sensitive_column]

    return get_histogram_data(y_true, y_pred, y_pred_proba,
                              advantageous_outcome, sensitive_feature_values)
Example #4
def get_value_list(model_id, version_id, column):
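    # Return the distinct non-NaN values of the chosen column from the
    # original test set, rejecting columns with too many categories.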
    try:
        if column == 'undefined' or column == 'null':
            raise ValueError('Please choose a column.')

        fmi = get_webapp_config().get("trainedModelFullModelId")
        if fmi is None:
            model = dataiku.Model(model_id)
            model_handler = get_model_handler(model, version_id=version_id)
            model_accessor = ModelAccessor(model_handler)
        else:
            original_model_handler = PredictionModelInformationHandler.from_full_model_id(
                fmi)
            model_accessor = ModelAccessor(original_model_handler)

        test_df = model_accessor.get_original_test_df()
        value_list = test_df[column].unique().tolist()  # should check for categorical variables?
        filtered_value_list = remove_nan_from_list(value_list)

        if len(filtered_value_list) > DkuWebappConstants.MAX_NUM_CATEGORIES:
            raise ValueError(
                'Column "{}" is either of numerical type or has too many categories ({}); at most {} are allowed.'
                .format(column, len(filtered_value_list),
                        DkuWebappConstants.MAX_NUM_CATEGORIES))

        return simplejson.dumps(filtered_value_list,
                                ignore_nan=True,
                                default=convert_numpy_int64_to_int)
    except:
        logger.error("When trying to call get-value-list endpoint: {}.".format(
            traceback.format_exc()))
        return "{}Check backend log for more details.".format(
            traceback.format_exc()), 500
Example #5
def check_model_type(model_id, version_id):
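    # Reject regression and clustering models: the report only supports
    # (binary) classification.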
    try:
        fmi = get_webapp_config().get("trainedModelFullModelId")
        if fmi is None:
            model = dataiku.Model(model_id)
            model_handler = get_model_handler(model, version_id=version_id)
            model_accessor = ModelAccessor(model_handler)
        else:
            original_model_handler = PredictionModelInformationHandler.from_full_model_id(
                fmi)
            model_accessor = ModelAccessor(original_model_handler)

        if model_accessor.get_prediction_type() in [
                DkuModelAccessorConstants.REGRRSSION_TYPE,
                DkuModelAccessorConstants.CLUSTERING_TYPE
        ]:
            raise ValueError(
                'Model Fairness Report only supports binary classification models.')
        return 'ok'
    except:
        logger.error(
            "When trying to call check-model-type endpoint: {}.".format(
                traceback.format_exc()))
        return "{}Check backend log for more details.".format(
            traceback.format_exc()), 500
Example #6
def get_outcome_list(model_id, version_id):
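    # Return the distinct non-NaN target values observed in the original
    # test set.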
    try:
        fmi = get_webapp_config().get("trainedModelFullModelId")
        if fmi is None:
            model = dataiku.Model(model_id)
            model_handler = get_model_handler(model, version_id=version_id)
            model_accessor = ModelAccessor(model_handler)
        else:
            original_model_handler = PredictionModelInformationHandler.from_full_model_id(
                fmi)
            model_accessor = ModelAccessor(original_model_handler)

        # note: sometimes when the dataset is very unbalanced, the original_test_df does not have all the target values
        test_df = model_accessor.get_original_test_df()
        target = model_accessor.get_target_variable()
        outcome_list = test_df[target].unique().tolist()
        filtered_outcome_list = remove_nan_from_list(outcome_list)
        return simplejson.dumps(filtered_outcome_list,
                                ignore_nan=True,
                                default=convert_numpy_int64_to_int)
    except:
        logger.error(
            "When trying to call get-outcome-list endpoint: {}.".format(
                traceback.format_exc()))
        return "{}Check backend log for more details.".format(
            traceback.format_exc()), 500
Example #7
def get_original_model_info():
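    # Resolve the original model, register it with the error analyzer and
    # return its display name and whether it is a regression model.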
    try:
        fmi = get_webapp_config().get("trainedModelFullModelId")
        if fmi is None:
            model = Model(get_webapp_config()["modelId"])
            version_id = get_webapp_config().get("versionId")
            original_model_handler = get_model_handler(model, version_id)
            name = model.get_name()
        else:
            original_model_handler = PredictionModelInformationHandler.from_full_model_id(
                fmi)
            name = (DSSMLTask.from_full_model_id(api_client(), fmi)
                    .get_trained_model_snippet(fmi)
                    .get("userMeta", {})
                    .get("name", fmi))
        handler.set_error_analyzer(original_model_handler)
        return jsonify(modelName=name,
                       isRegression='REGRESSION'
                       in original_model_handler.get_prediction_type())
    except:
        LOGGER.error(traceback.format_exc())
        return traceback.format_exc(), 500
Example #8
def get_feature_list(model_id, version_id):
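    # Return the model's selected and rejected feature names as JSON.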
    try:
        fmi = get_webapp_config().get("trainedModelFullModelId")
        if fmi is None:
            model = dataiku.Model(model_id)
            model_handler = get_model_handler(model, version_id=version_id)
            model_accessor = ModelAccessor(model_handler)
        else:
            original_model_handler = PredictionModelInformationHandler.from_full_model_id(
                fmi)
            model_accessor = ModelAccessor(original_model_handler)

        column_list = model_accessor.get_selected_and_rejected_features()
        return simplejson.dumps(column_list,
                                ignore_nan=True,
                                default=convert_numpy_int64_to_int)
    except:
        logger.error(
            "When trying to call get-feature-list endpoint: {}.".format(
                traceback.format_exc()))
        return "{}Check backend log for more details.".format(
            traceback.format_exc()), 500
Example #9
def get_metrics(model_id, version_id, advantageous_outcome, sensitive_column,
                reference_group):
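    # Compute per-group fairness metrics (demographic parity, equalized odds,
    # predictive rate parity) on the original test set, the largest disparity
    # with respect to the reference group, and per-group population sizes.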

    fmi = get_webapp_config().get("trainedModelFullModelId")
    if fmi is None:
        model = dataiku.Model(model_id)
        model_handler = get_model_handler(model, version_id=version_id)
        model_accessor = ModelAccessor(model_handler)
    else:
        original_model_handler = PredictionModelInformationHandler.from_full_model_id(
            fmi)
        model_accessor = ModelAccessor(original_model_handler)

    test_df = model_accessor.get_original_test_df()
    target_variable = model_accessor.get_target_variable()
    test_df.dropna(subset=[sensitive_column, target_variable],
                   how='any',
                   inplace=True)

    y_true = test_df.loc[:, target_variable]
    pred_df = model_accessor.predict(test_df)
    y_pred = pred_df.loc[:, DkuWebappConstants.PREDICTION]

    try:  # check whether the sensitive column can be cast to a numeric type
        if np.array_equal(test_df[sensitive_column],
                          test_df[sensitive_column].astype(int)):
            test_df[sensitive_column] = test_df[sensitive_column].astype(int)
        if test_df[sensitive_column].dtypes == int:
            reference_group = int(reference_group)
        if test_df[sensitive_column].dtypes == float:
            reference_group = float(reference_group)
    except Exception as e:
        logger.info('Sensitive column cannot be cast to int: %s', e)

    sensitive_feature_values = test_df[sensitive_column]
    model_report = ModelFairnessMetricReport(y_true, y_pred,
                                             sensitive_feature_values,
                                             advantageous_outcome)
    population_names = sensitive_feature_values.unique()

    metric_dct = {}
    disparity_dct = {}
    for metric_func in ModelFairnessMetric.get_available_metric_functions():
        metric_summary = model_report.compute_metric_per_group(
            metric_function=metric_func)
        metric_dct[metric_func.__name__] = metric_summary.get(
            DkuFairnessConstants.BY_GROUP)
        metric_diff = model_report.compute_group_difference_from_summary(
            metric_summary, reference_group=reference_group)
        diff_by_group = metric_diff.get(DkuFairnessConstants.BY_GROUP)
        v = np.array(list(diff_by_group.values())).flatten()
        v_without_nan = [x for x in v if not np.isnan(x)]
        if len(v_without_nan) > 0:
            max_disparity = max(v_without_nan, key=abs)
            disparity_dct[metric_func.__name__] = max_disparity
        else:
            disparity_dct[metric_func.__name__] = 'N/A'  # for display purpose

    populations = []
    for name in population_names:
        dct = {
            DkuWebappConstants.NAME: name,
            DkuWebappConstants.SIZE: len(test_df[test_df[sensitive_column] == name])
        }
        for m, v in metric_dct.items():
            # these keys are only used here, so they are kept as plain strings rather than constants
            if m == 'demographic_parity':
                dct['positive_rate'] = v[name]
            if m == 'equalized_odds':
                dct['true_positive_rate'], dct['false_positive_rate'] = v[name]
            if m == 'predictive_rate_parity':
                dct['positive_predictive_value'] = v[name]

        # replace NaN with a placeholder string (a dot here) for display purposes
        for k, v in dct.items():
            if not isinstance(v, str) and np.isnan(v):
                dct[k] = '.'
        populations.append(dct)

    label_list = model_report.get_label_list()

    sorted_populations = sorted(
        populations,
        key=lambda population: population[DkuWebappConstants.SIZE],
        reverse=True)

    return sorted_populations, disparity_dct, label_list
Example #10
from flask import request
from distutils.util import strtobool
import json
import traceback
import dataiku
from dataiku.customwebapp import get_webapp_config

from design_experiment.sample_size import min_sample_size, z_value
from helpers import save_parameters
from constants import Parameters
from dku_tools import get_output_folder

config_settings = get_webapp_config()
project_key = dataiku.default_project_key()
client = dataiku.api_client()


@app.route('/sample_size', methods=['POST'])
def get_sample_size():
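    # Parse the POSTed percentages, compute the minimum A/B group sizes with
    # min_sample_size and scale them by the expected reach.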
    try:
        config = json.loads(request.data)
        baseline_conversion_rate = float(config.get(Parameters.BCR.value))/100
        minimum_detectable_effect = float(config.get(Parameters.MDE.value))/100
        alpha = 1-float(config.get(Parameters.SIG_LEVEL.value))/100
        power = float(config.get(Parameters.POWER.value))/100
        ratio = float(config.get(Parameters.RATIO.value))/100
        reach = float(config.get(Parameters.REACH.value))/100
        two_tailed = strtobool(config.get(Parameters.TAIL.value))
        sample_size_A, sample_size_B = min_sample_size(baseline_conversion_rate, minimum_detectable_effect, alpha, power, ratio, two_tailed)
        sample_size_A = round(sample_size_A / reach)
        sample_size_B = round(sample_size_B / reach)
        # the snippet is truncated here; a minimal assumed completion that
        # returns the computed sizes and mirrors the error handling used by
        # the other endpoints above
        return json.dumps({"sample_size_A": sample_size_A,
                           "sample_size_B": sample_size_B})
    except:
        return traceback.format_exc(), 500
Example #11
import dataiku
from dataiku.customwebapp import get_webapp_config
from lal.api import define_endpoints
from lal.app_configuration import prepare_datasets
from lal.classifiers.image_object_classifier import ImageObjectClassifier
from lal.handlers.dataiku_lal_handler import DataikuLALHandler

config = get_webapp_config()

labels_schema = [{"name": "path", "type": "string"}]
prepare_datasets(config, labels_schema)

unlabeled_mf = dataiku.Folder(config["unlabeled"])

queries_df = (dataiku.Dataset(config["queries_ds"]).get_dataframe()
              if "queries_ds" in config else None)

define_endpoints(
    app,
    DataikuLALHandler(ImageObjectClassifier(unlabeled_mf, queries_df, config),
                      config))
Example #12
import logging

import dataiku
from dataiku.customwebapp import get_webapp_config
from flask import jsonify

from dku_idtb_decision_tree.tree import Tree
from dku_idtb_decision_tree.tree_factory import TreeFactory
from dku_idtb_decision_tree.node import Node
from dku_idtb_decision_tree.autosplit import autosplit
from dku_idtb_compatibility.utils import safe_str, safe_write_json

from dataiku.core.dkujson import DKUJSONEncoder
app.json_encoder = DKUJSONEncoder

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO,
                    format="IDTB %(levelname)s - %(message)s")

# initialization of the backend
try:
    folder_name = get_webapp_config()["input_folder"]
except KeyError:
    raise SystemError(
        "No folder has been chosen in the settings of the webapp")

folder = dataiku.Folder(folder_name)
factory = TreeFactory()

#cache = {}


@app.route("/get-datasets")
def get_datasets():
    try:
        return jsonify(datasets=dataiku.Dataset.list())
    except: