Esempio n. 1
0
def get_anchor(request, pk):
    """Compute and return the ANCHOR explanation for the job with primary key *pk*.

    The Explanation row is created on demand (get_or_create) and the heavy
    lifting is delegated to the module-level ``explanation`` dispatcher.
    """
    job = Job.objects.filter(pk=pk)[0]
    anchor_exp, _ = Explanation.objects.get_or_create(
        type=ExplanationTypes.ANCHOR.value,
        split=job.split,
        predictive_model=job.predictive_model,
        job=job)
    anchor_exp.save()
    return Response(explanation(anchor_exp.id), status=200)
Esempio n. 2
0
def get_shap(request, pk, explanation_target, prefix_target):
    """Return the SHAP explanation for (*explanation_target*, *prefix_target*),
    caching the serialized result on the Explanation row.

    On a cache hit the stored JSON is deserialized and returned; on a miss the
    explanation is computed, stored, and returned.
    """
    job = Job.objects.filter(pk=pk)[0]
    exp, _ = Explanation.objects.get_or_create(
        type=ExplanationTypes.SHAP.value,
        split=job.split,
        predictive_model=job.predictive_model,
        job=job)
    exp.save()

    if 'shap' not in exp.results:
        exp.results.update({'shap': dict()})

    if explanation_target not in exp.results['shap']:
        # BUG FIX: add the missing target key instead of replacing the whole
        # 'shap' dict, which silently discarded every other target's cache.
        exp.results['shap'][explanation_target] = dict()

    if prefix_target in exp.results['shap'][explanation_target]:
        # NOTE(review): the cache is written with orient='values' below but
        # read back with orient='records' — confirm these round-trip.
        return Response(pd.read_json(
            exp.results['shap'][explanation_target][prefix_target],
            typ='series',
            orient='records'),
                        status=200)
    else:
        result = explanation(exp.id, explanation_target, prefix_target)
        exp.results['shap'][explanation_target].update(
            {prefix_target: pd.Series(result).to_json(orient='values')})
        exp.save()
        return Response(result, status=200)
Esempio n. 3
0
def get_lime(request, pk, explanation_target):
    """Return the LIME explanation for *explanation_target*, caching results
    per target on the Explanation row.

    Returns HTTP 416 when the requested target exceeds the maximum supported
    value (``explanation`` signals this with ``error == 'True'`` and puts the
    limit in *result*).
    """
    job = Job.objects.filter(pk=pk)[0]
    exp, _ = Explanation.objects.get_or_create(
        type=ExplanationTypes.LIME.value,
        split=job.split,
        predictive_model=job.predictive_model,
        job=job)
    exp.save()

    if 'lime' not in exp.results:
        exp.results.update({'lime': dict()})

    if explanation_target in exp.results['lime']:
        return Response(exp.results['lime'][explanation_target], status=200)

    error, result = explanation(exp.id, explanation_target)

    if error == 'True':
        # BUG FIX: do not cache the error payload. The original stored
        # *result* (the numeric limit) before this check, so every later
        # request for the same target hit the cache and returned the limit
        # with status 200 instead of this 416.
        return Response(
            {
                'error':
                'Explanation Target cannot be greater than ' + str(result)
            },
            status=status.HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE)

    exp.results['lime'].update({explanation_target: result})
    exp.save()
    return Response(result, status=200)
Esempio n. 4
0
def get_cmfeedback(request, pk, top_k):
    """Compute and return the confusion-matrix-feedback explanation for the
    job with primary key *pk*, limited to the top *top_k* entries.
    """
    job = Job.objects.filter(pk=pk)[0]
    feedback_exp, _ = Explanation.objects.get_or_create(
        type=ExplanationTypes.CMFEEDBACK.value,
        split=job.split,
        predictive_model=job.predictive_model,
        job=job)
    feedback_exp.save()
    return Response(explanation(feedback_exp.id, int(top_k)), status=200)
Esempio n. 5
0
def get_shap(request, pk, explanation_target):
    """Compute and return the SHAP explanation of *explanation_target* for
    the job with primary key *pk* (no caching in this variant).
    """
    job = Job.objects.filter(pk=pk)[0]
    shap_exp, _ = Explanation.objects.get_or_create(
        type=ExplanationTypes.SHAP.value,
        split=job.split,
        predictive_model=job.predictive_model,
        job=job)
    shap_exp.save()
    return Response(explanation(shap_exp.id, explanation_target), status=200)
Esempio n. 6
0
def get_retrain(request, pk):
    """Trigger a retrain-based explanation for the job with primary key *pk*.

    The retrain target is taken from the request body (``request.data``).
    """
    job = Job.objects.filter(pk=pk)[0]
    retrain_exp, _ = Explanation.objects.get_or_create(
        type=ExplanationTypes.RETRAIN.value,
        split=job.split,
        predictive_model=job.predictive_model,
        job=job)
    retrain_exp.save()
    retrain_target = request.data
    return Response(explanation(retrain_exp.id, retrain_target), status=200)
Esempio n. 7
0
def get_anchor(request, pk):
    """Return the ANCHOR explanation for the job with primary key *pk*,
    computing and caching it on the Explanation row on first request.
    """
    job = Job.objects.filter(pk=pk)[0]
    anchor_exp, _ = Explanation.objects.get_or_create(
        type=ExplanationTypes.ANCHOR.value,
        split=job.split,
        predictive_model=job.predictive_model,
        job=job)
    anchor_exp.save()
    # Cache miss: compute, persist, then respond.
    if 'anchor' not in anchor_exp.results:
        result = explanation(anchor_exp.id, explanation_target=None)
        anchor_exp.results['anchor'] = result
        anchor_exp.save()
        return Response(result, status=200)
    # Cache hit: serve the stored result.
    return Response(anchor_exp.results['anchor'], status=200)
Esempio n. 8
0
def get_ice(request, pk, explanation_target):
    """Return the ICE explanation of *explanation_target* for the job with
    primary key *pk*, caching results per target on the Explanation row.
    """
    job = Job.objects.filter(pk=pk)[0]
    ice_exp, _ = Explanation.objects.get_or_create(
        type=ExplanationTypes.ICE.value,
        split=job.split,
        predictive_model=job.predictive_model,
        job=job)
    ice_exp.save()

    ice_cache = ice_exp.results.get('ice')
    if ice_cache is None:
        ice_cache = dict()
        ice_exp.results.update({'ice': ice_cache})

    # Cache hit: serve the stored result for this target.
    if explanation_target in ice_cache:
        return Response(ice_cache[explanation_target], status=200)

    # Cache miss: compute, persist, then respond.
    result = explanation(ice_exp.id, explanation_target)
    ice_cache.update({explanation_target: result})
    ice_exp.save()
    return Response(result, status=200)
Esempio n. 9
0
def progetto_padova():
    JOB = Job.objects.get_or_create(
        status=JobStatuses.CREATED.value,
        type=JobTypes.PREDICTION.value,
        split=Split.objects.get_or_create(  # this creates the split of the log
            type=SplitTypes.SPLIT_DOUBLE.value,
            train_log=create_log(  # this imports the log
                import_log(BASE_DIR + RELATIVE_TRAIN_PATH),
                RELATIVE_TRAIN_PATH,
                BASE_DIR,
                import_in_cache=False),
            test_log=create_log(  # this imports the log
                import_log(BASE_DIR + RELATIVE_VALIDATION_PATH),
                RELATIVE_VALIDATION_PATH,
                BASE_DIR,
                import_in_cache=False))[0],
        encoding=Encoding.objects.
        get_or_create(  # this defines the encoding method
            data_encoding=DataEncodings.LABEL_ENCODER.value,
            value_encoding=ValueEncodings.SIMPLE_INDEX.value,
            add_elapsed_time=False,
            add_remaining_time=False,
            add_executed_events=False,
            add_resources_used=False,
            add_new_traces=False,
            prefix_length=5,
            padding=True,
            task_generation_type=TaskGenerationTypes.ALL_IN_ONE.value,
            features=[])[0],
        labelling=Labelling.objects.get_or_create(  # this defines the label
            type=LabelTypes.ATTRIBUTE_STRING.value,
            attribute_name='label',
            threshold_type=None,
            threshold=None)[0],
        clustering=Clustering.init(ClusteringMethods.NO_CLUSTER.value,
                                   configuration={}),
        predictive_model=PredictiveModel.
        init(  # this defines the predictive model
            get_prediction_method_config(
                PredictiveModels.CLASSIFICATION.value,
                ClassificationMethods.DECISION_TREE.value,
                payload={
                    'max_depth': 2,
                    'min_samples_split': 2,
                    'min_samples_leaf': 2
                })),
        hyperparameter_optimizer=HyperparameterOptimization.init(
            {  # this defines the hyperparameter optimisation procedure
                'type': HyperparameterOptimizationMethods.HYPEROPT.value,
                'max_evaluations': 10,
                'performance_metric': HyperOptAlgorithms.TPE.value,
                'algorithm_type': HyperOptLosses.AUC.value
            }),
        create_models=True)[0]

    # load log
    train_log, test_log, additional_columns = get_train_test_log(JOB.split)

    # encode
    train_df, test_df = encode_label_logs(train_log, test_log, JOB)

    # train + evaluate
    results, model_split = MODEL[JOB.predictive_model.predictive_model][
        ModelActions.BUILD_MODEL_AND_TEST.value](train_df, test_df,
                                                 _init_clusterer(
                                                     JOB.clustering, train_df),
                                                 JOB)

    if JOB.create_models:
        save_models(model_split, JOB)

    # predict
    data_df = pd.concat([train_df, test_df])
    results = MODEL[JOB.predictive_model.predictive_model][
        ModelActions.PREDICT.value](JOB, data_df)
    results = MODEL[JOB.predictive_model.predictive_model][
        ModelActions.PREDICT_PROBA.value](JOB, data_df)

    # lime
    exp = Explanation.objects.get_or_create(
        type=ExplanationTypes.LIME.value,
        split=JOB.
        split,  # this defines the analysed log, you can use a different one from the training one
        predictive_model=JOB.predictive_model,
        job=JOB)[0]
    error, result = explanation(exp.id, int(EXPLANATION_TARGET))