def fmax(param_dict):
    """Objective for the hyperparameter search.

    Returns the test-set AUC of an IDS classifier built from CBA rules when
    the solution is interpretable, otherwise the negated interpretability
    distance (so that larger is always better for a maximizer).

    `param_dict` carries integer "support"/"confidence" values scaled by
    1000; relies on module-level `txns`, `quant_df`, `quant_df_test`.
    """
    print(param_dict)

    # De-scale the integer search parameters into [0, 1] fractions.
    support = param_dict["support"] / 1000
    confidence = param_dict["confidence"] / 1000
    print(dict(support=support, confidence=confidence))

    # Mine class-association rules with CBA at these thresholds.
    rule_miner = CBA(support=support, confidence=confidence)
    rule_miner.fit(txns)

    # Wrap the mined rules in an IDS classifier and derive its default class.
    classifier = IDSClassifier(
        IDSRuleSet.from_cba_rules(rule_miner.clf.rules).ruleset)
    classifier.quant_dataframe_train = quant_df
    classifier.calculate_default_class()

    ids_model = IDS()
    ids_model.clf = classifier

    # Non-interpretable solutions are penalized by their distance from the
    # interpretability constraints instead of being scored by AUC.
    metrics = ids_model.score_interpretability_metrics(quant_df_test)
    if not is_solution_interpretable(metrics):
        distance = solution_interpretability_distance(metrics)
        print(-distance)
        return -distance

    auc = ids_model.score_auc(quant_df_test)
    print(auc)
    return auc
# Ejemplo n.º 2
# 0
        def fmax(param_dict):
            """Search objective: test-set AUC of an IDS classifier whose
            rules are mined by CBA at the given support/confidence.

            "support"/"confidence" arrive as integers scaled by 1000;
            relies on enclosing-scope `txns`, `quant_df`, `quant_df_test`.
            """
            print(param_dict)

            # Convert the scaled integer parameters back to fractions.
            support = param_dict["support"] / 1000
            confidence = param_dict["confidence"] / 1000
            print(dict(support=support, confidence=confidence))

            # Mine association rules at these thresholds.
            miner = CBA(support=support, confidence=confidence)
            miner.fit(txns)

            # Build the IDS classifier from the mined rules and compute
            # its default class from the training frame.
            clf = IDSClassifier(
                IDSRuleSet.from_cba_rules(miner.clf.rules).ruleset)
            clf.quant_dataframe_train = quant_df
            clf.calculate_default_class()

            model = IDS()
            model.clf = clf

            auc = model.score_auc(quant_df_test)
            print(auc)
            return auc
        # Run the coordinate-ascent search and rebuild the final model at the
        # best parameters it found.
        best_pars = coord_asc.fit()

        print("best_pars:", best_pars)
        # The search works over scaled integers; de-scale back to fractions.
        support, confidence = best_pars[0] / 1000, best_pars[1] / 1000

        cba = CBA(support=support, confidence=confidence)
        cba.fit(txns)
        cba_clf = cba.clf

        ids = IDS()
        ids_clf = IDSClassifier(
            IDSRuleSet.from_cba_rules(cba_clf.rules).ruleset)
        ids_clf.quant_dataframe_train = quant_df
        ids_clf.calculate_default_class()

        ids.clf = ids_clf

        # One benchmark row: AUC (using CBA rule ordering) plus the
        # interpretability metrics for this dataset.
        data = dict(dataset_name=dataset_name,
                    algorithm="pyARC",
                    auc=ids.score_auc(quant_df_test, order_type="cba"),
                    rule_cutoff=rule_cutoff)

        data.update(ids.score_interpretability_metrics(quant_df_test))

        print(data)

        benchmark_data.append(data)

        # Write the rows accumulated so far to CSV.  NOTE(review): the
        # original call was left unterminated (missing ")") — closed here.
        benchmark_data_df = pd.DataFrame(benchmark_data)
        benchmark_data_df.to_csv(
            "output_data/cba_auc_interpretability_distance_coats_benchmark_lymph_anneal.csv")
# Ejemplo n.º 4
# 0
    # Load the train/test splits of the current dataset.
    df_train = pd.read_csv(os.path.join(dataset_path_train, dataset_filename))
    df_test = pd.read_csv(os.path.join(dataset_path_test, dataset_filename))

    # Transaction views for CBA rule mining (txns_test is not used in this
    # fragment).
    txns_train = TransactionDB.from_DataFrame(df_train)
    txns_test = TransactionDB.from_DataFrame(df_test)

    # Quantitative views for scoring (quant_df_train is not used in this
    # fragment).
    quant_df_train = QuantitativeDataFrame(df_train)
    quant_df_test = QuantitativeDataFrame(df_test)

    # Mine association rules with fixed, permissive thresholds.
    cba = CBA(support=0.1, confidence=0.1)
    cba.fit(txns_train)

    rules = cba.clf.rules
    ids_ruleset = IDSRuleSet.from_cba_rules(rules)

    # Build an IDS classifier directly from the CBA rules, reusing CBA's
    # default class instead of recomputing one.
    ids = IDS()
    ids.clf = IDSClassifier(ids_ruleset.ruleset)
    ids.clf.default_class = cba.clf.default_class

    # Score interpretability on the held-out split and record one benchmark
    # row for this dataset/algorithm pair.
    metrics_dict = ids.score_interpretability_metrics(quant_df_test)

    benchmark_dict = dict(dataset_filename=dataset_filename, algorithm="cba")

    benchmark_dict.update(metrics_dict)
    print(benchmark_dict)

    benchmark_list.append(benchmark_dict)

# Collect all per-dataset benchmark rows into one table and persist it,
# dropping the meaningless integer index.
benchmark_df = pd.DataFrame(data=benchmark_list)
benchmark_df.to_csv("output_data/cba_interpretability_benchmark.csv", index=False)