    def test_SelectPercentile(self):
        scenario, sampler, rules = get_configuration_SelectPercentile()

        def evaluate(config, bestconfig):
            try:
                for name, params in config:
                    if name == "SelectPercentile":
                        pipeline = Pipeline(
                            steps=[(name,
                                    feature_selection.SelectPercentile(
                                        **params)),
                                   ("logistic_regression",
                                    linear_model.LogisticRegression())])
                        pipeline = fit_model(pipeline, X_train, y_train)
                        return pipeline.score(X_test, y_test)
                raise Exception("Classifier not found")
            except TimedOutExc:
                return 0
            except Exception:
                return 0

        searcher = Search(scenario, sampler, rules, evaluate)
        searcher.run(
            nb_simulation=10,
            generate_image_path="out/data_preprocessing/SelectPercentile")
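
The tests above rely on fit_model and TimedOutExc, which are not shown in this excerpt. A minimal sketch of what they might look like, assuming a Unix signal-based time limit (the helper and exception names follow the usage above; the implementation is illustrative only):

import signal


class TimedOutExc(Exception):
    """Raised when model fitting exceeds its time budget."""


def fit_model(pipeline, X, y, time_limit=60):
    # Arm an alarm that interrupts fitting after `time_limit` seconds.
    def _handler(signum, frame):
        raise TimedOutExc("model fitting timed out")

    old_handler = signal.signal(signal.SIGALRM, _handler)
    signal.alarm(time_limit)
    try:
        pipeline.fit(X, y)
    finally:
        signal.alarm(0)  # disarm the alarm
        signal.signal(signal.SIGALRM, old_handler)
    return pipeline
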
Example 2
    def test_DictionaryLearning(self):
        scenario, sampler, rules = get_configuration_DictionaryLearning()

        def evaluate(config, bestconfig):
            try:
                for name, params in config:
                    if name == "DictionaryLearning":
                        pipeline = Pipeline(
                            steps=[(name,
                                    decomposition.DictionaryLearning(
                                        **params)),
                                   ("logistic_regression",
                                    linear_model.LogisticRegression())])
                        pipeline = fit_model(pipeline, X_train, y_train)
                        return pipeline.score(X_test, y_test)
                raise Exception("Classifier not found")
            except ValueError:
                return 0
            except TimedOutExc:
                return 0

        searcher = Search(scenario, sampler, rules, evaluate)
        searcher.run(
            nb_simulation=10,
            generate_image_path="out/data_preprocessing/DictionaryLearning")
Example 3
    def test_FunctionTransformer(self):
        scenario, sampler, rules = get_configuration_FunctionTransformer()

        def evaluate(config, bestconfig):
            try:
                for name, params in config:
                    if name == "FunctionTransformer":
                        pipeline = Pipeline(
                            steps=[(name,
                                    preprocessing.FunctionTransformer(
                                        **params)),
                                   ("logistic_regression",
                                    linear_model.LogisticRegression())])
                        pipeline = fit_model(pipeline, X_train, y_train)
                        return pipeline.score(X_test, y_test)
                raise Exception("Classifier not found")
            except ValueError:
                return 0
            except pynisher.TimeoutException:
                return 0

        searcher = Search(scenario, sampler, rules, evaluate)
        searcher.run(
            nb_simulation=10,
            generate_image_path="out/data_preprocessing/FunctionTransformer")
Example 4
    return r2


if __name__ == "__main__":
    # iris = datasets.load_iris()
    # X_train, X_test, y_train, y_test = train_test_split(
    #     iris.data, iris.target, test_size=0.33, random_state=42)

    # environment = Environment(svm_from_cfg,
    #                           config_space=configuration_space.cs,
    #                           mem_in_mb=2048,
    #                           cpu_time_in_s=30,
    #                           seed=42)

    environment = Environment(run_pipeline,
                              config_space=configuration_space.cs,
                              mem_in_mb=8000,
                              cpu_time_in_s=300000000,
                              seed=42)

    mosaic = Search(environment=environment,
                    bandit_policy={
                        "policy_name": "uct",
                        "c_ucb": 1.1
                    },
                    coef_progressive_widening=0.6,
                    verbose=True)
    best_config, best_score = mosaic.run(nb_simulation=10000)
    print("Best config: ", best_config, "best score", best_score)
Example 5
    # For deactivated parameters, the configuration stores None-values.
    # This is not accepted by the SVM, so we remove them.
    cfg = {k: cfg[k] for k in cfg if cfg[k]}
    # We translate boolean values:
    cfg["shrinking"] = True if cfg["shrinking"] == "true" else False
    # And for gamma, we set it to a fixed value or to "auto" (if used)
    if "gamma" in cfg:
        cfg["gamma"] = cfg["gamma_value"] if cfg["gamma"] == "value" else "auto"
        cfg.pop("gamma_value", None)  # Remove "gamma_value"

    clf = svm.SVC(**cfg, random_state=42)

    scores = cross_val_score(clf, X_train, y_train, cv=5)
    return np.mean(scores)  # Mean CV accuracy; higher is better
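
The configuration space cs referenced below is not shown in this excerpt. A sketch of a compatible space, assuming the ConfigSpace library and the shrinking / gamma / gamma_value encoding that svm_from_cfg expects (names match the fragment above; ranges are illustrative):

import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH

cs = CS.ConfigurationSpace(seed=42)
kernel = CSH.CategoricalHyperparameter("kernel", ["rbf", "poly", "sigmoid"], default_value="rbf")
C = CSH.UniformFloatHyperparameter("C", 0.001, 1000.0, default_value=1.0, log=True)
shrinking = CSH.CategoricalHyperparameter("shrinking", ["true", "false"], default_value="true")
gamma = CSH.CategoricalHyperparameter("gamma", ["auto", "value"], default_value="auto")
gamma_value = CSH.UniformFloatHyperparameter("gamma_value", 0.0001, 8.0, default_value=1.0, log=True)
cs.add_hyperparameters([kernel, C, shrinking, gamma, gamma_value])
# gamma_value is only active when gamma == "value".
cs.add_condition(CS.EqualsCondition(gamma_value, gamma, "value"))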


environment = Environment(svm_from_cfg,
                          config_space=cs,
                          mem_in_mb=2048,
                          cpu_time_in_s=30,
                          seed=42)

mosaic = Search(environment=environment,
                policy_arg={
                    "c_ucb": 1.1,
                    "coef_progressive_widening": 0.6
                },
                verbose=True)
best_config, best_score = mosaic.run(nb_simulation=100)
print("Best config: ", best_config, "best score", best_score)
Example 6
    result = np.mean(results)
    print(result)

    return result


pb = st.progress(0)
status_txt = st.empty()
chart = st.line_chart()
status_txt2 = st.empty()

environment = Environment(rf_from_cfg,
                          config_space=cs,
                          mem_in_mb=2048,
                          cpu_time_in_s=30,
                          seed=42,
                          data=x)

mosaic = Search(environment=environment,
                policy_arg={
                    "c_ucb": 1.1,
                    "coef_progressive_widening": 0.6
                },
                verbose=True)
best_config, best_score = mosaic.run(nb_simulation=n_iteration,
                                     pb=pb,
                                     status_txt=status_txt,
                                     chart=chart,
                                     status_txt2=status_txt2)
print("Best config: ", best_config, "best score", best_score)