Example #1
 def test_save_top_level(self, monkeypatch):
     with monkeypatch.context() as m:
         m.setattr(sys, "argv",
                   ["", "--config", "./tests/conf/yaml/test_optuna.yaml"])
         # Optuna config -- spock will internally spawn the study object for the define-and-run style,
         # which is returned as part of the call to sample()
         optuna_config = OptunaTunerConfig(
             study_name="Iris Logistic Regression Tests",
             direction="maximize")
         now = datetime.datetime.now()
         curr_int_time = int(
             f"{now.year}{now.month}{now.day}{now.hour}{now.second}")
         config = (ConfigArgBuilder(LogisticRegressionHP).tuner(
             optuna_config).save(
                 user_specified_path="/tmp",
                 file_name=f"pytest.{curr_int_time}",
             ).sample())
         # Verify the sample was written out to file
         yaml_regex = re.compile(
             fr"pytest\.{curr_int_time}\."
             fr"[a-fA-F0-9]{{8}}-[a-fA-F0-9]{{4}}-[a-fA-F0-9]{{4}}-"
             fr"[a-fA-F0-9]{{4}}-[a-fA-F0-9]{{12}}\.spock\.cfg\.yaml")
         matches = [
             re.fullmatch(yaml_regex, val) for val in os.listdir("/tmp")
             if re.fullmatch(yaml_regex, val) is not None
         ]
         fname = f"/tmp/{matches[0].string}"
         assert os.path.exists(fname)
         with open(fname, "r") as fin:
             print(fin.read())
         # Clean up if assert is good
         if os.path.exists(fname):
             os.remove(fname)
         return config
Example #2
 def arg_builder(monkeypatch):
     with monkeypatch.context() as m:
         m.setattr(
             sys,
             "argv",
             [
                 "",
                 "--config",
                 "./tests/conf/yaml/test_hp.yaml",
                 "--HPOne.hp_int.bounds",
                 "(1, 1000)",
                 "--HPOne.hp_int_log.bounds",
                 "(1, 1000)",
                 "--HPOne.hp_float.bounds",
                 "(1.0, 1000.0)",
                 "--HPOne.hp_float_log.bounds",
                 "(1.0, 1000.0)",
                 "--HPTwo.hp_choice_int.choices",
                 "[1, 2, 4, 8]",
                 "--HPTwo.hp_choice_float.choices",
                 "[1.0, 2.0, 4.0, 8.0]",
                 "--HPTwo.hp_choice_str.choices",
                 "['is', 'it ', 'me', 'youre', 'looking', 'for']",
             ],
         )
         optuna_config = OptunaTunerConfig(study_name="Tests",
                                           direction="maximize")
         config = ConfigArgBuilder(HPOne, HPTwo).tuner(optuna_config)
         return config
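The dotted override names above (--HPOne.hp_int.bounds, --HPTwo.hp_choice_str.choices, etc.) map onto attributes of @spockTuner-decorated classes. As a hedged sketch only -- the real test classes live in the spock test suite and may differ -- HPOne and HPTwo could be defined with spock's tune add-on types like this:

from spock.addons.tune import ChoiceHyperParameter, RangeHyperParameter, spockTuner


@spockTuner
class HPOne:
    # range parameters -- bounds come from test_hp.yaml or the CLI overrides above
    hp_int: RangeHyperParameter
    hp_int_log: RangeHyperParameter
    hp_float: RangeHyperParameter
    hp_float_log: RangeHyperParameter


@spockTuner
class HPTwo:
    # choice parameters -- choices come from test_hp.yaml or the CLI overrides above
    hp_choice_int: ChoiceHyperParameter
    hp_choice_float: ChoiceHyperParameter
    hp_choice_str: ChoiceHyperParameter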
Example #3
 def test_invalid_cast_choice(self, monkeypatch):
     with monkeypatch.context() as m:
         m.setattr(sys, "argv",
                   ["", "--config", "./tests/conf/yaml/test_hp_cast.yaml"])
         optuna_config = OptunaTunerConfig(study_name="Basic Tests",
                                           direction="maximize")
         with pytest.raises(TypeError):
             config = ConfigArgBuilder(HPOne, HPTwo).tuner(optuna_config)
Example #4
 def test_sample_before_tuner(self, monkeypatch):
     with monkeypatch.context() as m:
         m.setattr(sys, "argv",
                   ["", "--config", "./tests/conf/yaml/test_hp.yaml"])
         optuna_config = OptunaTunerConfig(study_name="Basic Tests",
                                           direction="maximize")
         with pytest.raises(RuntimeError):
             config = ConfigArgBuilder(HPOne, HPTwo).sample()
Example #5
 def arg_builder(monkeypatch):
     with monkeypatch.context() as m:
         m.setattr(sys, "argv",
                   ["", "--config", "./tests/conf/yaml/test_hp.yaml"])
         optuna_config = OptunaTunerConfig(study_name="Sample Tests",
                                           direction="maximize")
         config = ConfigArgBuilder(HPOne, HPTwo).tuner(optuna_config)
         return config
Example #6
 def test_unknown_arg(self, monkeypatch):
     with monkeypatch.context() as m:
         m.setattr(
             sys, "argv",
             ["", "--config", "./tests/conf/yaml/test_hp_unknown_arg.yaml"])
         optuna_config = OptunaTunerConfig(study_name="Basic Tests",
                                           direction="maximize")
         with pytest.raises(ValueError):
             config = ConfigArgBuilder(HPOne, HPTwo).tuner(optuna_config)
             return config
Example #7
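The script below omits its imports. A plausible set is sketched here: the sklearn paths are standard, while the spock paths are assumptions based on spock's documented layout (SpockBuilder appears to be the newer name for the ConfigArgBuilder used in the other examples), and LogisticRegressionHP / BasicParams are user-defined spock classes sketched after the example.

# sklearn pieces used by main()
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

# spock pieces -- import paths assumed, adjust to your spock version
from spock import SpockBuilder
from spock.addons.tune import OptunaTunerConfig
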
def main():
    # Load the iris data
    X, y = load_iris(return_X_y=True)

    # Split the Iris data
    X_train, X_valid, y_train, y_valid = train_test_split(X, y)

    # Optuna config -- spock will internally spawn the study object for the define-and-run style, which can be
    # accessed later via the tuner_status property on the SpockBuilder object
    optuna_config = OptunaTunerConfig(study_name="Iris Logistic Regression",
                                      direction="maximize")

    # Use the builder to set up the configuration
    # Call tuner to indicate that we are going to do some HP tuning -- passing in the Optuna tuner config
    attrs_obj = (SpockBuilder(
        LogisticRegressionHP,
        BasicParams,
        desc="Example Logistic Regression Hyper-Parameter Tuning -- Optuna Backend",
    ).tuner(tuner_config=optuna_config).save(
        user_specified_path="/tmp/optuna"))

    # We need some of the fixed parameters first, so call generate() to grab all of the fixed params
    # prior to starting the sampling process
    fixed_params = attrs_obj.generate()

    # Now we iterate through a bunch of optuna trials
    for _ in range(fixed_params.BasicParams.n_trials):
        # The crux of spock support -- call save w/ the add_tuner_sample flag to write the current draw to file and
        # then call sample to return the composed Spockspace of the fixed parameters and the sampled parameters.
        # Under the hood spock uses the define-and-run Optuna interface -- it handles the underlying 'ask' call
        # and returns the trial object needed for the 'tell' call on the study object via the tuner_status dictionary
        hp_attrs = attrs_obj.save(add_tuner_sample=True,
                                  user_specified_path="/tmp/optuna").sample()
        # Use the currently sampled parameters in a simple LogisticRegression from sklearn
        clf = LogisticRegression(
            C=hp_attrs.LogisticRegressionHP.c,
            solver=hp_attrs.LogisticRegressionHP.solver,
            max_iter=hp_attrs.BasicParams.max_iter,
        )
        clf.fit(X_train, y_train)
        val_acc = clf.score(X_valid, y_valid)
        # Get the status of the tuner -- this dict contains all the objects needed to update the study
        tuner_status = attrs_obj.tuner_status
        # Pull the study and trial objects out of the status dictionary and report the metric back to the study
        # via its tell call
        tuner_status["study"].tell(tuner_status["trial"], val_acc)
        # Always save the current best set of hyper-parameters
        attrs_obj.save_best(user_specified_path="/tmp/optuna")

    # Grab the best config and metric
    best_config, best_metric = attrs_obj.best
    print(f"Best HP Config:\n{best_config}")
    print(f"Best Metric: {best_metric}")
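main() also assumes two user-defined spock classes: a @spockTuner class holding the hyper-parameters that Optuna samples and a plain @spock class holding the fixed parameters. A minimal sketch, with assumed import paths and field layouts (the real definitions ship with the example and may differ):

# import paths assumed -- adjust to your spock version
from spock import spock
from spock.addons.tune import ChoiceHyperParameter, RangeHyperParameter, spockTuner


@spockTuner
class LogisticRegressionHP:
    # sampled every trial -- bounds/choices are supplied by the YAML/CLI config
    c: RangeHyperParameter
    solver: ChoiceHyperParameter


@spock
class BasicParams:
    # fixed across trials -- available immediately from generate()
    n_trials: int
    max_iter: int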
Example #8
 def arg_builder(monkeypatch):
     with monkeypatch.context() as m:
         m.setattr(sys, "argv",
                   ["", "--config", "./tests/conf/yaml/test_optuna.yaml"])
         # Optuna config -- spock will internally spawn the study object for the define-and-run style,
         # which is returned as part of the call to sample()
         optuna_config = OptunaTunerConfig(
             study_name="Iris Logistic Regression Tests",
             direction="maximize")
         config = ConfigArgBuilder(LogisticRegressionHP).tuner(
             optuna_config)
         return config
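The arg_builder functions in these examples are presumably registered as pytest fixtures (e.g. decorated with @pytest.fixture, which the excerpts omit) so tests can request the prepared builder by name. A hypothetical consumer of the fixture above, shown only as a sketch, would draw a sample from the define-and-run study and inspect the composed Spockspace:

def test_sample_draw(arg_builder):
    # sample() composes the fixed parameters with one Optuna draw of the tuned ones
    sampled = arg_builder.sample()
    assert hasattr(sampled, "LogisticRegressionHP")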