Code example #1
    def take_action(self, parsed_args: Namespace) -> None:

        storage_url = _check_storage_url(self.app_args.storage)

        if parsed_args.study and parsed_args.study_name:
            raise ValueError(
                "Both `--study-name` and the deprecated `--study` was specified. "
                "Please remove the `--study` flag."
            )
        elif parsed_args.study:
            message = "The use of `--study` is deprecated. Please use `--study-name` instead."
            warnings.warn(message, FutureWarning)
            study = optuna.load_study(storage=storage_url, study_name=parsed_args.study)
        elif parsed_args.study_name:
            study = optuna.load_study(storage=storage_url, study_name=parsed_args.study_name)
        else:
            raise ValueError("Missing study name. Please use `--study-name`.")

        if parsed_args.out is None:
            optuna.dashboard._serve(study, parsed_args.bokeh_allow_websocket_origins)
        else:
            optuna.dashboard._write(study, parsed_args.out)
            self.logger.info("Report successfully written to: {}".format(parsed_args.out))
Code example #2
File: cli.py Project: ritvik1512/optuna
    def take_action(self, parsed_args):
        # type: (Namespace) -> None

        storage_url = _check_storage_url(self.app_args.storage)

        if parsed_args.study and parsed_args.study_name:
            raise ValueError(
                "Both `--study-name` and the deprecated `--study` was specified. "
                "Please remove the `--study` flag.")
        elif parsed_args.study:
            message = "The use of `--study` is deprecated. Please use `--study-name` instead."
            warnings.warn(message, FutureWarning)
            study = optuna.load_study(storage=storage_url,
                                      study_name=parsed_args.study)
        elif parsed_args.study_name:
            study = optuna.load_study(storage=storage_url,
                                      study_name=parsed_args.study_name)
        else:
            raise ValueError("Missing study name. Please use `--study-name`.")

        study.set_user_attr(parsed_args.key, parsed_args.value)

        self.logger.info("Attribute successfully written.")
Code example #3
def richards_hyperstudy():
    study_id = str(int(time.time()))
    print("study id: " + study_id)
    dir_path = os.path.dirname(os.path.realpath(__file__))  # used below when exporting results
    # storage_file = os.path.join(dir_path, f"optuna/net.pt")

    # study = optuna.create_study(
    #     direction="minimize",
    #     storage=f"sqlite:///examples/richards/optuna/study_{study_id}.db"
    # )
    study = optuna.load_study(
        study_name="kubernetes",
        storage=(
            f"postgresql://{os.environ['POSTGRES_USER']}:{os.environ['POSTGRES_PASSWORD']}"
            f"@{os.environ['STUDREUM_HYPER_POSTGRES_SERVICE_HOST']}:5432/{os.environ['POSTGRES_DB']}"
        ),
    )
    study.optimize(
        objective,
        n_trials=2,
        # timeout=(30 * 60),  # in seconds, for complete study not per trial
        show_progress_bar=True)

    pruned_trials = [
        t for t in study.trials if t.state == optuna.study.TrialState.PRUNED
    ]
    complete_trials = [
        t for t in study.trials if t.state == optuna.study.TrialState.COMPLETE
    ]

    print("Study statistics: ")
    print("  Number of finished trials: ", len(study.trials))
    print("  Number of pruned trials: ", len(pruned_trials))
    print("  Number of complete trials: ", len(complete_trials))

    print("Best trial:")
    trial = study.best_trial

    print("  Value: ", trial.value)

    print("  Params: ")
    for key, value in trial.params.items():
        print("    {}: {}".format(key, value))

    # time
    now = str(time.time())
    studyname = f"study_{study_id}_t_{now}"
    # export study
    # as csv
    df = study.trials_dataframe()
    df.to_csv(os.path.join(dir_path, f"optuna/{studyname}.csv"))
Code example #4
def load_study_and_return_best_params(optuna_study_name, optuna_storage):
    """Load an existing Optuna study (https://optuna.readthedocs.io) and return its best parameters.

	Parameters
	----------
	optuna_study_name :
		Study’s name. Each study has a unique name as an identifier.

	optuna_storage :
		Database URL such as sqlite:///example.db. Please see also the documentation of create_study() for further details.

	Returns
	-------
	params : object
		Such as
		{
			"data": {
				"center_decay": 0.14281578186170577,
				"use_cyclical": True,
				"vehicle_decay": 0.17590059703294494,
			},
			"model": {
				"boosting_type": "gbdt",
				"colsample_bytree": 0.5279207022532362,
				"learning_rate": 0.012081577123096265,
				"min_child_samples": 45,
				"min_child_weight": 0.007084184412851127,
				"n_estimators": 568,
				"num_leaves": 483,
				"reg_alpha": 0.10389662610302736,
				"reg_lambda": 0.026121337399318097,
				"subsample": 0.9076986626277991,
				"subsample_freq": 0,
			},
		} 
	"""
    if __debug__: print("In load_study_and_return_best_params()")
    start_time = time()

    # Load the existing study
    study = optuna.load_study(study_name=optuna_study_name,
                              storage=optuna_storage)
    print("Optuna study loaded in", utils.time_me(time() - start_time), "\n")

    # Retrieve best parameters
    trial = study.best_trial
    params = utils.sample_params(optuna.trial.FixedTrial(trial.params))

    return params
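A minimal usage sketch of the function above; the study name and storage URL here are assumptions, and the study must already exist in that storage:

# Hypothetical call: assumes a study named "lgbm-tuning" already exists in example.db.
best_params = load_study_and_return_best_params(
    optuna_study_name="lgbm-tuning",
    optuna_storage="sqlite:///example.db",
)
print(best_params["model"])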
Code example #5
    def take_action(self, parsed_args):
        # type: (Namespace) -> None

        config = optuna.config.load_optuna_config(self.app_args.config)
        storage_url = get_storage_url(self.app_args.storage, config)
        study = optuna.load_study(storage=storage_url,
                                  study_name=parsed_args.study)

        if parsed_args.out is None:
            optuna.dashboard.serve(study,
                                   parsed_args.bokeh_allow_websocket_origins)
        else:
            optuna.dashboard.write(study, parsed_args.out)
            self.logger.info('Report successfully written to: {}'.format(
                parsed_args.out))
Code example #6
def test_loaded_trials(storage_url: str) -> None:
    # Place this function before the multi-process tests so that it creates the tables first.

    N_TRIALS = 20
    study = optuna.create_study(study_name=_STUDY_NAME, storage=storage_url)
    # Run optimization
    study.optimize(objective, n_trials=N_TRIALS)

    trials = study.trials
    assert len(trials) == N_TRIALS

    _check_trials(trials)

    # Load the study again to confirm that its trials can be read properly.
    loaded_study = optuna.load_study(study_name=_STUDY_NAME, storage=storage_url)
    _check_trials(loaded_study.trials)
Code example #7
def load_study_and_optimize(st_name, st_storage, n_func_evals, objective_func):
    """
    Function to target for a multiprocessing.Process in class OptunaOptimizer
    :param st_name: str
        Name of the optuna study.
    :param st_storage: str
        Database URL.
    :param n_func_evals: int
        Number of function evaluations for this multiprocessing.Process
    :param objective_func:
        The objective function to be evaluated by the HPO-method
    :return:
    """
    this_study = optuna.load_study(st_name, st_storage)
    this_study.optimize(objective_func, n_func_evals)
    return
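Per its docstring, this function is meant to be the target of a multiprocessing.Process. A minimal sketch of that usage; the study name, storage URL, objective, and process/trial counts below are assumptions:

import multiprocessing

import optuna


def objective(trial):
    x = trial.suggest_float("x", -5, 5)
    return x ** 2


if __name__ == "__main__":
    storage_url = "sqlite:///hpo.db"  # assumed database URL; an RDB server scales better for many workers
    optuna.create_study(study_name="hpo", storage=storage_url, load_if_exists=True)
    workers = [
        multiprocessing.Process(
            target=load_study_and_optimize,
            args=("hpo", storage_url, 25, objective),
        )
        for _ in range(2)
    ]
    for w in workers:
        w.start()
    for w in workers:
        w.join()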
Code example #8
def _load_and_run(study_name, storage_name, objective, n_trials, seed):

    pruner = optuna.pruners.MedianPruner(n_warmup_steps=n_trials // 3, interval_steps=5)
    sampler = optuna.samplers.TPESampler(seed=seed)
    study = optuna.load_study(
        study_name=study_name,
        storage=storage_name,
        sampler=sampler,
        pruner=pruner,
    )
    study.optimize(
        objective,
        n_trials=n_trials,
        catch=(ModelParameterError, ZeroDivisionError, ValueError),
        callbacks=[objective.callback],
    )
Code example #9
File: test_trial.py Project: y0z/optuna
def test_persisted_param() -> None:
    study_name = "my_study"

    with tempfile.NamedTemporaryFile() as fp:
        storage = f"sqlite:///{fp.name}"
        study = create_study(storage=storage, study_name=study_name)
        assert isinstance(study._storage, storages._CachedStorage), "Pre-condition."

        # Test more than one trial. The `_CachedStorage` has a cache miss for the first trial and
        # thus behaves differently for the first trial in comparison to the following ones.
        for _ in range(3):
            trial = study.ask()
            trial.suggest_float("x", 0, 1)

        study = load_study(storage=storage, study_name=study_name)

        assert all("x" in t.params for t in study.trials)
Code example #10
File: testing.py Project: YuzhouLin/current_proj
def test(params):
    #  load_data
    device = torch.device('cpu')
    test_trial = params['outer_f']
    sb_n = params['sb_n']

    # Load testing Data
    inputs, targets = pre.load_data_test_cnn(DATA_PATH, sb_n, test_trial)

    # Load trained model
    model = utils.Model()
    model.load_state_dict(
        torch.load(params['saved_model'], map_location=device))
    model.eval()

    # Get Results
    outputs = model(inputs.to(device)).detach()

    # Load the Testing Engine
    eng = utils.EngineTest(outputs, targets)

    common_keys_for_update_results = ['sb_n', 'edl_used', 'outer_f']

    dict_for_update_acc = \
        {key: params[key] for key in common_keys_for_update_results}
    dict_for_update_R = copy.deepcopy(dict_for_update_acc)

    eng.update_result_acc(dict_for_update_acc)

    # Get the optimal activation function
    if EDL_USED == 0:
        dict_for_update_R['acti_fun'] = 'softmax'
    else:
        # Get from hyperparameter study
        core_path = f'study/ecnn{EDL_USED}/sb{sb_n}'
        study_path = "sqlite:///" + core_path + f"/t{test_trial}.db"
        loaded_study = optuna.load_study(study_name="STUDY",
                                         storage=study_path)
        temp_best_trial = loaded_study.best_trial
        dict_for_update_R['acti_fun'] = temp_best_trial.params['evi_fun']

    print(dict_for_update_R)
    eng.update_result_R(dict_for_update_R)

    return
Code example #11
    def get_study(self, study_name: Optional[str], cache: bool = True) -> Study:
        if study_name is None:
            raise FileNotFoundError()
        with self.lock:
            if study_name not in [study.study_name for study in self._get_study_summary()]:
                raise FileNotFoundError
            if study_name not in self.study_cache:
                study = Study(
                    optuna.load_study(
                        study_name=study_name,
                        storage=self.database_url,
                    )
                )
                if cache:
                    self.study_cache = {study_name: study}
                else:
                    return study
            return self.study_cache[study_name]
Code example #12
    def initialize_optuna(self, should_create: bool = False):
        self.study_name = f'{self.model.__class__.__name__}__{self.policy.__class__.__name__}__{self.reward_strategy}'

        if should_create:
            self.optuna_study = optuna.create_study(
                study_name=self.study_name,
                storage=self.params_db_path,
                load_if_exists=True)
        else:
            self.optuna_study = optuna.load_study(study_name=self.study_name,
                                                  storage=self.params_db_path)

        self.logger.debug('Initialized Optuna:')

        try:
            self.logger.debug(
                f'Best reward in ({len(self.optuna_study.trials)}) trials: {-self.optuna_study.best_value}'
            )
        except:
            self.logger.debug('No trials have been finished yet.')
Code example #13
def test_study_optimize_command():
    # type: () -> None

    with StorageSupplier('new') as storage:
        assert isinstance(storage, RDBStorage)
        storage_url = str(storage.engine.url)

        study_name = storage.get_study_name_from_id(storage.create_new_study())
        command = [
            'optuna', 'study', 'optimize', '--study', study_name, '--n-trials', '10', __file__,
            'objective_func', '--storage', storage_url
        ]
        subprocess.check_call(command)

        study = optuna.load_study(storage=storage_url, study_name=study_name)
        assert len(study.trials) == 10
        assert 'x' in study.best_params

        # Check if a default value of study_name is stored in the storage.
        assert storage.get_study_name_from_id(study.study_id).startswith(DEFAULT_STUDY_NAME_PREFIX)
Code example #14
File: test_cli.py Project: x10-utils/optuna
def test_study_optimize_command(options):
    # type: (List[str]) -> None

    with StorageConfigSupplier(TEST_CONFIG_TEMPLATE) as (storage_url, config_path):
        storage = RDBStorage(storage_url)

        study_name = storage.get_study_name_from_id(storage.create_new_study_id())
        command = [
            'optuna', 'study', 'optimize', '--study', study_name, '--n-trials', '10', __file__,
            'objective_func'
        ]
        command = _add_option(command, '--storage', storage_url, 'storage' in options)
        command = _add_option(command, '--config', config_path, 'config' in options)
        subprocess.check_call(command)

        study = optuna.load_study(storage=storage_url, study_name=study_name)
        assert len(study.trials) == 10
        assert 'x' in study.best_params

        # Check if a default value of study_name is stored in the storage.
        assert storage.get_study_name_from_id(study.study_id).startswith(DEFAULT_STUDY_NAME_PREFIX)
Code example #15
def training(train_x: pd.DataFrame, train_y: pd.DataFrame,
             parameters: Dict[str, Any]):
    """[summary]

    Args:
        train_x (pd.DataFrame): [description]
        train_y (pd.DataFrame): [description]
        model ([type]): [description]
        parameters (Dict[str, Any]): [description]
    """
    def objective(trial):
        model = create_lightgbm_model_from_suggested_params(trial)
        score = fitting(train_x, train_y, model, parameters)
        return score

    study = optuna.load_study(study_name=parameters["study_name"],
                              storage=parameters['database_path'])
    study.optimize(objective, n_trials=100, callbacks=[mlflc])

    # best_model = create_lightgbm_model(study.best_params)
    best_model = lgb.LGBMRegressor(**study.best_params)

    return best_model
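The mlflc callback passed to study.optimize() above is not defined in this snippet. A minimal sketch of how such a callback could be constructed with Optuna's MLflow integration; the tracking URI and metric name are assumptions:

from optuna.integration.mlflow import MLflowCallback

# Hypothetical setup: log every finished trial to a local MLflow tracking store.
mlflc = MLflowCallback(
    tracking_uri="sqlite:///mlruns.db",  # assumed MLflow tracking URI
    metric_name="score",                 # assumed name for the objective value
)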
Code example #16
def worker(
    db_url: str,
    db_dir: str,
    n_trials: int,
    i_gpu: int,
    n_startup_trials: int,
    objective_func: Callable,
):

    sampler = optuna.samplers.TPESampler(
        n_startup_trials=n_startup_trials,  # random trials before activating the sampler
    )

    pruner = optuna.pruners.MedianPruner(
        n_startup_trials=60,  # trials before enabling pruner
        n_warmup_steps=5,  # epochs before enabling pruner
        n_min_trials=15,  # min. trials at each epoch to enable pruner
    )

    # load study
    study = optuna.load_study(
        study_name='study',
        storage=db_url,
        sampler=sampler,
        pruner=pruner,
    )

    lambda_objective = lambda trial: objective_wrapper(trial, db_dir, i_gpu,
                                                       objective_func)

    # run an optimization process
    study.optimize(
        lambda_objective,
        n_trials=n_trials,  # trials for this study.optimize() call
        gc_after_trial=True,
        catch=(AssertionError,),  # fail trials with assertion errors and continue the study
    )
Code example #17
File: cli.py Project: nzw0301/optuna
    def take_action(self, parsed_args: Namespace) -> None:

        warnings.warn(
            "'trials' is an experimental CLI command. The interface can change in the future.",
            ExperimentalWarning,
        )

        storage_url = _check_storage_url(self.app_args.storage)
        study = optuna.load_study(storage=storage_url, study_name=parsed_args.study_name)
        attrs = (
            "number",
            "value" if not study._is_multi_objective() else "values",
            "datetime_start",
            "datetime_complete",
            "duration",
            "params",
            "user_attrs",
            "state",
        )

        records, columns = _dataframe._create_records_and_aggregate_column(study, attrs)
        print(_format_output(records, columns, parsed_args.format, parsed_args.flatten))
Code example #18
File: test_study.py Project: smly/optuna
def test_copy_study_to_study_name(from_storage_mode: str, to_storage_mode: str) -> None:
    with StorageSupplier(from_storage_mode) as from_storage, StorageSupplier(
        to_storage_mode
    ) as to_storage:
        from_study = create_study(study_name="foo", storage=from_storage)
        _ = create_study(study_name="foo", storage=to_storage)

        with pytest.raises(DuplicatedStudyError):
            copy_study(
                from_study_name=from_study.study_name,
                from_storage=from_storage,
                to_storage=to_storage,
            )

        copy_study(
            from_study_name=from_study.study_name,
            from_storage=from_storage,
            to_storage=to_storage,
            to_study_name="bar",
        )

        _ = load_study(study_name="bar", storage=to_storage)
Code example #19
File: test_cli.py Project: nuka137/optuna
def test_tell() -> None:
    study_name = "test_study"

    with tempfile.NamedTemporaryFile() as tf:
        db_url = "sqlite:///{}".format(tf.name)

        output: Any = subprocess.check_output(
            [
                "optuna",
                "ask",
                "--storage",
                db_url,
                "--study-name",
                study_name,
                "--format",
                "json",
            ]
        )
        output = output.decode("utf-8")
        output = json.loads(output)
        trial_number = output["number"]

        output = subprocess.check_output(
            [
                "optuna",
                "tell",
                "--storage",
                db_url,
                "--trial-number",
                str(trial_number),
                "--values",
                "1.2",
            ]
        )

        study = optuna.load_study(storage=db_url, study_name=study_name)
        assert len(study.trials) == 1
        assert study.trials[0].state == TrialState.COMPLETE
        assert study.trials[0].values == [1.2]
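For comparison, a minimal sketch of the same ask-and-tell flow through the Python API instead of the CLI (in-memory storage, so nothing here refers to the temporary database used in the test):

import optuna

study = optuna.create_study(study_name="test_study")
trial = study.ask()          # counterpart of `optuna ask`
study.tell(trial, 1.2)       # counterpart of `optuna tell --values 1.2`
assert study.trials[0].state == optuna.trial.TrialState.COMPLETE
assert study.trials[0].values == [1.2]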
Code example #20
    def pull_scalars(self, force_update=False):
        r"""
        Pull scalars from the ``optuna`` storage

        Args:
            :force_update: Force cache update (Default: ``False``)
        """
        import optuna
        if self.cache and os.path.exists(self.cache_file) and not force_update:
            if self.verbose:
                print("[!] Using cache file %s" % (self.cache_file, ))
            self.scalars = pd.read_csv(self.cache_file)
        else:
            study = optuna.load_study(study_name=self.study_name,
                                      storage=self.storage)
            self.scalars = study.trials_dataframe()
            if self.cache:
                if not os.path.isdir(self.cache_dir): os.mkdir(self.cache_dir)
                if not os.path.isfile(self.cache_file) or force_update:
                    if self.verbose:
                        print("[!] Saving cache file %s" % (self.cache_file, ))
                    self.scalars.to_csv(self.cache_file)
Code example #21
def test_optuna_example(with_commandline: bool, tmpdir: Path) -> None:
    storage = "sqlite:///" + os.path.join(str(tmpdir), "test.db")
    study_name = "test-optuna-example"
    cmd = [
        "example/sphere.py",
        "--multirun",
        "hydra.sweep.dir=" + str(tmpdir),
        "hydra.job.chdir=True",
        "hydra.sweeper.n_trials=20",
        "hydra.sweeper.n_jobs=1",
        f"hydra.sweeper.storage={storage}",
        f"hydra.sweeper.study_name={study_name}",
        "hydra/sweeper/sampler=tpe",
        "hydra.sweeper.sampler.seed=123",
        "~z",
    ]
    if with_commandline:
        cmd += [
            "x=choice(0, 1, 2)",
            "y=0",  # Fixed parameter
        ]
    run_python_script(cmd)
    returns = OmegaConf.load(f"{tmpdir}/optimization_results.yaml")
    study = optuna.load_study(storage=storage, study_name=study_name)
    best_trial = study.best_trial
    assert isinstance(returns, DictConfig)
    assert returns.name == "optuna"
    assert returns["best_params"]["x"] == best_trial.params["x"]
    if with_commandline:
        assert "y" not in returns["best_params"]
        assert "y" not in best_trial.params
    else:
        assert returns["best_params"]["y"] == best_trial.params["y"]
    assert returns["best_value"] == best_trial.value
    # Check the search performance of the TPE sampler.
    # The threshold is the 95th percentile calculated with 1000 different seed values
    # to make the test robust against the detailed implementation of the sampler.
    # See https://github.com/facebookresearch/hydra/pull/1746#discussion_r681549830.
    assert returns["best_value"] <= 2.27
Code example #22
def main():
    parser = ArgumentParser()
    parser.add_argument("target", type=Path, help="target binary file")
    parser.add_argument("--study")
    parser.add_argument("--storage")

    args = parser.parse_args()
    assert args.target.is_file()

    entry = round_robin.hash(args.target)

    if args.study and args.storage:
        print("LOAD STUDY")
        study = optuna.load_study(study_name=args.study, storage=args.storage)
    else:
        print("CREATE STUDY")
        study = optuna.create_study()

    controller = Path('./prime_daihinmin.py').resolve()
    entries = Path('./opt_param').resolve()

    study.optimize(lambda trial: objective(trial, controller, entries, entry),
                   n_jobs=-1)
Code example #23
File: test_study.py Project: smly/optuna
def test_copy_study(from_storage_mode: str, to_storage_mode: str) -> None:
    with StorageSupplier(from_storage_mode) as from_storage, StorageSupplier(
        to_storage_mode
    ) as to_storage:
        from_study = create_study(storage=from_storage, directions=["maximize", "minimize"])
        from_study.set_system_attr("foo", "bar")
        from_study.set_user_attr("baz", "qux")
        from_study.optimize(
            lambda t: (t.suggest_float("x0", 0, 1), t.suggest_float("x1", 0, 1)), n_trials=3
        )

        copy_study(
            from_study_name=from_study.study_name,
            from_storage=from_storage,
            to_storage=to_storage,
        )

        to_study = load_study(study_name=from_study.study_name, storage=to_storage)

        assert to_study.study_name == from_study.study_name
        assert to_study.directions == from_study.directions
        assert to_study.system_attrs == from_study.system_attrs
        assert to_study.user_attrs == from_study.user_attrs
        assert len(to_study.trials) == len(from_study.trials)
Code example #24
def worker():
    ''' Run a single worker '''
    study = op.load_study(storage=storage, study_name=name)
    output = study.optimize(objective, n_trials=n_trials)
    return output
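The storage, name, n_trials, and objective names used by worker() are module-level globals that are not shown in the snippet. A self-contained sketch of the same pattern; every concrete value below is an assumption:

import optuna as op

storage = "sqlite:///example.db"  # assumed shared storage URL
name = "example-study"            # assumed study name
n_trials = 10                     # assumed per-worker trial budget


def objective(trial):
    x = trial.suggest_float("x", -10, 10)
    return x ** 2


def worker():
    ''' Run a single worker '''
    study = op.load_study(storage=storage, study_name=name)
    study.optimize(objective, n_trials=n_trials)


if __name__ == "__main__":
    op.create_study(storage=storage, study_name=name, load_if_exists=True)
    worker()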
Code example #25
def make_study(restart=True):
    try:
        if restart:
            print(f'About to delete {storage}:{name}, you have 5 seconds to intervene!')
            sc.timedsleep(5.0)
            op.delete_study(storage=storage, study_name=name)
    except:
        pass

    output = op.create_study(storage=storage, study_name=name, load_if_exists=not(restart))
    return output


if __name__ == '__main__':
    t0 = sc.tic()
    make_study(restart=False)
    run_workers()
    study = op.load_study(storage=storage, study_name=name)
    best_pars = study.best_params
    T = sc.toc(t0, output=True)
    print(f'Output: {best_pars}, time: {T}')

    sc.heading('Loading data...')
    best = cs.define_pars('best')
    bounds = cs.define_pars('bounds')

    sc.heading('Making results structure...')
    results = []

    failed_trials = []
    for trial in study.trials:
        data = {'index': trial.number, 'mismatch': trial.value}
        for key, val in trial.params.items():
Code example #26
    BATCH_SIZE = 64
    N_EPOCHS = 100
    INPUT_SHAPE = (32, 128)

    tfms = Compose([Resize(INPUT_SHAPE[1], INPUT_SHAPE[0]), ToTensor()])
    train_ds = IAMWords(ROOT, split='train', transform=tfms)
    train_dl = DataLoader(train_ds,
                          batch_size=BATCH_SIZE,
                          shuffle=True,
                          num_workers=2)
    valid_ds = IAMWords(ROOT, split='valid', transform=tfms)
    valid_dl = DataLoader(valid_ds,
                          batch_size=BATCH_SIZE,
                          shuffle=False,
                          num_workers=2)
    test_ds = IAMWords(ROOT, split='test', transform=tfms)
    test_dl = DataLoader(test_ds,
                         batch_size=BATCH_SIZE,
                         shuffle=False,
                         num_workers=2)
    n_classes = len(CHARACTERS) + 1

    study = optuna.load_study('hparam_search',
                              storage='sqlite:///hparam_search.db')
    trial = optuna.trial.FixedTrial(study.best_params)
    net = Net(INPUT_SHAPE, n_classes=n_classes, trial=trial).to(DEVICE)
    trainer = Trainer(trial, n_epochs=N_EPOCHS, device=DEVICE)
    trainer.fit(net, train_dl, valid_dl)
    trainer.test(net, test_dl)
    torch.save(net, 'best_model.pkl')
Code example #27
if __name__ == "__main__":
    # Parse command line arguments
    args = get_argparser().parse_args()

    # Get the experiment's directory to load from
    ex_dir = ask_for_experiment(hparam_list=args.show_hparams) if args.dir is None else args.dir

    # Find and load the Optuna data base
    study, study_name = None, None
    for file in os.listdir(ex_dir):
        if file.endswith(".db"):
            study_name = file[:-3]  # the file is named like the study; just cut the ".db" ending
            storage = f"sqlite:////{osp.join(ex_dir, file)}"
            study = optuna.load_study(study_name, storage)
            break  # assuming there is only one database

    if study is None:
        raise pyrado.PathErr(msg=f"No Optuna study found in {ex_dir}!")

    # Extract the values of all trials (Optuna was set to solve a minimization problem)
    trials = [t for t in study.trials
              if t.value is not None]  # broken trials return None
    values = np.array([t.value for t in trials])
    if study.direction == StudyDirection.MINIMIZE:
        idcs_best = values.argsort()
    else:
        idcs_best = values.argsort()[::-1]

    # Print the best parameter configurations
Code example #28
def plot_study(
    db_file,
    save=False,
    block=True,
):
    db_file = Path(db_file)
    print(f'Opening RDB: {db_file.as_posix()}')

    db_url = f'sqlite:///{db_file.as_posix()}'

    study = optuna.load_study('study', db_url)
    trials = study.get_trials(states=(optuna.trial.TrialState.COMPLETE, ), )
    print(f'Completed trials: {len(trials)}')

    params = tuple(trials[0].params.keys())
    n_params = len(params)

    distribution_limits = {}
    for param in params:
        low = np.amin([trial.distributions[param].low for trial in trials])
        high = np.amax([trial.distributions[param].high for trial in trials])
        distribution_limits[param] = np.array([low, high], dtype=int)

    top_quantile = np.quantile([trial.value for trial in trials], 0.8)
    top_trials = [trial for trial in trials if trial.value >= top_quantile]

    ncols = 4
    nrows = n_params // ncols if n_params % ncols == 0 else n_params // ncols + 1
    fig, axes = plt.subplots(nrows=nrows,
                             ncols=ncols,
                             figsize=(ncols * 3.5, nrows * 2.625))

    for i_param, param in enumerate(params):

        p_lim = distribution_limits[param]
        p_axis = np.arange(p_lim[0], p_lim[1] + 1)

        mean = np.empty(0)
        lb = np.empty(0)
        ub = np.empty(0)
        plt.sca(axes.flat[i_param])
        max_value = np.max(np.array([trial.value for trial in trials]))
        for p_value in p_axis:
            values = np.array([
                trial.value for trial in trials
                if trial.params[param] == p_value
            ])
            for value in values:
                plt.plot(p_value, value, c='0.6', ms=2, marker='o', mfc=None)
            values = np.array([
                trial.value for trial in top_trials
                if trial.params[param] == p_value
            ])
            for value in values:
                plt.plot(p_value, value, 'ok', ms=4)
            if values.size == 0:
                mean = np.append(mean, np.NaN)
                lb = np.append(lb, np.NaN)
                ub = np.append(ub, np.NaN)
                continue
            mean = np.append(mean, np.mean(values))
            if values.size >= 3:
                std = np.std(values)
                lb = np.append(lb, std)
                ub = np.append(ub, std)
            else:
                lb = np.append(lb, mean[-1] - values.min())
                ub = np.append(ub, values.max() - mean[-1])
        plt.errorbar(p_axis,
                     mean,
                     yerr=(lb, ub),
                     marker='s',
                     capsize=6,
                     capthick=1.5,
                     ms=6,
                     lw=1.5,
                     elinewidth=1.5,
                     zorder=1)
        plt.xlabel(param)
        plt.ylabel('Objective value')
        plt.ylim([max_value * 0.5, max_value])
    plt.tight_layout()
    if save:
        filepath = db_file.parent.parent / (db_file.stem + '.pdf')
        print(f'Saving file: {filepath.as_posix()}')
        plt.savefig(filepath, transparent=True)
    plt.show(block=block)
Code example #29
File: train.py Project: ineedaspo1/agent_trade
import optuna
import pandas as pd

from stable_baselines.common.vec_env import SubprocVecEnv, DummyVecEnv
from stable_baselines import A2C, ACKTR, PPO2

from pathlib import Path

from env.BitcoinTradingEnv import BitcoinTradingEnv
from util.indicators import add_indicators


curr_idx = -1
reward_strategy = 'sortino'
input_data_file = 'data/coinbase_hourly.csv'
params_db_file = 'sqlite:///params.db'

study_name = 'ppo2_' + reward_strategy
study = optuna.load_study(study_name=study_name, storage=params_db_file)
params = study.best_trial.params

print("Training PPO2 agent with params:", params)
print("Best trial reward:", -1 * study.best_trial.value)

df = pd.read_csv(input_data_file)
df = df.drop(['Symbol'], axis=1)
df = df.sort_values(['Date'])
df = add_indicators(df.reset_index())

test_len = int(len(df) * 0.2)
train_len = int(len(df)) - test_len

train_df = df[:train_len]
test_df = df[train_len:]
Code example #30
    # Evaluate.
    evaluator = chainer.training.extensions.Evaluator(valid_iter, model)
    evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
    report = evaluator()

    return report["main/accuracy"]


if __name__ == "__main__":
    # Please make sure common study and storage are shared among nodes.
    study_name = sys.argv[1]
    storage_url = sys.argv[2]

    study = optuna.load_study(study_name,
                              storage_url,
                              pruner=optuna.pruners.MedianPruner())
    comm = chainermn.create_communicator("naive")
    if comm.rank == 0:
        print("Study name:", study_name)
        print("Storage URL:", storage_url)
        print("Number of nodes:", comm.size)

    # Run optimization!
    chainermn_study = optuna.integration.ChainerMNStudy(study, comm)
    chainermn_study.optimize(objective, n_trials=25)

    if comm.rank == 0:
        pruned_trials = study.get_trials(deepcopy=False,
                                         states=[TrialState.PRUNED])
        complete_trials = study.get_trials(deepcopy=False,