import os

from optuna.visualization import (
    plot_intermediate_values,
    plot_optimization_history,
    plot_parallel_coordinate,
    plot_param_importances,
    plot_slice,
)


def make_plots(logdir, study):
    """Save the standard Optuna visualizations for ``study`` under ``logdir``/plots."""
    logdir = f'{logdir}/plots'
    os.makedirs(logdir, exist_ok=True)
    plot_optimization_history(study).write_image(f'{logdir}/history.svg')
    plot_intermediate_values(study).write_image(f'{logdir}/intermediates.svg')
    plot_parallel_coordinate(study).write_image(f'{logdir}/parallel_coordinates.png')
    plot_slice(study).write_image(f'{logdir}/slices.svg')
    plot_param_importances(study).write_image(f'{logdir}/importances.svg')
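# --- Usage sketch (not from the original source) ----------------------------
# A minimal, hypothetical way to exercise make_plots(): the objective below is
# a toy stand-in, and fig.write_image() additionally requires the optional
# `kaleido` package to be installed.
import optuna

def _toy_objective(trial):
    x = trial.suggest_float("x", -10, 10)
    y = trial.suggest_float("y", -10, 10)
    for step in range(5):
        trial.report((x ** 2 + y ** 2) / (step + 1), step)  # fake learning curve
    return x ** 2 + y ** 2

study = optuna.create_study()
study.optimize(_toy_objective, n_trials=20)
make_plots("./logs", study)  # writes history.svg, intermediates.svg, ...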
def test_plot_intermediate_values():
    # type: () -> None

    # Test with no trials.
    study = prepare_study_with_trials(no_trials=True)
    figure = plot_intermediate_values(study)
    assert not figure.data

    def objective(trial, report_intermediate_values):
        # type: (Trial, bool) -> float

        if report_intermediate_values:
            trial.report(1.0, step=0)
            trial.report(2.0, step=1)
        return 0.0

    # Test with a trial that has intermediate values.
    study = create_study()
    study.optimize(lambda t: objective(t, True), n_trials=1)
    figure = plot_intermediate_values(study)
    assert len(figure.data) == 1
    assert figure.data[0].x == (0, 1)
    assert figure.data[0].y == (1.0, 2.0)

    # Test a study with one trial with intermediate values and
    # one trial without intermediate values.
    # Expect the trial with no intermediate values to be ignored.
    study.optimize(lambda t: objective(t, False), n_trials=1)
    assert len(study.trials) == 2
    figure = plot_intermediate_values(study)
    assert len(figure.data) == 1
    assert figure.data[0].x == (0, 1)
    assert figure.data[0].y == (1.0, 2.0)

    # Test a study whose only trial has no intermediate values.
    study = create_study()
    study.optimize(lambda t: objective(t, False), n_trials=1)
    figure = plot_intermediate_values(study)
    assert not figure.data

    # Ignore failed trials.
    def fail_objective(_):
        # type: (Trial) -> float

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
    figure = plot_intermediate_values(study)
    assert not figure.data
def plot_intermediate_values(self, interactive=False, legend=False):
    '''
    Plot the intermediate values of the optimization trials.
    Shows successful and terminated trials.

    Parameters
    ----------
    interactive : bool, optional
        If True, save an interactive HTML plot to the current working
        directory and open it in the default browser. The default is False.
    legend : bool, optional
        Whether to include a legend in the static (non-interactive) plot.
        The default is False.

    Returns
    -------
    None.
    '''
    self._check_refit_status('plot_intermediate_values()')
    validate_plotting_interactive_argument(interactive)
    validate_plotting_legend_argument(legend)

    if interactive:
        from optuna.visualization import plot_intermediate_values

        fig = plot_intermediate_values(self._study)
        fig.write_html("intermediate_values_plot.html")
        try:
            self._display_html('intermediate_values_plot.html')
        except Exception as e:
            print(f'Display html error: {e}')
            print(
                'Intermediate Values Plot is saved to '
                f'{os.path.join(os.getcwd(), "intermediate_values_plot.html")}'
            )
    else:
        from optuna.visualization.matplotlib import plot_intermediate_values
        import matplotlib.pyplot as plt

        fig = plot_intermediate_values(self._study)
        if not legend:
            fig.get_legend().remove()
        plt.show()
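# Usage sketch (hypothetical): `searcher` stands for a fitted instance of the
# class this method belongs to; the class itself is not shown in this excerpt,
# and it must be fitted so that _check_refit_status() passes.
searcher.plot_intermediate_values()                  # static matplotlib plot
searcher.plot_intermediate_values(legend=True)       # static plot with a legend
searcher.plot_intermediate_values(interactive=True)  # HTML plot saved to cwd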
study = optuna.create_study(
    direction="maximize",
    sampler=optuna.samplers.TPESampler(seed=SEED),
    pruner=optuna.pruners.MedianPruner(n_warmup_steps=10),
)
study.optimize(objective, n_trials=100, timeout=600)

###################################################################################################
# Plot functions
# --------------
# Visualize the optimization history. See :func:`~optuna.visualization.plot_optimization_history` for the details.
plot_optimization_history(study)

###################################################################################################
# Visualize the learning curves of the trials. See :func:`~optuna.visualization.plot_intermediate_values` for the details.
plot_intermediate_values(study)

###################################################################################################
# Visualize high-dimensional parameter relationships. See :func:`~optuna.visualization.plot_parallel_coordinate` for the details.
plot_parallel_coordinate(study)

###################################################################################################
# Select parameters to visualize.
plot_parallel_coordinate(study, params=["bagging_freq", "bagging_fraction"])

###################################################################################################
# Visualize hyperparameter relationships. See :func:`~optuna.visualization.plot_contour` for the details.
plot_contour(study)

###################################################################################################
# Select parameters to visualize.
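###################################################################################################
# The ``objective`` optimized above is defined earlier in the tutorial and is not shown in this
# excerpt. A minimal, hypothetical stand-in consistent with the parameter names plotted above
# (``bagging_freq``, ``bagging_fraction``) could look like the sketch below; the metric is a toy
# expression, not the tutorial's real training loop.


def _objective_sketch(trial):
    bagging_freq = trial.suggest_int("bagging_freq", 1, 7)
    bagging_fraction = trial.suggest_float("bagging_fraction", 0.4, 1.0)
    score = 0.0
    for step in range(20):
        score += bagging_fraction / bagging_freq  # stand-in for a real metric
        trial.report(score, step)                 # feeds plot_intermediate_values
        if trial.should_prune():
            raise optuna.TrialPruned()
    return score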
            raise optuna.TrialPruned()

    return value


if __name__ == "__main__":
    study = optuna.create_study(direction="maximize", pruner=optuna.pruners.MedianPruner())
    study.optimize(objective, n_trials=100, timeout=600)

    # Visualize the optimization history.
    plot_optimization_history(study).show()

    # Visualize the learning curves of the trials.
    plot_intermediate_values(study).show()

    # Visualize high-dimensional parameter relationships.
    plot_parallel_coordinate(study).show()

    # Select parameters to visualize.
    plot_parallel_coordinate(study, params=["lr_init", "n_units_l0"]).show()

    # Visualize hyperparameter relationships.
    plot_contour(study).show()

    # Select parameters to visualize.
    plot_contour(study, params=["n_units_l0", "n_units_l1"]).show()

    # Visualize individual hyperparameters.
    plot_slice(study).show()
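# --- Hypothetical reconstruction (not from the original source) --------------
# The objective whose tail appears at the top of this snippet is truncated.
# A minimal head consistent with the parameter names passed to the plot calls
# (lr_init, n_units_l0, n_units_l1) might look like this; the per-step value
# is a synthetic stand-in for the real training and evaluation code.
import optuna


def objective(trial):
    lr_init = trial.suggest_float("lr_init", 1e-5, 1e-1, log=True)
    n_units_l0 = trial.suggest_int("n_units_l0", 4, 128)
    n_units_l1 = trial.suggest_int("n_units_l1", 4, 128)

    value = 0.0
    for step in range(10):
        # Stand-in for one epoch of training + evaluation.
        value += lr_init * (n_units_l0 + n_units_l1) / (step + 1)
        trial.report(value, step)
        if trial.should_prune():
            raise optuna.TrialPruned()
    return value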
def ml_mlp_mul_ms(station_name="종로구"):
    print("Start Multivariate MLP Mean Seasonality Decomposition (MSE) Model")
    targets = ["PM10", "PM25"]
    # targets = ["SO2", "CO", "O3", "NO2", "PM10", "PM25",
    #            "temp", "u", "v", "pres", "humid", "prep", "snow"]

    # 24 * 14 = 336
    # sample_size = 336
    sample_size = 48
    output_size = 24

    # To debug, set fast_dev_run = True and keep n_trials small.
    fast_dev_run = False
    n_trials = 128
    # fast_dev_run = True
    # n_trials = 1

    # Hyperparameters
    epoch_size = 500
    batch_size = 64
    learning_rate = 1e-3

    # Blocked Cross Validation
    # (neglect the small overlap between train_dates and valid_dates)
    # 11y = ((2y, 0.5y), (2y, 0.5y), (2y, 0.5y), (2.5y, 1y))
    train_dates = [
        (dt.datetime(2008, 1, 4, 1).astimezone(SEOULTZ),
         dt.datetime(2009, 12, 31, 23).astimezone(SEOULTZ)),
        (dt.datetime(2010, 7, 1, 0).astimezone(SEOULTZ),
         dt.datetime(2012, 6, 30, 23).astimezone(SEOULTZ)),
        (dt.datetime(2013, 1, 1, 0).astimezone(SEOULTZ),
         dt.datetime(2014, 12, 31, 23).astimezone(SEOULTZ)),
        (dt.datetime(2015, 7, 1, 0).astimezone(SEOULTZ),
         dt.datetime(2017, 12, 31, 23).astimezone(SEOULTZ))]
    valid_dates = [
        (dt.datetime(2010, 1, 1, 0).astimezone(SEOULTZ),
         dt.datetime(2010, 6, 30, 23).astimezone(SEOULTZ)),
        (dt.datetime(2012, 7, 1, 0).astimezone(SEOULTZ),
         dt.datetime(2012, 12, 31, 23).astimezone(SEOULTZ)),
        (dt.datetime(2015, 1, 1, 0).astimezone(SEOULTZ),
         dt.datetime(2015, 6, 30, 23).astimezone(SEOULTZ)),
        (dt.datetime(2018, 1, 1, 0).astimezone(SEOULTZ),
         dt.datetime(2018, 12, 31, 23).astimezone(SEOULTZ))]
    train_valid_fdate = dt.datetime(2008, 1, 3, 1).astimezone(SEOULTZ)
    train_valid_tdate = dt.datetime(2018, 12, 31, 23).astimezone(SEOULTZ)

    # Debug: a single, shorter fold.
    if fast_dev_run:
        train_dates = [
            (dt.datetime(2015, 7, 1, 0).astimezone(SEOULTZ),
             dt.datetime(2017, 12, 31, 23).astimezone(SEOULTZ))]
        valid_dates = [
            (dt.datetime(2018, 1, 1, 0).astimezone(SEOULTZ),
             dt.datetime(2018, 12, 31, 23).astimezone(SEOULTZ))]
        train_valid_fdate = dt.datetime(2015, 7, 1, 0).astimezone(SEOULTZ)
        train_valid_tdate = dt.datetime(2018, 12, 31, 23).astimezone(SEOULTZ)

    test_fdate = dt.datetime(2019, 1, 1, 0).astimezone(SEOULTZ)
    test_tdate = dt.datetime(2020, 10, 31, 23).astimezone(SEOULTZ)

    # Check the date-range assumptions: validation follows training in every
    # fold, and the test range starts after both.
    assert len(train_dates) == len(valid_dates)
    for td, vd in zip(train_dates, valid_dates):
        assert vd[0] > td[1]
    assert test_fdate > train_dates[-1][1]
    assert test_fdate > valid_dates[-1][1]

    train_features = [
        "SO2", "CO", "NO2", "PM10", "PM25", "temp", "wind_spd", "wind_cdir",
        "wind_sdir", "pres", "humid", "prep"]
    train_features_periodic = [
        "SO2", "CO", "NO2", "PM10", "PM25", "temp", "wind_spd", "wind_cdir",
        "wind_sdir", "pres", "humid"]
    train_features_nonperiodic = ["prep"]

    for target in targets:
        print("Training " + target + "...")
        output_dir = Path(f"/mnt/data/MLPMSMultivariate/{station_name}/{target}/")
        Path.mkdir(output_dir, parents=True, exist_ok=True)
        model_dir = output_dir / "models"
        Path.mkdir(model_dir, parents=True, exist_ok=True)
        log_dir = output_dir / "log"
        Path.mkdir(log_dir, parents=True, exist_ok=True)

        _df_h = data.load_imputed(HOURLY_DATA_PATH)
        df_h = _df_h.query(
            'stationCode == "' + str(SEOUL_STATIONS[station_name]) + '"')

        if station_name == '종로구' and \
                not Path("/input/python/input_jongno_imputed_hourly_pandas.csv").is_file():
            # Cache the imputed result.
            df_h.to_csv("/input/python/input_jongno_imputed_hourly_pandas.csv")

        # Construct a dataset over the whole train+validation range to fit the
        # seasonality decomposition and the scalers.
        print("Construct Train/Validation Sets...", flush=True)
        train_valid_dataset = construct_dataset(
            train_valid_fdate, train_valid_tdate,
            filepath=HOURLY_DATA_PATH, station_name=station_name,
            target=target, sample_size=sample_size,
            output_size=output_size, transform=False)
        # Compute seasonality.
        train_valid_dataset.preprocess()

        # For Blocked Cross Validation: load the datasets for the given date
        # ranges, transform them with the scalers fitted on the
        # train+validation set, and collect them in tuples.
        print("Construct Training Sets...", flush=True)
        train_datasets = tuple(
            construct_dataset(
                td[0], td[1],
                scaler_X=train_valid_dataset.scaler_X,
                scaler_Y=train_valid_dataset.scaler_Y,
                filepath=HOURLY_DATA_PATH, station_name=station_name,
                target=target, sample_size=sample_size,
                output_size=output_size, features=train_features,
                features_periodic=train_features_periodic,
                features_nonperiodic=train_features_nonperiodic,
                transform=True)
            for td in train_dates)

        print("Construct Validation Sets...", flush=True)
        valid_datasets = tuple(
            construct_dataset(
                vd[0], vd[1],
                scaler_X=train_valid_dataset.scaler_X,
                scaler_Y=train_valid_dataset.scaler_Y,
                filepath=HOURLY_DATA_PATH, station_name=station_name,
                target=target, sample_size=sample_size,
                output_size=output_size, features=train_features,
                features_periodic=train_features_periodic,
                features_nonperiodic=train_features_nonperiodic,
                transform=True)
            for vd in valid_dates)

        # Just a single test set.
        print("Construct Test Sets...", flush=True)
        test_dataset = construct_dataset(
            test_fdate, test_tdate,
            scaler_X=train_valid_dataset.scaler_X,
            scaler_Y=train_valid_dataset.scaler_Y,
            filepath=HOURLY_DATA_PATH, station_name=station_name,
            target=target, sample_size=sample_size,
            output_size=output_size, features=train_features,
            features_periodic=train_features_periodic,
            features_nonperiodic=train_features_nonperiodic,
            transform=True)

        # Convert the tuples of datasets to ConcatDatasets.
        train_dataset = ConcatDataset(train_datasets)
        val_dataset = ConcatDataset(valid_datasets)

        # num_layers == number of hidden layers
        hparams = Namespace(
            num_layers=1,
            layer_size=128,
            learning_rate=learning_rate,
            batch_size=batch_size)

        def objective(trial):
            model = BaseMLPModel(
                trial=trial,
                hparams=hparams,
                input_size=sample_size * len(train_features),
                sample_size=sample_size,
                output_size=output_size,
                station_name=station_name,
                target=target,
                features=train_features,
                features_periodic=train_features_periodic,
                features_nonperiodic=train_features_nonperiodic,
                train_dataset=train_dataset,
                val_dataset=val_dataset,
                test_dataset=test_dataset,
                scaler_X=train_valid_dataset.scaler_X,
                scaler_Y=train_valid_dataset.scaler_Y,
                output_dir=output_dir)

            # Most basic trainer, uses good defaults. Short max_epochs for the
            # search; pruning is driven by the validation MSE.
            trainer = Trainer(
                gpus=1 if torch.cuda.is_available() else None,
                precision=32,
                min_epochs=1,
                max_epochs=20,
                default_root_dir=output_dir,
                fast_dev_run=fast_dev_run,
                logger=True,
                checkpoint_callback=False,
                callbacks=[PyTorchLightningPruningCallback(trial, monitor="valid/MSE")])

            trainer.fit(model)

            # Don't log hyperparameters here.
            # hyperparameters = model.hparams
            # trainer.logger.log_hyperparams(hyperparameters)

            return trainer.callback_metrics.get("valid/MSE")

        if n_trials > 1:
            study = optuna.create_study(direction="minimize")
            # Seed the search with hand-picked configurations.
            study.enqueue_trial({
                'sigma': 1.3, 'num_layers': 4, 'layer_size': 8,
                'learning_rate': learning_rate, 'batch_size': batch_size})
            study.enqueue_trial({
                'sigma': 1.3, 'num_layers': 4, 'layer_size': 32,
                'learning_rate': learning_rate, 'batch_size': batch_size})
            study.enqueue_trial({
                'sigma': 1.3, 'num_layers': 4, 'layer_size': 64,
                'learning_rate': learning_rate, 'batch_size': batch_size})
            study.enqueue_trial({
                'sigma': 1.3, 'num_layers': 4, 'layer_size': 32,
                'learning_rate': learning_rate, 'batch_size': batch_size})
            study.enqueue_trial({
                'sigma': 1.3, 'num_layers': 8, 'layer_size': 32,
                'learning_rate': learning_rate, 'batch_size': batch_size})
            study.enqueue_trial({
                'sigma': 1.3, 'num_layers': 12, 'layer_size': 32,
                'learning_rate': learning_rate, 'batch_size': batch_size})
            study.enqueue_trial({
                'sigma': 0.7, 'num_layers': 4, 'layer_size': 32,
                'learning_rate': learning_rate, 'batch_size': batch_size})
            study.enqueue_trial({
                'sigma': 2.0, 'num_layers': 4, 'layer_size': 32,
                'learning_rate': learning_rate, 'batch_size': batch_size})

            # timeout = 3600 * 36 s = 36 h
            study.optimize(objective, n_trials=n_trials, timeout=3600 * 36)

            trial = study.best_trial

            print("  Value: ", trial.value)
            print("  Params: ")
            for key, value in trial.params.items():
                print("    {}: {}".format(key, value))
            print("sample_size : ", sample_size)
            print("output_size : ", output_size)

            # Plot the optimization results.
            fig_cont1 = optv.plot_contour(study, params=['num_layers', 'layer_size'])
            fig_cont1.write_image(str(output_dir / "contour_num_layers_layer_size.png"))
            fig_cont1.write_image(str(output_dir / "contour_num_layers_layer_size.svg"))

            fig_edf = optv.plot_edf(study)
            fig_edf.write_image(str(output_dir / "edf.png"))
            fig_edf.write_image(str(output_dir / "edf.svg"))

            fig_iv = optv.plot_intermediate_values(study)
            fig_iv.write_image(str(output_dir / "intermediate_values.png"))
            fig_iv.write_image(str(output_dir / "intermediate_values.svg"))

            fig_his = optv.plot_optimization_history(study)
            fig_his.write_image(str(output_dir / "opt_history.png"))
            fig_his.write_image(str(output_dir / "opt_history.svg"))

            fig_pcoord = optv.plot_parallel_coordinate(
                study, params=['num_layers', 'layer_size'])
            fig_pcoord.write_image(str(output_dir / "parallel_coord.png"))
            fig_pcoord.write_image(str(output_dir / "parallel_coord.svg"))

            fig_slice = optv.plot_slice(study, params=['num_layers', 'layer_size'])
            fig_slice.write_image(str(output_dir / "slice.png"))
            fig_slice.write_image(str(output_dir / "slice.svg"))

            # Overwrite hparams with the optimized values.
            hparams.num_layers = trial.params['num_layers']
            hparams.layer_size = trial.params['layer_size']

        dict_hparams = copy.copy(vars(hparams))
        dict_hparams["sample_size"] = sample_size
        dict_hparams["output_size"] = output_size
        # Note: this writes the dict repr, not strict JSON.
        with open(output_dir / 'hparams.json', 'w') as f:
            print(dict_hparams, file=f)
        with open(output_dir / 'hparams.csv', 'w') as f:
            print(pd.DataFrame.from_dict(dict_hparams, orient='index'), file=f)

        # Final model, trained with the best hyperparameters.
        model = BaseMLPModel(
            hparams=hparams,
            input_size=sample_size * len(train_features),
            sample_size=sample_size,
            output_size=output_size,
            station_name=station_name,
            target=target,
            features=train_features,
            features_periodic=train_features_periodic,
            features_nonperiodic=train_features_nonperiodic,
            train_dataset=train_dataset,
            val_dataset=val_dataset,
            test_dataset=test_dataset,
            scaler_X=train_valid_dataset.scaler_X,
            scaler_Y=train_valid_dataset.scaler_Y,
            output_dir=output_dir)

        # Record the model inputs.
        for i, _train_set in enumerate(train_datasets):
            _train_set.to_csv(
                model.data_dir /
                ("df_trainset_{0}_".format(str(i).zfill(2)) + target + ".csv"))
        for i, _valid_set in enumerate(valid_datasets):
            _valid_set.to_csv(
                model.data_dir /
                ("df_validset_{0}_".format(str(i).zfill(2)) + target + ".csv"))
        train_valid_dataset.to_csv(
            model.data_dir / ("df_trainvalidset_" + target + ".csv"))
        test_dataset.to_csv(model.data_dir / ("df_testset_" + target + ".csv"))

        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            os.path.join(model_dir, "train_{epoch}_{valid/MSE:.2f}"),
            monitor="valid/MSE", period=10)
        early_stop_callback = EarlyStopping(
            monitor='valid/MSE', min_delta=0.001, patience=30,
            verbose=True, mode='min')

        # Use datetime.now() so the version tag actually carries hour/minute
        # (date.today() would always render %H-%M as 00-00).
        log_version = dt.datetime.now().strftime("%y%m%d-%H-%M")
        loggers = [
            TensorBoardLogger(log_dir, version=log_version),
            CSVLogger(log_dir, version=log_version)]

        # Most basic trainer, uses good defaults.
        trainer = Trainer(
            gpus=1 if torch.cuda.is_available() else None,
            precision=32,
            min_epochs=1,
            max_epochs=epoch_size,
            default_root_dir=output_dir,
            fast_dev_run=fast_dev_run,
            logger=loggers,
            log_every_n_steps=5,
            flush_logs_every_n_steps=10,
            callbacks=[early_stop_callback],
            checkpoint_callback=checkpoint_callback)

        trainer.fit(model)

        # Run the test set.
        trainer.test(ckpt_path=None)

        # Clean up intermediate checkpoints.
        shutil.rmtree(model_dir)
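# --- Invocation sketch (not from the original source) ------------------------
# Hypothetical entry point: runs the full Optuna search plus final training for
# the default station (종로구 / Jongno-gu). All heavy dependencies (SEOULTZ,
# HOURLY_DATA_PATH, BaseMLPModel, construct_dataset, ...) are assumed to come
# from the surrounding project.
if __name__ == "__main__":
    ml_mlp_mul_ms(station_name="종로구")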