Code Example #1
def hyperparam_optimization(x, y):
    nBits = x.shape[1]
    frac_test = 0.3
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=frac_test)

    TRAIN_EPOCHS = 20
    MAX_TRIALS = 20
    EXECUTIONS_PER_TRIAL = 5

    b_tuner = BayesianOptimization(
        make_optimizer_model(nBits),
        objective='val_mean_squared_error',
        max_trials=MAX_TRIALS,
        executions_per_trial=EXECUTIONS_PER_TRIAL,
        directory='test_dir',
        project_name='tune_optimizer',
        seed=1
    )
    b_tuner.search_space_summary()

    b_tuner.search( x=x_train, y=y_train, epochs=TRAIN_EPOCHS, validation_data=(x_test, y_test))
    b_tuner.results_summary()
    best_model = b_tuner.get_best_models()[0]

    return best_model
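Example #1 relies on a make_optimizer_model(nBits) helper defined elsewhere in the project. As a minimal sketch, assuming the helper returns an hp-accepting build function for a small regression network over nBits input features (the layer sizes and learning-rate range below are illustrative, not taken from the original):

from tensorflow import keras
from tensorflow.keras import layers


def make_optimizer_model(nBits):
    # Return a build function that Keras Tuner will call with a HyperParameters object.
    def build(hp):
        model = keras.Sequential()
        model.add(layers.Dense(hp.Int('units', min_value=32, max_value=256, step=32),
                               activation='relu', input_shape=(nBits,)))
        model.add(layers.Dense(1))
        model.compile(
            optimizer=keras.optimizers.Adam(
                hp.Float('learning_rate', min_value=1e-4, max_value=1e-2, sampling='log')),
            loss='mean_squared_error',
            metrics=['mean_squared_error'])
        return model

    return build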
Code Example #2
    def bayesian(self):
        tuner = BayesianOptimization(
            self.build_model,
            objective='mean_squared_error',
            max_trials=self.max_trials,  # more than 2 and it crashes
            num_initial_points=self.initial_points,
            seed=self.seed,
            overwrite=True,
            directory=self.kt_dir)

        tuner.search(
            x=self.data,
            y=self.train_labels,
            epochs=self.epochs,
            batch_size=self.batch_size,
            validation_data=(self.test_data, self.test_labels),
        )

        return tuner
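One possible way to consume the tuner returned by bayesian() (the instance name opt is hypothetical; the accessors are standard Keras Tuner methods):

opt = ...  # instance of the class that defines bayesian()
tuner = opt.bayesian()
best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]
best_model = tuner.get_best_models(num_models=1)[0]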
Code Example #3
def train(args):
    print(args)
    global_conf.config_tf2(args)
    checkpoint_dir, log_dir, export_dir = create_env_directories(
        args, get_experiment_name(args))

    train_dataset = dataloader.get_dataset(
        args['dataloader'],
        transformation_list=args['dataloader']['train_list'],
        num_classes=args["num_classes"],
        split=args['dataloader']['train_split_id'])
    val_dataset = dataloader.get_dataset(
        args['dataloader'],
        transformation_list=args['dataloader']['val_list'],
        num_classes=args["num_classes"],
        split=args['dataloader']['val_split_id'])

    setup_mp(args)
    build_model_fn = get_model(args)
    callbacks = get_callbacks(args, log_dir)

    # tuner = Hyperband(build_model_fn,
    #                   objective='val_accuracy',
    #                   max_epochs=args['num_epochs'],
    #                   hyperband_iterations=10e100,
    #                   directory=checkpoint_dir)

    tuner = BayesianOptimization(build_model_fn,
                                 objective='val_accuracy',
                                 max_trials=100000,
                                 num_initial_points=10,
                                 directory=checkpoint_dir)

    tuner.search_space_summary()
    tuner.search(x=train_dataset,
                 validation_data=val_dataset,
                 callbacks=callbacks,
                 epochs=args['num_epochs'])
    tuner.results_summary()
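Example #3 reads a handful of keys from the args configuration dict. A minimal sketch of the expected shape with placeholder values (the real project likely requires additional keys):

args = {
    'num_classes': 10,
    'num_epochs': 50,
    'dataloader': {
        'train_list': [],        # names of training-time transformations
        'val_list': [],          # names of validation-time transformations
        'train_split_id': 'train',
        'val_split_id': 'validation',
    },
}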
Code Example #4
random_accuracy_df = pd.DataFrame(random_model.history.history)

random_accuracy_df[['loss', 'accuracy']].plot()
plt.title('Loss & Accuracy Per EPOCH For Random Model')
plt.xlabel('EPOCH')
plt.ylabel('Accuracy')
plt.show()

bayesian_tuner = BayesianOptimization(hypermodel,
                                      objective='accuracy',
                                      max_trials=10,
                                      seed=10,
                                      project_name='divorce test')

bayesian_tuner.search(X_train.values,
                      y_train.values.flatten(),
                      epochs=10,
                      validation_data=(X_test.values, y_test.values.flatten()))

bayesian_params = bayesian_tuner.get_best_hyperparameters()[0]

bayesian_model = bayesian_tuner.hypermodel.build(bayesian_params)

bayesian_model.fit(X.values, y.values.flatten(), epochs=15)

bayesian_accuracy_df = pd.DataFrame(bayesian_model.history.history)

bayesian_accuracy_df[['loss', 'accuracy']].plot()
plt.title('Loss & Accuracy Per EPOCH For Bayesian Optimisation Model')
plt.xlabel('EPOCH')
plt.ylabel('Accuracy')
plt.show()
Code Example #5
def BuildDNN(train_samples,
             dev_samples,
             test_samples,
             norm_id,
             model_path,
             lags=None,
             seed=None,
             batch_size=512,
             n_epochs=5,
             max_trials=5,
             executions_per_trial=3,
             max_hidden_layers=3,
             min_units=16,
             max_units=64,
             unit_step=16,
             min_droprate=0.0,
             max_droprate=0.5,
             droprate_step=0.05,
             min_learnrate=1e-4,
             max_learnrate=1e-1,
             n_tune_epochs=5,
             cast_to_zero=True,
             early_stop=True,
             early_stop_patience=10,
             retrain=False,
             warm_up=False,
             initial_epoch=None,
             measurement_time='day',
             measurement_unit='$m^3/s$'):
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    setting_info = {
        "model_path": model_path,
        "lags": lags,
        "seed": seed,
        "batch_size": batch_size,
        "n_epoch": n_epochs,
        "max_trials": max_trials,
        "executions_per_trial": executions_per_trial,
        "max_hidden_layers": max_hidden_layers,
        "min_units": min_units,
        "max_units": max_units,
        "unit_step": unit_step,
        "min_droprate": min_droprate,
        "max_droprate": max_droprate,
        "droprate_step": droprate_step,
        "min_learnrate": min_learnrate,
        "max_learnrate": max_learnrate,
        "n_tune_epochs": n_tune_epochs,
        "cast_to_zero": cast_to_zero,
        "early_stop": early_stop,
        "early_stop_patience": early_stop_patience,
        "retrain": retrain,
    }

    with open(model_path + 'setting.json', 'w') as outfile:
        json.dump(setting_info, outfile)

    sMin = norm_id['series_min']
    sMax = norm_id['series_max']
    # sMin = train_samples.min(axis=0)
    # sMax = train_samples.max(axis=0)
    # train_samples = 2*(train_samples-sMin)/(sMax-sMin)-1
    # dev_samples = 2*(dev_samples-sMin)/(sMax-sMin)-1
    # test_samples = 2*(test_samples-sMin)/(sMax-sMin)-1
    cal_samples = pd.concat([train_samples, dev_samples], axis=0)
    cal_samples = cal_samples.sample(frac=1)
    cal_samples = cal_samples.reset_index(drop=True)
    train_samples = cal_samples.iloc[:train_samples.shape[0]]
    dev_samples = cal_samples.iloc[train_samples.shape[0]:]
    X = cal_samples
    y = (cal_samples.pop('Y')).values
    train_x = train_samples
    train_y = train_samples.pop('Y')
    train_y = train_y.values
    dev_x = dev_samples
    dev_y = dev_samples.pop('Y')
    dev_y = dev_y.values
    test_x = test_samples
    test_y = test_samples.pop('Y')
    test_y = test_y.values

    # Config path to save optimal results
    opt_path = model_path + '\\optimal\\'
    cp_path = model_path + '\\optimal\\checkpoints\\'
    if not os.path.exists(cp_path):
        os.makedirs(cp_path)
    # restore only the latest checkpoint after every update
    checkpoint_path = cp_path + 'cp.h5'
    checkpoint_dir = os.path.dirname(checkpoint_path)
    # Define callbacks
    cp_callback = keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                  save_best_only=True,
                                                  mode='min',
                                                  save_weights_only=True,
                                                  verbose=1)
    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                  min_lr=0.00001,
                                                  factor=0.2,
                                                  verbose=1,
                                                  patience=10,
                                                  mode='min')
    early_stopping = keras.callbacks.EarlyStopping(
        monitor='val_loss',
        mode='min',
        verbose=1,
        patience=early_stop_patience,
        restore_best_weights=True)

    def build_model(hp):
        input_shape = (train_x.shape[1], )
        model = keras.Sequential()
        num_layers = hp.Int('num_layers',
                            min_value=1,
                            max_value=max_hidden_layers,
                            step=1,
                            default=1)
        for i in range(num_layers):
            units = hp.Int('units_' + str(i),
                           min_value=min_units,
                           max_value=max_units,
                           step=unit_step)
            dropout_rate = hp.Float('drop_rate_' + str(i),
                                    min_value=min_droprate,
                                    max_value=max_droprate,
                                    step=droprate_step)
            if i == 0:
                model.add(
                    layers.Dense(units=units,
                                 activation='relu',
                                 input_shape=input_shape))
            else:
                model.add(layers.Dense(units=units, activation='relu'))
            model.add(
                layers.Dropout(rate=dropout_rate, noise_shape=None, seed=seed))
        model.add(layers.Dense(1))
        model.compile(optimizer=keras.optimizers.Adam(
            hp.Float('learning_rate',
                     min_value=min_learnrate,
                     max_value=max_learnrate,
                     sampling='LOG',
                     default=1e-2)),
                      loss='mean_squared_error',
                      metrics=['mean_absolute_error', 'mean_squared_error'])
        return model

    tuner = BayesianOptimization(build_model,
                                 objective='mean_squared_error',
                                 max_trials=max_trials,
                                 executions_per_trial=executions_per_trial,
                                 directory=model_path,
                                 project_name='BayesianOpt')

    tuner.search_space_summary()
    start = time.process_time()
    tuner.search(x=train_x,
                 y=train_y,
                 epochs=n_tune_epochs,
                 validation_data=(dev_x, dev_y),
                 callbacks=[early_stopping])
    end = time.process_time()
    time_cost = end - start
    tuner.results_summary()
    best_hps = tuner.oracle.get_best_trials(num_trials=1)[0].hyperparameters
    model = build_model(best_hps)

    if retrain or not os.path.exists(checkpoint_path):
        history = model.fit(X,
                            y,
                            epochs=n_epochs,
                            batch_size=batch_size,
                            validation_data=(X, y),
                            verbose=1,
                            callbacks=[
                                cp_callback,
                                early_stopping,
                            ])
        hist = pd.DataFrame(history.history)
        hist.to_csv(opt_path + 'PARAMS-CAL-HISTORY.csv')
        plot_history(history, opt_path + 'MAE-HISTORY.png',
                     opt_path + 'MSE-HISTORY.png')
    else:
        model.load_weights(checkpoint_path)

    train_predictions = model.predict(train_x).flatten()
    dev_predictions = model.predict(dev_x).flatten()
    test_predictions = model.predict(test_x).flatten()
    sMax = sMax[sMax.shape[0] - 1]
    sMin = sMin[sMin.shape[0] - 1]
    train_y = np.multiply(train_y + 1, sMax - sMin) / 2 + sMin
    dev_y = np.multiply(dev_y + 1, sMax - sMin) / 2 + sMin
    test_y = np.multiply(test_y + 1, sMax - sMin) / 2 + sMin
    train_predictions = np.multiply(train_predictions + 1,
                                    sMax - sMin) / 2 + sMin
    dev_predictions = np.multiply(dev_predictions + 1, sMax - sMin) / 2 + sMin
    test_predictions = np.multiply(test_predictions + 1,
                                   sMax - sMin) / 2 + sMin
    if cast_to_zero:
        train_predictions[train_predictions < 0.0] = 0.0
        dev_predictions[dev_predictions < 0.0] = 0.0
        test_predictions[test_predictions < 0.0] = 0.0
    dump_pred_results(
        path=opt_path + '/opt_pred.csv',
        train_y=train_y,
        train_predictions=train_predictions,
        dev_y=dev_y,
        dev_predictions=dev_predictions,
        test_y=test_y,
        test_predictions=test_predictions,
        time_cost=time_cost,
    )
    plot_rela_pred(train_y,
                   train_predictions,
                   measurement_time=measurement_time,
                   measurement_unit=measurement_unit,
                   fig_savepath=opt_path + 'TRAIN-PRED.png')
    plot_rela_pred(dev_y,
                   dev_predictions,
                   measurement_time=measurement_time,
                   measurement_unit=measurement_unit,
                   fig_savepath=opt_path + "DEV-PRED.png")
    plot_rela_pred(test_y,
                   test_predictions,
                   measurement_time=measurement_time,
                   measurement_unit=measurement_unit,
                   fig_savepath=opt_path + "TEST-PRED.png")
    plot_error_distribution(test_predictions, test_y,
                            opt_path + 'TEST-ERROR-DSTRI.png')
    plt.show()
Code Example #6
def predict_rem(col_name):
    print(
        'Creating a best model for predicting {} MU and Checking its accuracy with test data'
        .format(col_name))
    import pandas as pd
    import warnings

    warnings.filterwarnings("ignore")

    df3 = pd.read_csv('pycharm_data.csv')
    df3 = df3.drop(columns=['Unnamed: 0', 'date'])
    new_df = df3[col_name].copy()
    new_df

    # create a differenced series
    from pandas import Series

    def difference(dataset, interval=1):
        diff = list()
        for i in range(interval, len(dataset)):
            value = dataset[i] - dataset[i - interval]
            diff.append(value)
        return Series(diff)

    differenced = difference(new_df, 1)
    differenced.head()

    X = []
    y = []
    for i in range(0, differenced.shape[0] - 48):
        X.append(
            differenced.iloc[i:i +
                             48])  # taking 48 rows for training incrementally
        y.append(differenced.iloc[i + 48])

    import numpy as np
    X, y = np.array(X, dtype=np.float32), np.array(y, dtype=np.float32)
    y = np.reshape(y, (len(y), 1))

    X_train, X_test = X[:-480], X[-480:]
    y_train, y_test = y[:-480], y[-480:]

    for i in range(len(y_test)):
        y_test_diff = y_test[i] + new_df[1786 + i]

    X_train = np.reshape(
        X_train,
        (X_train.shape[0], X_train.shape[1], 1))  # reshaping the data to 3d
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

    import warnings
    warnings.filterwarnings("ignore")
    import keras
    from keras.layers import LSTM, Activation, Dense
    from keras import optimizers
    from keras.callbacks import ModelCheckpoint, EarlyStopping
    from keras.layers import ReLU, LeakyReLU
    import h5py
    from keras.models import load_model
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.layers import Dropout
    from keras.layers import LSTM
    from kerastuner.tuners import BayesianOptimization

    def build_model(hp):
        model = keras.Sequential()
        num_layers = hp.Int('num_layers', 2, 5)
        for i in range(num_layers):
            model.add(
                LSTM(units=hp.Int(
                    'units_' + str(i), min_value=32, max_value=300, step=32),
                     # all but the last LSTM layer return sequences so they can be stacked
                     return_sequences=(i < num_layers - 1)))
            model.add(ReLU())
            model.add(
                Dropout(rate=hp.Float('dropout_' + str(i),
                                      min_value=0.1,
                                      max_value=0.5,
                                      default=0.1,
                                      step=0.05)))
        model.add(Dense(1, activation='linear'))
        model.compile(optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                      loss='mean_absolute_error',
                      metrics=['mse'])
        return model

    bayesian_opt_tuner = BayesianOptimization(build_model,
                                              objective='mse',
                                              max_trials=3,
                                              executions_per_trial=1,
                                              overwrite=True)

    bayesian_opt_tuner.search(X_train,
                              y_train,
                              epochs=1000,
                              validation_data=(X_test, y_test),
                              callbacks=[
                                  keras.callbacks.EarlyStopping(
                                      monitor='val_loss', patience=100)
                              ],
                              verbose=0,
                              batch_size=5)

    best_model = bayesian_opt_tuner.get_best_models(num_models=1)
    model = best_model[0]

    predictions = model.predict(X_test)
    final_pred = []
    for i in range(len(predictions)):
        y_test_diff = predictions[i] + new_df[1786 + i]
        final_pred.append(y_test_diff[0])
    final_pred

    y_test_orig = []
    for i in range(len(y_test)):
        y_test_diff = y_test[i] + new_df[1786 + i]
        y_test_orig.append(y_test_diff[0])
    y_test_orig

    total = 0
    for i, j in zip(y_test_orig, final_pred):
        value = abs(i - j) / abs(i)
        total += value
    error = float(total * 100 / (len(y_test_orig)))  # calculate mape
    mape = round(error, 1)  # round to one decimal place
    accuracy = 100 - mape  # Calculate accuracy

    import matplotlib.pyplot as plt
    print("The LSTM's accuracy in predicting the mega units on test data : " +
          str(accuracy) + "%")
    print(' ')
    df = pd.DataFrame(y_test_orig, final_pred).reset_index()
    df.columns = ['original', 'predicted']
    df.plot(title='Original data vs Predicted data (for test data)')
    plt.show()
    model.save('models_with_tuner\\' + col_name + '_model_75.h5')
    print(' ')
    print(
        '================================================================================'
    )

    print('')
    print('Now training the model with 100% data for future predictions....')
    X_train = X
    y_train = y
    X_train = np.reshape(
        X_train,
        (X_train.shape[0], X_train.shape[1], 1))  # reshaping the data to 3d
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

    callbacks = [EarlyStopping(monitor='val_loss', patience=100)]

    model.fit(X_train,
              y_train,
              validation_split=0.2,
              epochs=1000,
              callbacks=callbacks,
              batch_size=5,
              shuffle=False)
    model.save('models_with_tuner\\' + col_name + '_model_100.h5')
    print(' ')
    print('saved the model successfully')
Code Example #7
## Step 2: Pass Neptune Logger to Tuner

import neptunecontrib.monitoring.kerastuner as npt_utils

tuner = BayesianOptimization(build_model,
                             objective='val_accuracy',
                             max_trials=10,
                             num_initial_points=3,
                             executions_per_trial=3,
                             project_name='bayesian-sweep',
                             logger=npt_utils.NeptuneLogger())

## Step 3: Run the search and monitor it in Neptune

tuner.search(x=x, y=y, epochs=5, validation_data=(val_x, val_y))

## Step 4: Log additional sweep information after the sweep

npt_utils.log_tuner_info(tuner)

## Step 5: Stop logging

# tests
exp = neptune.get_experiment()

neptune.stop()

# tests
all_logs = exp.get_logs()
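The Neptune snippet above assumes a build_model function and data arrays (x, y, val_x, val_y) defined earlier in the script. A minimal sketch of a compatible builder, assuming a binary-classification setup so the val_accuracy objective is available (layer sizes are illustrative):

from tensorflow import keras
from tensorflow.keras import layers


def build_model(hp):
    model = keras.Sequential()
    model.add(layers.Dense(hp.Int('units', min_value=16, max_value=128, step=16),
                           activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(
        optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
        loss='binary_crossentropy',
        metrics=['accuracy'])
    return model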
Code Example #8
File: tuner.py  Project: IVPLatNU/DeepCovidXR
    TOTAL_TRIALS = 20
    EXECUTION_PER_TRIAL = 1
    EPOCHS = 50

    tuner = BayesianOptimization(
        hp,
        max_trials=TOTAL_TRIALS,
        objective=kerastuner.Objective("val_auc", direction="max"),
        executions_per_trial=EXECUTION_PER_TRIAL,
        directory=base_dir,
        project_name=exp_name
    )

    history = tuner.search(train_gen,
                           epochs=EPOCHS,
                           validation_data=val_gen,
                           callbacks=[es, cp],
                           verbose=2,
                           use_multiprocessing=False)

    # Save best model and weight
    best_model = tuner.get_best_models()[0]
    best_config = best_model.optimizer.get_config()

    best_hyperparameters = tuner.get_best_hyperparameters()[0].get_config()
    best_hyperparameters_values = tuner.get_best_hyperparameters()[0].values

    best_model.save(model_dir)
    best_model.save_weights(weight_dir)

    with open(os.path.join(param_dir, 'hyperparameters.txt'), "w") as text_file:
        text_file.write(str(best_hyperparameters))
Code Example #9

# This is where we set the searching strategy
tuner = BayesianOptimization(build_model,
                             objective='val_mae',
                             seed=40,
                             max_trials=500,
                             executions_per_trial=1,
                             directory=jobdir)

tuner.search_space_summary()

training_class.cache_validation()

# replacement for model.fit
tuner.search(
    training_class.local_generator,
    validation_data=training_class.local_test_generator,
    steps_per_epoch=training_class.steps_per_epoch,
    epochs=training_class.epochs,
    max_queue_size=4,  # 32,
    workers=training_class.workers,
    shuffle=False,
    use_multiprocessing=True,
    callbacks=training_class.callbacks_list,
    initial_epoch=0,
)

tuner.results_summary()
with open(os.path.join(jobdir, "result.pkl"), "wb") as f:
    pickle.dump(tuner, f)
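The tuner object is pickled at the end of the run. Assuming it unpickles cleanly in the same environment, one way to inspect the result later (standard Keras Tuner accessors; jobdir is the directory used above):

import os
import pickle

with open(os.path.join(jobdir, "result.pkl"), "rb") as f:
    tuner = pickle.load(f)

best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hp.values)
best_model = tuner.get_best_models(num_models=1)[0]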
Code Example #10
            layers.Dense(
                hp.Int(f"Dense {j} units",
                       min_value=128,
                       max_value=840,
                       step=32)))
        model.add(Activation('relu'))

    model.add(layers.Dense(5, activation='softmax'))

    model.summary()

    model.compile(optimizer=Adamax(
        hp.Choice('learning_rate', values=[1e-3, 1e-4])),
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])

    return model


tuner = BayesianOptimization(build_model,
                             objective="val_accuracy",
                             max_trials=6,
                             executions_per_trial=1,
                             directory=Log_dir)

tuner.search(train_generator,
             epochs=8,
             batch_size=BATCH_SIZE,
             validation_data=val_generator,
             steps_per_epoch=450,
             validation_steps=len(val_generator) // BATCH_SIZE)
Code Example #11
                     default=1e-3)),
                      loss='mse',
                      metrics=['mse'])
        return model


hypermodel = RGModel(n_hidden=2)

HYPERBAND_MAX_EPOCHS = 40
MAX_TRIALS = 40
EXECUTION_PER_TRIAL = 4

tuner = BayesianOptimization(hypermodel,
                             objective='val_mean_squared_error',
                             seed=1,
                             max_trials=MAX_TRIALS,
                             executions_per_trial=EXECUTION_PER_TRIAL,
                             directory='random_search',
                             project_name='RGBFV8')

tuner.search_space_summary()

N_EPOCH_SEARCH = 10
# train_generator, steps_per_epoch=200, epochs=60, validation_data=validation_generator
tuner.search(train_gen_bf, epochs=N_EPOCH_SEARCH, validation_data=valid_gen_bf)

tuner.results_summary()

best_model = tuner.get_best_models(num_models=1)[0]
best_model.save('/DFS-L/DATA/pritchard/ankitesg/models/BFv12.h5')