Code example #1
def define_tuners(hypermodel, directory, project_name):
    random_tuner = RandomSearch(
        hypermodel,
        objective="val_loss",
        seed=SEED,
        max_trials=MAX_TRIALS,
        executions_per_trial=EXECUTION_PER_TRIAL,
        directory=f"{directory}_random_search",
        project_name=project_name,
    )
    hyperband_tuner = Hyperband(
        hypermodel,
        max_epochs=HYPERBAND_MAX_EPOCHS,
        objective="val_loss",
        seed=SEED,
        executions_per_trial=EXECUTION_PER_TRIAL,
        directory=f"{directory}_hyperband",
        project_name=project_name,
    )
    bayesian_tuner = BayesianOptimization(
        hypermodel,
        objective='val_loss',
        seed=SEED,
        num_initial_points=BAYESIAN_NUM_INITIAL_POINTS,
        max_trials=MAX_TRIALS,
        directory=f"{directory}_bayesian",
        project_name=project_name
    )
    return [random_tuner, hyperband_tuner, bayesian_tuner]
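A minimal sketch of how the returned tuners might be exercised, assuming a hypermodel plus x_train/y_train/x_val/y_val arrays that are not part of the original snippet:

tuners = define_tuners(hypermodel, directory="search", project_name="demo")
for tuner in tuners:
    # each tuner exposes the same search/get_best_models API
    tuner.search(x_train, y_train, epochs=10, validation_data=(x_val, y_val))
    best_model = tuner.get_best_models(num_models=1)[0]
    print(type(tuner).__name__, best_model.evaluate(x_val, y_val, verbose=0))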
Code example #2
def hyperparam_optimization(x, y):
    nBits = x.shape[1]
    frac_test = 0.3
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=frac_test)

    TRAIN_EPOCHS = 20
    MAX_TRIALS = 20
    EXECUTIONS_PER_TRIAL = 5

    b_tuner = BayesianOptimization(
        make_optimizer_model(nBits),
        objective='val_mean_squared_error',
        max_trials=MAX_TRIALS,
        executions_per_trial=EXECUTIONS_PER_TRIAL,
        directory='test_dir',
        project_name='tune_optimizer',
        seed=1
    )
    b_tuner.search_space_summary()

    b_tuner.search( x=x_train, y=y_train, epochs=TRAIN_EPOCHS, validation_data=(x_test, y_test))
    b_tuner.results_summary()
    best_model = b_tuner.get_best_models()[0]

    return best_model
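This snippet assumes a make_optimizer_model(nBits) factory defined elsewhere in the project. A hedged sketch of the kind of builder it could return (layer sizes and optimizer settings here are guesses, not the project's code):

from tensorflow import keras
from tensorflow.keras import layers

def make_optimizer_model(n_bits):
    # hypothetical factory: returns a build function that closes over the input width
    def build(hp):
        model = keras.Sequential([
            layers.Dense(hp.Int('units', 32, 256, step=32),
                         activation='relu', input_shape=(n_bits,)),
            layers.Dense(1),
        ])
        model.compile(
            optimizer=keras.optimizers.Adam(
                hp.Float('learning_rate', 1e-4, 1e-2, sampling='log')),
            loss='mean_squared_error',
            # compiling the metric is what makes 'val_mean_squared_error' resolvable
            metrics=['mean_squared_error'])
        return model
    return build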
Code example #3
def run_hyperparameter_tuning():
    hypermodel = NNTSMLPModel(5, 1)

    tuner = MyTunner(
        BayesianOptimization(hypermodel=hypermodel,
                             objective=Objective('loss', 'min'),
                             max_trials=2))
    # BayesianOptimization(
    #     hypermodel,
    #     objective='val_loss',
    #     seed=SEED,
    #     num_initial_points=BAYESIAN_NUM_INITIAL_POINTS,

    data_raw = load_data_raw()
    # transform series into supervised format
    data_train = series_to_supervised(data_raw, n_in=5)
    tuner_evaluation(tuner, data_train)
Code example #4
    def bayesian(self):
        tuner = BayesianOptimization(
            self.build_model,
            objective='mean_squared_error',
            max_trials=self.max_trials,  # more than 2 and it crashes
            num_initial_points=self.initial_points,
            seed=self.seed,
            overwrite=True,
            directory=self.kt_dir)

        tuner.search(
            x=self.data,
            y=self.train_labels,
            epochs=self.epochs,
            batch_size=self.batch_size,
            validation_data=(self.test_data, self.test_labels),
        )

        return tuner
Code example #5
def train(args):
    print(args)
    global_conf.config_tf2(args)
    checkpoint_dir, log_dir, export_dir = create_env_directories(
        args, get_experiment_name(args))

    train_dataset = dataloader.get_dataset(
        args['dataloader'],
        transformation_list=args['dataloader']['train_list'],
        num_classes=args["num_classes"],
        split=args['dataloader']['train_split_id'])
    val_dataset = dataloader.get_dataset(
        args['dataloader'],
        transformation_list=args['dataloader']['val_list'],
        num_classes=args["num_classes"],
        split=args['dataloader']['val_split_id'])

    setup_mp(args)
    build_model_fn = get_model(args)
    callbacks = get_callbacks(args, log_dir)

    # tuner = Hyperband(build_model_fn,
    #                   objective='val_accuracy',
    #                   max_epochs=args['num_epochs'],
    #                   hyperband_iterations=10e100,
    #                   directory=checkpoint_dir)

    tuner = BayesianOptimization(build_model_fn,
                                 objective='val_accuracy',
                                 max_trials=100000,
                                 num_initial_points=10,
                                 directory=checkpoint_dir)

    tuner.search_space_summary()
    tuner.search(x=train_dataset,
                 validation_data=val_dataset,
                 callbacks=callbacks,
                 epochs=args['num_epochs'])
    tuner.results_summary()
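The export_dir created above is never used in this excerpt; a hedged continuation inside train() could export the best model there (the file name is an assumption):

    best_model = tuner.get_best_models(num_models=1)[0]
    best_model.save(export_dir + '/best_model.h5')  # hypothetical export path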
Code example #6
        best_model = tuner.get_best_models(num_models=1)[0]

        # Evaluate the best model.
        loss, accuracy = best_model.evaluate(test_data,
                                             test_labels,
                                             sample_weight=test_weights)
        return elapsed_time, loss, accuracy

    NUM_CLASSES = 17
    INPUT_SHAPE = shape

    hypermodel = CNNHyperModel(input_shape=INPUT_SHAPE,
                               num_classes=NUM_CLASSES)

    tuner = BayesianOptimization(hypermodel,
                                 objective='val_accuracy',
                                 max_trials=500,
                                 num_initial_points=2,
                                 seed=2,
                                 directory="SALINAS_unet_sep_random_search",
                                 overwrite=True)

    results = []

    elapsed_time, loss, accuracy = tuner_evaluation(tuner, generator, val_data,
                                                    val_labels, val_weights)
    logger.info(
        f"Elapsed time = {elapsed_time:10.4f} s, accuracy = {accuracy}, loss = {loss}"
    )
    results.append([elapsed_time, loss, accuracy])
    logger.info(results)
Code example #7
    def __init__(self,
                 model,
                 metrics=None,
                 custom_objects=None,
                 goal=None,
                 output_dir="result",
                 mode="random",
                 transfer_weights=False,
                 frozen_layers=None,
                 activation_bits=4,
                 limit=None,
                 tune_filters="none",
                 tune_filters_exceptions=None,
                 learning_rate_optimizer=False,
                 layer_indexes=None,
                 quantization_config=None,
                 overwrite=True,
                 **tuner_kwargs):

        if not metrics:
            metrics = []

        if not custom_objects:
            custom_objects = {}

        # goal: { "type": ["bits", "energy"], "params": {...} } or ForgivingFactor type
        # For type == "bits":
        #   delta_p: increment (in %) of the accuracy if trial is smaller.
        #   delta_n: decrement (in %) of the accuracy if trial is bigger.
        #   rate: rate of decrease/increase in model size in terms of bits.
        #   input_bits: size of input tensors.
        #   output_bits: size of output tensors.
        #   stress: parameter to reduce reference size to force tuner to
        #     choose smaller models.
        #   config: configuration on what to compute for each layer
        #     minimum configuration is { "default": ["parameters", "activations"] }

        # use simplest one - number of bits
        if not goal:
            goal = {
                "type": "bits",
                "params": {
                    "delta_p": 8.0,
                    "delta_n": 8.0,
                    "rate": 2.0,
                    "stress": 1.0,
                    "input_bits": 8,
                    "output_bits": 8,
                    "ref_bits": 8,
                    "config": {
                        "default": ["parameters", "activations"]
                    }
                }
            }

        self.overwrite = overwrite

        # if we have not created it already, create new one.
        if not isinstance(goal, ForgivingFactor):
            target = forgiving_factor[goal["type"]](**goal["params"])
        else:
            target = goal

        # if no metrics were specified, we want to make sure we monitor at least
        # accuracy.
        if not metrics:
            metrics = ["acc"]

        self.hypermodel = AutoQKHyperModel(
            model,
            metrics,
            custom_objects,
            target,
            transfer_weights=transfer_weights,
            frozen_layers=frozen_layers,
            activation_bits=activation_bits,
            limit=limit,
            tune_filters=tune_filters,
            tune_filters_exceptions=tune_filters_exceptions,
            layer_indexes=layer_indexes,
            learning_rate_optimizer=learning_rate_optimizer,
            quantization_config=quantization_config)

        # right now we create unique results directory
        idx = 0
        name = output_dir
        if self.overwrite:
            while os.path.exists(name):
                idx += 1
                name = output_dir + "_" + str(idx)
        output_dir = name
        self.output_dir = output_dir

        # let's ignore mode for now
        assert mode in ["random", "bayesian", "hyperband"]
        if mode == "random":
            self.tuner = RandomSearch(self.hypermodel,
                                      objective=kt.Objective(
                                          "val_score", "max"),
                                      project_name=output_dir,
                                      **tuner_kwargs)
        elif mode == "bayesian":
            self.tuner = BayesianOptimization(self.hypermodel,
                                              objective=kt.Objective(
                                                  "val_score", "max"),
                                              project_name=output_dir,
                                              **tuner_kwargs)
        elif mode == "hyperband":
            self.tuner = Hyperband(self.hypermodel,
                                   objective=kt.Objective("val_score", "max"),
                                   project_name=output_dir,
                                   **tuner_kwargs)
        else:
            pass

        self.tuner.search_space_summary()
Code example #8
def BuildDNN(train_samples,
             dev_samples,
             test_samples,
             norm_id,
             model_path,
             lags=None,
             seed=None,
             batch_size=512,
             n_epochs=5,
             max_trials=5,
             executions_per_trial=3,
             max_hidden_layers=3,
             min_units=16,
             max_units=64,
             unit_step=16,
             min_droprate=0.0,
             max_droprate=0.5,
             droprate_step=0.05,
             min_learnrate=1e-4,
             max_learnrate=1e-1,
             n_tune_epochs=5,
             cast_to_zero=True,
             early_stop=True,
             early_stop_patience=10,
             retrain=False,
             warm_up=False,
             initial_epoch=None,
             measurement_time='day',
             measurement_unit='$m^3/s$'):
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    setting_info = {
        "model_path": model_path,
        "lags": lags,
        "seed": seed,
        "batch_size": batch_size,
        "n_epoch": n_epochs,
        "max_trials": max_trials,
        "executions_per_trial": executions_per_trial,
        "max_hidden_layers": max_hidden_layers,
        "min_units": min_units,
        "max_units": max_units,
        "unit_step": unit_step,
        "min_droprate": min_droprate,
        "max_droprate": max_droprate,
        "droprate_step": droprate_step,
        "min_learnrate": min_learnrate,
        "max_learnrate": max_learnrate,
        "n_tune_epochs": n_tune_epochs,
        "cast_to_zero": cast_to_zero,
        "early_stop": early_stop,
        "early_stop_patience": early_stop_patience,
        "retrain": retrain,
    }

    with open(model_path + 'setting.json', 'w') as outfile:
        json.dump(setting_info, outfile)

    sMin = norm_id['series_min']
    sMax = norm_id['series_max']
    # sMin = train_samples.min(axis=0)
    # sMax = train_samples.max(axis=0)
    # train_samples = 2*(train_samples-sMin)/(sMax-sMin)-1
    # dev_samples = 2*(dev_samples-sMin)/(sMax-sMin)-1
    # test_samples = 2*(test_samples-sMin)/(sMax-sMin)-1
    cal_samples = pd.concat([train_samples, dev_samples], axis=0)
    cal_samples = cal_samples.sample(frac=1)
    cal_samples = cal_samples.reset_index(drop=True)
    train_samples = cal_samples.iloc[:train_samples.shape[0]]
    dev_samples = cal_samples.iloc[train_samples.shape[0]:]
    X = cal_samples
    y = (cal_samples.pop('Y')).values
    train_x = train_samples
    train_y = train_samples.pop('Y')
    train_y = train_y.values
    dev_x = dev_samples
    dev_y = dev_samples.pop('Y')
    dev_y = dev_y.values
    test_x = test_samples
    test_y = test_samples.pop('Y')
    test_y = test_y.values

    # Config path to save optimal results
    opt_path = model_path + '\\optimal\\'
    cp_path = model_path + '\\optimal\\checkpoints\\'
    if not os.path.exists(cp_path):
        os.makedirs(cp_path)
    # restore only the latest checkpoint after every update
    checkpoint_path = cp_path + 'cp.h5'
    checkpoint_dir = os.path.dirname(checkpoint_path)
    # Define callbacks
    cp_callback = keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                  save_best_only=True,
                                                  mode='min',
                                                  save_weights_only=True,
                                                  verbose=1)
    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                  min_lr=0.00001,
                                                  factor=0.2,
                                                  verbose=1,
                                                  patience=10,
                                                  mode='min')
    early_stopping = keras.callbacks.EarlyStopping(
        monitor='val_loss',
        mode='min',
        verbose=1,
        patience=early_stop_patience,
        restore_best_weights=True)

    def build_model(hp):
        input_shape = (train_x.shape[1], )
        model = keras.Sequential()
        num_layers = hp.Int('num_layers',
                            min_value=1,
                            max_value=max_hidden_layers,
                            step=1,
                            default=1)
        for i in range(num_layers):
            units = hp.Int('units_' + str(i),
                           min_value=min_units,
                           max_value=max_units,
                           step=unit_step)
            dropout_rate = hp.Float('drop_rate_' + str(i),
                                    min_value=min_droprate,
                                    max_value=max_droprate,
                                    step=droprate_step)
            if i == 0:
                model.add(
                    layers.Dense(units=units,
                                 activation='relu',
                                 input_shape=input_shape))
            else:
                model.add(layers.Dense(units=units, activation='relu'))
            model.add(
                layers.Dropout(rate=dropout_rate, noise_shape=None, seed=seed))
        model.add(layers.Dense(1))
        model.compile(optimizer=keras.optimizers.Adam(
            hp.Float('learning_rate',
                     min_value=min_learnrate,
                     max_value=max_learnrate,
                     sampling='LOG',
                     default=1e-2)),
                      loss='mean_squared_error',
                      metrics=['mean_absolute_error', 'mean_squared_error'])
        return model

    tuner = BayesianOptimization(build_model,
                                 objective='mean_squared_error',
                                 max_trials=max_trials,
                                 executions_per_trial=executions_per_trial,
                                 directory=model_path,
                                 project_name='BayesianOpt')

    tuner.search_space_summary()
    start = time.process_time()
    tuner.search(x=train_x,
                 y=train_y,
                 epochs=n_tune_epochs,
                 validation_data=(dev_x, dev_y),
                 callbacks=[early_stopping])
    end = time.process_time()
    time_cost = end - start
    tuner.results_summary()
    best_hps = tuner.oracle.get_best_trials(num_trials=1)[0].hyperparameters
    model = build_model(best_hps)

    if retrain or not os.path.exists(checkpoint_path):
        history = model.fit(X,
                            y,
                            epochs=n_epochs,
                            batch_size=batch_size,
                            validation_data=(X, y),
                            verbose=1,
                            callbacks=[
                                cp_callback,
                                early_stopping,
                            ])
        hist = pd.DataFrame(history.history)
        hist.to_csv(opt_path + 'PARAMS-CAL-HISTORY.csv')
        plot_history(history, opt_path + 'MAE-HISTORY.png',
                     opt_path + 'MSE-HISTORY.png')
    else:
        model.load_weights(checkpoint_path)

    train_predictions = model.predict(train_x).flatten()
    dev_predictions = model.predict(dev_x).flatten()
    test_predictions = model.predict(test_x).flatten()
    sMax = sMax[sMax.shape[0] - 1]
    sMin = sMin[sMin.shape[0] - 1]
    train_y = np.multiply(train_y + 1, sMax - sMin) / 2 + sMin
    dev_y = np.multiply(dev_y + 1, sMax - sMin) / 2 + sMin
    test_y = np.multiply(test_y + 1, sMax - sMin) / 2 + sMin
    train_predictions = np.multiply(train_predictions + 1,
                                    sMax - sMin) / 2 + sMin
    dev_predictions = np.multiply(dev_predictions + 1, sMax - sMin) / 2 + sMin
    test_predictions = np.multiply(test_predictions + 1,
                                   sMax - sMin) / 2 + sMin
    if cast_to_zero:
        train_predictions[train_predictions < 0.0] = 0.0
        dev_predictions[dev_predictions < 0.0] = 0.0
        test_predictions[test_predictions < 0.0] = 0.0
    dump_pred_results(
        path=opt_path + '/opt_pred.csv',
        train_y=train_y,
        train_predictions=train_predictions,
        dev_y=dev_y,
        dev_predictions=dev_predictions,
        test_y=test_y,
        test_predictions=test_predictions,
        time_cost=time_cost,
    )
    plot_rela_pred(train_y,
                   train_predictions,
                   measurement_time=measurement_time,
                   measurement_unit=measurement_unit,
                   fig_savepath=opt_path + 'TRAIN-PRED.png')
    plot_rela_pred(dev_y,
                   dev_predictions,
                   measurement_time=measurement_time,
                   measurement_unit=measurement_unit,
                   fig_savepath=opt_path + "DEV-PRED.png")
    plot_rela_pred(test_y,
                   test_predictions,
                   measurement_time=measurement_time,
                   measurement_unit=measurement_unit,
                   fig_savepath=opt_path + "TEST-PRED.png")
    plot_error_distribution(test_predictions, test_y,
                            opt_path + 'TEST-ERROR-DSTRI.png')
    plt.show()
Code example #9
        # Evaluate the best model.
        loss, accuracy = best_model.evaluate(test_data,
                                             test_labels,
                                             sample_weight=test_weights)
        return elapsed_time, loss, accuracy

    NUM_CLASSES = 3
    INPUT_SHAPE = shape

    hypermodel = CNNHyperModel(input_shape=INPUT_SHAPE,
                               num_classes=NUM_CLASSES)

    #tuner = RandomSearch(hypermodel, objective='accuracy', max_trials=500, seed=2, directory = "SPARCS_unet_random_search", max_model_size=100000000, overwrite=True)
    tuner = BayesianOptimization(hypermodel,
                                 objective='accuracy',
                                 max_trials=500,
                                 num_initial_points=5,
                                 seed=2,
                                 directory="SPARCS_unet_random_search",
                                 max_model_size=100000000,
                                 overwrite=True)

    results = []

    elapsed_time, loss, accuracy = tuner_evaluation(tuner, generator, val_data,
                                                    val_labels, val_weights)
    logger.info(
        f"Elapsed time = {elapsed_time:10.4f} s, accuracy = {accuracy}, loss = {loss}"
    )
    results.append([elapsed_time, loss, accuracy])
    logger.info(results)
Code example #10
def predict_rem(col_name):
    print(
        'Creating a best model for predicting {} MU and Checking its accuracy with test data'
        .format(col_name))
    import pandas as pd
    import warnings

    warnings.filterwarnings("ignore")

    df3 = pd.read_csv('pycharm_data.csv')
    df3 = df3.drop(columns=['Unnamed: 0', 'date'])
    new_df = df3[col_name].copy()
    new_df

    # create a differenced series
    from pandas import Series

    def difference(dataset, interval=1):
        diff = list()
        for i in range(interval, len(dataset)):
            value = dataset[i] - dataset[i - interval]
            diff.append(value)
        return Series(diff)

    differenced = difference(new_df, 1)
    differenced.head()

    X = []
    y = []
    for i in range(0, differenced.shape[0] - 48):
        X.append(
            differenced.iloc[i:i +
                             48])  # taking 48 rows for training incrementally
        y.append(differenced.iloc[i + 48])

    import numpy as np
    X, y = np.array(X, dtype=np.float32), np.array(y, dtype=np.float32)
    y = np.reshape(y, (len(y), 1))

    X_train, X_test = X[:-480], X[-480:]
    y_train, y_test = y[:-480], y[-480:]

    for i in range(len(y_test)):
        y_test_diff = y_test[i] + new_df[1786 + i]

    X_train = np.reshape(
        X_train,
        (X_train.shape[0], X_train.shape[1], 1))  # reshaping the data to 3d
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

    import warnings
    warnings.filterwarnings("ignore")
    import keras
    from keras.layers import LSTM, Activation, Dense
    from keras import optimizers
    from keras.callbacks import ModelCheckpoint, EarlyStopping
    from keras.layers import ReLU, LeakyReLU
    import h5py
    from keras.models import load_model
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.layers import Dropout
    from keras.layers import LSTM
    from kerastuner.tuners import BayesianOptimization

    def build_model(hp):
        model = keras.Sequential()
        # number of stacked LSTM blocks is itself a hyperparameter
        num_layers = hp.Int('num_layers', 2, 5)
        for i in range(num_layers):
            model.add(
                LSTM(units=hp.Int(
                    'units_' + str(i), min_value=32, max_value=300, step=32),
                    # all but the last LSTM return sequences so they can be stacked
                    return_sequences=(i < num_layers - 1)))
            model.add(ReLU())
            model.add(
                Dropout(rate=hp.Float('dropout_' + str(i),
                                      min_value=0.1,
                                      max_value=0.5,
                                      default=0.1,
                                      step=0.05)))
        model.add(Dense(1, activation='linear'))
        model.compile(optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                      loss='mean_absolute_error',
                      metrics=['mse'])
        return model

    bayesian_opt_tuner = BayesianOptimization(build_model,
                                              objective='mse',
                                              max_trials=3,
                                              executions_per_trial=1,
                                              overwrite=True)

    bayesian_opt_tuner.search(X_train,
                              y_train,
                              epochs=1000,
                              validation_data=(X_test, y_test),
                              callbacks=[
                                  keras.callbacks.EarlyStopping(
                                      monitor='val_loss', patience=100)
                              ],
                              verbose=0,
                              batch_size=5)

    best_model = bayesian_opt_tuner.get_best_models(num_models=1)
    model = best_model[0]

    predictions = model.predict(X_test)
    final_pred = []
    for i in range(len(predictions)):
        y_test_diff = predictions[i] + new_df[1786 + i]
        final_pred.append(y_test_diff[0])
    final_pred

    y_test_orig = []
    for i in range(len(y_test)):
        y_test_diff = y_test[i] + new_df[1786 + i]
        y_test_orig.append(y_test_diff[0])
    y_test_orig

    total = 0
    for i, j in zip(y_test_orig, final_pred):
        value = abs(i - j) / abs(i)
        total += value
    error = float(total * 100 / (len(y_test_orig)))  # calculate mape
    mape = round(error, 1)  # round to one decimal place
    accuracy = 100 - mape  # Calculate accuracy

    import matplotlib.pyplot as plt
    print("The LSTM's accuracy in predicting the mega units on test data : " +
          str(accuracy) + "%")
    print(' ')
    df = pd.DataFrame(y_test_orig, final_pred).reset_index()
    df.columns = ['original', 'predicted']
    df.plot(title='Original data vs Predicted data (for test data)')
    plt.show()
    model.save('models_with_tuner\\' + col_name + '_model_75.h5')
    print(' ')
    print(
        '================================================================================'
    )

    print('')
    print('Now training the model with 100% data for future predictions....')
    X_train = X
    y_train = y
    X_train = np.reshape(
        X_train,
        (X_train.shape[0], X_train.shape[1], 1))  # reshaping the data to 3d
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

    callbacks = [EarlyStopping(monitor='val_loss', patience=100)]

    model.fit(X_train,
              y_train,
              validation_split=0.2,
              epochs=1000,
              callbacks=callbacks,
              batch_size=5,
              shuffle=False)
    model.save('models_with_tuner\\' + col_name + '_model_100.h5')
    print(' ')
    print('saved the model successfully')
Code example #11
             project_qualified_name='shared/keras-tuner-integration')

# Quickstart

## Step 1: Create an Experiment

neptune.create_experiment('bayesian-sweep')

## Step 2: Pass Neptune Logger to Tuner

import neptunecontrib.monitoring.kerastuner as npt_utils

tuner = BayesianOptimization(build_model,
                             objective='val_accuracy',
                             max_trials=10,
                             num_initial_points=3,
                             executions_per_trial=3,
                             project_name='bayesian-sweep',
                             logger=npt_utils.NeptuneLogger())

## Step 3: Run the search and monitor it in Neptune

tuner.search(x=x, y=y, epochs=5, validation_data=(val_x, val_y))

## Step 4: Log additional sweep information after the sweep

npt_utils.log_tuner_info(tuner)

## Step 5: Stop logging
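The excerpt cuts off here; with the legacy neptune-client API used in Step 1, logging is normally stopped with a single call (hedged completion, not from the original snippet):

neptune.stop()  # closes the experiment opened in Step 1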

Code example #12
  model.add(Dense(total_words/2, activation='relu', kernel_regularizer=regularizers.l2(0.01))) # regularizer to avoid overfitting
  # final dense layer => the size of total words to predict
  model.add(Dense(total_words, activation='softmax'))

  model.compile(
    optimizer='adam', # with default learning rate
    loss='categorical_crossentropy', # our loss function
    metrics=['accuracy'])
  return model

from kerastuner.tuners import BayesianOptimization

tuner = BayesianOptimization(
    build_model,
    objective='val_accuracy',
    max_trials=5,
    executions_per_trial=1,
    directory='experiments',
    project_name='test2')

tuner.search_space_summary()

# search() runs the trials in place; it does not return a History object
tuner.search(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=10)

tuner.get_best_models(num_models=2)

tuner.results_summary()

'''my_callbacks = [
    #tf.keras.callbacks.EarlyStopping(patience=2),
    tf.keras.callbacks.ModelCheckpoint(filepath='model.{epoch:02d}-{val_loss:.2f}.h5'),
Code example #13
    local_size = training_class.local_generator.get_input_size()

    input_img = Input(shape=local_size)
    training_class.local_model = Model(
        input_img, training_class.network_obj(input_img, hp))

    training_class.compile()

    return training_class.local_model


# This is where we set the searching strategy
tuner = BayesianOptimization(build_model,
                             objective='val_mae',
                             seed=40,
                             max_trials=500,
                             executions_per_trial=1,
                             directory=jobdir)

tuner.search_space_summary()

training_class.cache_validation()

# replacement for model.fit
tuner.search(
    training_class.local_generator,
    validation_data=training_class.local_test_generator,
    steps_per_epoch=training_class.steps_per_epoch,
    epochs=training_class.epochs,
    max_queue_size=4,  # 32,
    workers=training_class.workers,
Code example #14
            layers.Dense(
                hp.Int(f"Dense {j} units",
                       min_value=128,
                       max_value=840,
                       step=32)))
        model.add(Activation('relu'))

    model.add(layers.Dense(5, activation='softmax'))

    model.summary()

    model.compile(optimizer=Adamax(
        hp.Choice('learning_rate', values=[1e-3, 1e-4])),
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])

    return model


tuner = BayesianOptimization(build_model,
                             objective="val_accuracy",
                             max_trials=6,
                             executions_per_trial=1,
                             directory=Log_dir)

tuner.search(train_generator,
             epochs=8,
             batch_size=BATCH_SIZE,
             validation_data=val_generator,
             steps_per_epoch=450,
             validation_steps=len(val_generator) // BATCH_SIZE)
Code example #15
            objective=config['HP_Optimization_CrossingDetection_Shuffle']['tuner']['objetive'],
            seed=config['HP_Optimization_CrossingDetection_Shuffle']['tuner']['seed'],
            max_epochs=config['HP_Optimization_CrossingDetection_Shuffle']['tuner']['max_epochs'],
            executions_per_trial=config['HP_Optimization_CrossingDetection_Shuffle']['tuner']['executions_per_trial'],
            directory=path_output_results_FT,
            project_name=project_name,
            overwrite=False
        )

    else:

        tuner = BayesianOptimization(
            hypermodel_ft,
            objective=config['HP_Optimization_CrossingDetection_Shuffle']['tuner']['objetive'],
            seed=config['HP_Optimization_CrossingDetection_Shuffle']['tuner']['seed'],
            max_trials=config['HP_Optimization_CrossingDetection_Shuffle']['tuner']['max_trials'],
            num_initial_points=config['HP_Optimization_CrossingDetection_Shuffle']['tuner']['num_initial_points'],
            directory=path_output_results_FT,
            project_name=project_name,
            overwrite=False
        )


    earlystopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1, mode='min', restore_best_weights=True)

    reducelronplateau = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, verbose=1, mode='min', min_delta=0.0001, cooldown=0, min_lr=0)

    tuner.search_space_summary()

    start_time = time.time()

    tuner.search(x=train_generator, validation_data=validation_generator, epochs=epochs, callbacks=[earlystopping, reducelronplateau])
Code example #16
random_model = random_tuner.hypermodel.build(params)

random_model.fit(X.values, y.values.flatten(), epochs=15)

random_accuracy_df = pd.DataFrame(random_model.history.history)

random_accuracy_df[['loss', 'accuracy']].plot()
plt.title('Loss & Accuracy Per EPOCH For Random Model')
plt.xlabel('EPOCH')
plt.ylabel('Accuracy')
plt.show()

bayesian_tuner = BayesianOptimization(hypermodel,
                                      objective='accuracy',
                                      max_trials=10,
                                      seed=10,
                                      project_name='divorce test')

bayesian_tuner.search(X_train.values,
                      y_train.values.flatten(),
                      epochs=10,
                      validation_data=(X_test.values, y_test.values.flatten()))

bayesian_params = bayesian_tuner.get_best_hyperparameters()[0]

bayesian_model = bayesian_tuner.hypermodel.build(bayesian_params)

bayesian_model.fit(X.values, y.values.flatten(), epochs=15)

bayesian_accuracy_df = pd.DataFrame(bayesian_model.history.history)
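The excerpt stops after building the DataFrame; presumably the Bayesian run is plotted the same way as the random model above, e.g.:

bayesian_accuracy_df[['loss', 'accuracy']].plot()
plt.title('Loss & Accuracy Per EPOCH For Bayesian Model')
plt.xlabel('EPOCH')
plt.ylabel('Accuracy')
plt.show()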
Code example #17
File: tuner.py Project: IVPLatNU/DeepCovidXR
    patience_es = 10
    
    es = features.setES(monitor, patience_es, min_delta)
    cp = features.setCP(monitor, unfreeze_dir)

    hp = hyperModel(base, freeze_dir)

    TOTAL_TRIALS = 20
    EXECUTION_PER_TRIAL = 1
    EPOCHS = 50

    tuner = BayesianOptimization(
        hp,
        max_trials=TOTAL_TRIALS,
        objective=kerastuner.Objective("val_auc", direction="max"),
        executions_per_trial=EXECUTION_PER_TRIAL,
        directory=base_dir,
        project_name=exp_name
    )

    tuner.search(train_gen,
                 epochs=EPOCHS,
                 validation_data=val_gen,
                 callbacks=[es, cp],
                 verbose=2,
                 use_multiprocessing=False)

    # Save best model and weight
    best_model = tuner.get_best_models()[0]
    best_config = best_model.optimizer.get_config()
Code example #18
                     default=1e-3)),
                      loss='mse',
                      metrics=['mse'])
        return model


hypermodel = RGModel(n_hidden=2)

HYPERBAND_MAX_EPOCHS = 40
MAX_TRIALS = 40
EXECUTION_PER_TRIAL = 4

tuner = BayesianOptimization(hypermodel,
                             objective='val_mean_squared_error',
                             seed=1,
                             max_trials=MAX_TRIALS,
                             executions_per_trial=EXECUTION_PER_TRIAL,
                             directory='random_search',
                             project_name='RGBFV8')

tuner.search_space_summary()

N_EPOCH_SEARCH = 10
# train_generator, steps_per_epoch=200, epochs=60, validation_data=validation_generator
tuner.search(train_gen_bf, epochs=N_EPOCH_SEARCH, validation_data=valid_gen_bf)

tuner.results_summary()

best_model = tuner.get_best_models(num_models=1)[0]
best_model.save('/DFS-L/DATA/pritchard/ankitesg/models/BFv12.h5')