def search_bestCNN(self,
                    X,
                    Y,
                    testX,
                    testY,
                    epochs=50,
                    max_trials=20,
                    batch_size=64,
                    project_name='A1'):
     tuner = RandomSearch(self._build_CNN,
                          objective='val_accuracy',
                           max_trials=max_trials,
                          executions_per_trial=1,
                          directory='tunerlog',
                          project_name=project_name)
     tuner.search(x=X,
                  y=Y,
                  epochs=epochs,
                  batch_size=batch_size,
                  validation_data=(testX, testY),
                  callbacks=[
                      tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                       patience=5)
                  ],
                  verbose=2)
     tuner.search_space_summary()
      tuner.results_summary()
     print('best_hyperparameters')
     print(tuner.get_best_hyperparameters()[0].values)
     return tuner.get_best_models()
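The method above assumes a self._build_CNN hypermodel that is not shown in this listing. A minimal sketch of such a builder, offered purely as an assumption (the input shape, class count and search ranges are illustrative, not from the source), could look like this as a method of the same class:

import tensorflow as tf

def _build_CNN(self, hp):
    # Hypothetical builder: tunes the number of filters, dense units and learning rate.
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(hp.Int('filters', 32, 128, step=32),
                               kernel_size=3, activation='relu',
                               input_shape=(28, 28, 1)),  # assumed input shape
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(hp.Int('dense_units', 64, 256, step=64),
                              activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax')  # assumed 10 classes
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(
                      hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model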
Example #2
def train_resnet50():

    # generating data from existing images
    train_datagenerator, test_datagenerator = data_generator()

    # getting the model and callback from model.py
    lr_reduction = ReduceLROnPlateau(monitor='val_loss',
                                     factor=0.1,
                                     patience=3,
                                     min_lr=1e-5)

    tuner = RandomSearch(project_name=os.path.join(LOGS, 'trial_2/resnet_50'),
                         max_trials=3,
                         executions_per_trial=5,
                         hypermodel=vgg_16,
                         objective='val_accuracy')
    tuner.search(train_datagenerator,
                 epochs=10,
                 callbacks=[lr_reduction],
                 validation_data=test_datagenerator)
    best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]

    model = tuner.hypermodel.build(best_hps)
    # fit_generator is deprecated in TF 2.x; fit accepts generators directly
    model.fit(train_datagenerator,
              epochs=EPOCHS,
              validation_data=test_datagenerator)

    return model
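Example #2 passes a vgg_16 hypermodel (despite the resnet50 name) that is defined elsewhere in that project. As a rough sketch of what such a builder might look like, assuming a transfer-learning head over tf.keras.applications.VGG16 (the class count, image size and search ranges are assumptions, not from the source):

import tensorflow as tf

def vgg_16(hp):
    num_classes = 2  # assumed; not shown in the source
    base = tf.keras.applications.VGG16(include_top=False,
                                       weights='imagenet',
                                       input_shape=(224, 224, 3))
    base.trainable = False  # freeze the convolutional base
    model = tf.keras.Sequential([
        base,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(hp.Int('dense_units', 64, 512, step=64),
                              activation='relu'),
        tf.keras.layers.Dropout(hp.Float('dropout', 0.0, 0.5, step=0.1)),
        tf.keras.layers.Dense(num_classes, activation='softmax')
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(
                      hp.Choice('learning_rate', [1e-3, 1e-4, 1e-5])),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model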
Example #3
        def build_model(hp):
            model = tf.keras.models.Sequential([
                tf.keras.layers.Flatten(input_shape=(28, 28)),
                tf.keras.layers.Dense(128, activation='relu'),
                tf.keras.layers.Dropout(hp.Choice('dropout_rate', values=[0.2, 0.4])),
                tf.keras.layers.Dense(10, activation='softmax')
            ])

            model.compile(
                optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])

            return model

        # data creation and the tuner calls belong outside build_model,
        # otherwise they are unreachable after the return statement
        x_train = np.random.random((100, 28, 28))
        y_train = np.random.randint(10, size=(100, 1))
        x_test = np.random.random((20, 28, 28))
        y_test = np.random.randint(10, size=(20, 1))

        tuner = RandomSearch(build_model, objective='accuracy', max_trials=1, executions_per_trial=1, seed=1)

        tuner.search(x_train, y_train, epochs=1)

        self.assertEqual(0.4, tuner.get_best_hyperparameters(1)[0].get('dropout_rate'))
Example #4
def tuneCNN(
        X_train,
        X_test,
        height,
        width,
        num_classes,
        patience=1,
        executions_per_trial=1,
        seed=42,
        max_trials=3,
        objective='val_accuracy',
        directory='my_dir',
        epochs=10,
        verbose=0,
        test_size=0.2):
    # creates hypermodel object based on the num_classes and the input shape
    hypermodel = CNNHyperModel(input_shape=(
        height, width, 3), num_classes=num_classes)

    # tuner: establish the object that explores the hyperparameter search space
    tuner = RandomSearch(
        hypermodel,
        objective=objective,
        seed=seed,
        max_trials=max_trials,
        executions_per_trial=executions_per_trial,
        directory=directory,
    )


    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model

    tuner.search(X_train,
                 validation_data=X_test,
                 callbacks=[tf.keras.callbacks.EarlyStopping(patience=patience)],
                 epochs=epochs,
                 verbose=verbose)

    # best hyperparameters
    hyp = tuner.get_best_hyperparameters(num_trials=1)[0]
    #hyp = tuner.oracle.get_best_trials(num_trials=1)[0].hyperparameters.values
    #best_hps = np.stack(hyp).astype(None)
    history = tuner_hist(
        X_train,
        X_test,
        tuner,
        hyp,
        img=1,
        epochs=epochs,
        verbose=verbose,
        test_size=test_size)

    """
    Return:
        models[0] : best model obtained after tuning
        best_hps : best Hyperprameters obtained after tuning, stored as array
        history : history of the data executed from the given model
    """
    return tuner.get_best_models(1)[0], hyp, history
Example #5
def get_best_nn(data, num_output=1, **tuner_kw):
    """
    Find the "best" model based on `MyHyperModel` class.

    Parameters
    ----------
    data: numpy.array or similar
        The train and validation data to be used by the hyperparameter tuner.
    num_output: int, optional
        The number of outputs for our NN. 1 default for regression.
    tuner_kw: dictionary
        A dictionary of keyword arguments passed to the `RandomSearch` tuner.

    Returns
    -------
    The model built with the "optimised" hyperparameters, together with the
    best hyperparameters found.
    """
    # Load encoded data
    enc_data = data_encode(data, encoder='CatBoostEncoder')
    x_train_enc, y_train = enc_data.get('train_data')
    x_val_enc, y_val = enc_data.get('test_data')

    # Create an instance of the `MyHyperModel` class
    hyper_model = MyHyperModel(num_output=num_output,
                               nun_features=int(x_train_enc.shape[1]))

    # Default tuner params
    default_tuner_params = {
        'objective': 'val_loss',
        'max_trials': 10,
        'directory':
        'keras_tuner_output',  # Directory for logs, checkpoints, etc
        'project_name': 'sgsc'
    }  # Default is utils/keras_tuner_output

    # Update tuner params
    tuner_params = {**default_tuner_params, **tuner_kw}

    # Initialise tuner and run it
    tuner = RandomSearch(hyper_model, **tuner_params)
    # Check about seed!! We need to define it? or does it use numpy's by default?
    tuner.search(
        x_train_enc,
        y_train,
        epochs=5,  # Default number of epochs
        validation_data=(x_val_enc, y_val),
        verbose=0)

    # Get best model
    best_hp = tuner.get_best_hyperparameters()[0]
    best_model = tuner.hypermodel.build(best_hp)

    return best_model, best_hp
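get_best_nn builds on a MyHyperModel class that is not part of this listing. A minimal stand-in, assuming a plain dense regression network (layer counts, search ranges and the loss are assumptions; the nun_features spelling follows the call above):

import tensorflow as tf
from kerastuner import HyperModel

class MyHyperModel(HyperModel):
    # Hypothetical sketch of the hypermodel used by get_best_nn above.
    def __init__(self, num_output, nun_features):
        self.num_output = num_output
        self.nun_features = nun_features

    def build(self, hp):
        inputs = tf.keras.Input(shape=(self.nun_features,))
        x = inputs
        for i in range(hp.Int('num_layers', 1, 4)):
            x = tf.keras.layers.Dense(hp.Int('units_' + str(i), 16, 128, step=16),
                                      activation='relu')(x)
        outputs = tf.keras.layers.Dense(self.num_output)(x)  # linear output for regression
        model = tf.keras.Model(inputs, outputs)
        model.compile(optimizer=tf.keras.optimizers.Adam(
                          hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                      loss='mse')
        return model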
Example #6
def find_best_NN(x_train, y_train):
  tuner = RandomSearch(build_model, objective="loss", max_trials=10, executions_per_trial=1)
  print("\n\n\n")
  print('[INFO] start searching')
  tuner.search(x_train, y_train, batch_size=100, epochs=10, validation_split=0.2)
  print("\n\n\nRESULTS SUMMARY")
  tuner.results_summary()
  print("\n\n\n")
  print("\n\n\nHERE IS THE BEST MODEL\n\n\n")
  best_params = tuner.get_best_hyperparameters()[0]
  best_model = tuner.hypermodel.build(best_params)
  best_model.summary()
  return best_model
Example #7
def find_best_NN(x_train, y_train):
  # create a tuner that can find an optimal model architecture
  tuner = RandomSearch(build_model, objective="val_mae", max_trials=40, executions_per_trial=1,)
  print("\n\n\n")
  # automatic hyperparameter search starts here
  print('[INFO] start searching')
  tuner.search(x_train, y_train, batch_size=500, epochs=150, validation_split=0.3)
  # pick the best model
  print("\n\n\nRESULTS SUMMARY")
  tuner.results_summary()
  print("\n\n\n")
  # get the best model
  print("\n\n\nHERE IS THE BEST MODEL\n\n\n")
  best_params = tuner.get_best_hyperparameters()[0]
  best_model = tuner.hypermodel.build(best_params)
  best_model.summary()
  return best_model
Example #8
def tune():

    tuner = RandomSearch(tuner_model,
                         objective="val_accuracy",
                         max_trials=100,
                         executions_per_trial=1,
                         directory=LOG_DIR,
                         project_name='final_year_project')

    tuner.search(x=x_train,
                 y=y_train,
                 epochs=3,
                 batch_size=64,
                 validation_data=(x_test, y_test))

    with open("tuner.pkl", "wb") as f:
        pickle.dump(tuner, f)

    tuner = pickle.load(open("tuner.pkl", "rb"))

    print(tuner.get_best_hyperparameters()[0].values)
    tuner.results_summary()
    tuner.get_best_models()[0].summary()
Example #9
    return model



tuner = RandomSearch(
    build_model,
    objective='val_accuracy',
    max_trials=1,  # how many model variations to test?
    executions_per_trial=1,  # how many trials per variation? (same model could perform differently)
    directory=LOG_DIR)


tuner.search(x=x_train,
             y=y_train,
             verbose=2,  # keep console output compact; the default was getting messy in a Jupyter notebook
             epochs=1,
             batch_size=64,
             #callbacks=[tensorboard],  # if you have callbacks like tensorboard, they go here.
             validation_data=(x_test, y_test))


tuner.results_summary()
print(tuner.get_best_hyperparameters()[0].values)
tuner.get_best_models()[0].summary()

with open(f"tuner_{int(time.time())}.pkl", "wb") as f:
    pickle.dump(tuner, f)


#TO LOAD DATA
#tuner = pickle.load(open("tuner_1576628824.pkl","rb"))
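Example #9 is cut off at the top: only the tail of its build_model (the dangling return model above) survives, so the tuner cannot run as shown. A minimal stand-in build_model, offered purely as an assumption, would be something like:

import tensorflow as tf

def build_model(hp):
    # Hypothetical replacement for the truncated builder; shapes and ranges are assumed.
    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),  # assumed input shape
        tf.keras.layers.Dense(hp.Int('units', 32, 256, step=32),
                              activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax')  # assumed 10 classes
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model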
Example #10
def main():
    logging.getLogger().setLevel(logging.INFO)
    parser = argparse.ArgumentParser(description='Keras Tuner HP search')
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument(
        '--steps-per-epoch', type=int,
        default=-1)  # if set to -1, don't override the normal calcs for this
    parser.add_argument('--tuner-proj', required=True)
    parser.add_argument('--bucket-name', required=True)
    parser.add_argument('--tuner-dir', required=True)
    parser.add_argument('--tuner-num', required=True)
    parser.add_argument('--respath', required=True)
    parser.add_argument('--executions-per-trial', type=int, default=2)
    parser.add_argument('--max-trials', type=int, default=20)
    parser.add_argument('--num-best-hps', type=int, default=2)
    parser.add_argument('--data-dir',
                        default='gs://aju-dev-demos-codelabs/bikes_weather/')

    args = parser.parse_args()
    logging.info('Tensorflow version %s', tf.__version__)

    TRAIN_DATA_PATTERN = args.data_dir + "train*"
    EVAL_DATA_PATTERN = args.data_dir + "test*"

    train_batch_size = TRAIN_BATCH_SIZE
    eval_batch_size = 1000
    if args.steps_per_epoch == -1:  # calc based on dataset size
        steps_per_epoch = NUM_EXAMPLES // train_batch_size
    else:
        steps_per_epoch = args.steps_per_epoch
    logging.info('using %s steps per epoch', steps_per_epoch)

    logging.info('using train batch size %s', train_batch_size)
    train_dataset = bwmodel.read_dataset(TRAIN_DATA_PATTERN, train_batch_size)
    eval_dataset = bwmodel.read_dataset(
        EVAL_DATA_PATTERN, eval_batch_size, tf.estimator.ModeKeys.EVAL,
        eval_batch_size * 100 * STRATEGY.num_replicas_in_sync)

    logging.info('executions per trial: %s', args.executions_per_trial)

    # TODO: parameterize
    retries = 0
    num_retries = 5
    sleep_time = 5
    while retries < num_retries:
        try:
            tuner = RandomSearch(
                # tuner = Hyperband(
                create_model,
                objective='val_mae',
                # max_epochs=10,
                # hyperband_iterations=2,
                max_trials=args.max_trials,
                distribution_strategy=STRATEGY,
                executions_per_trial=args.executions_per_trial,
                directory=args.tuner_dir,
                project_name=args.tuner_proj)
            break
        except Exception as e:
            logging.warning(e)
            logging.info('sleeping %s seconds...', sleep_time)
            time.sleep(sleep_time)
            retries += 1
            sleep_time *= 2

    logging.info("search space summary:")
    logging.info(tuner.search_space_summary())

    logging.info("hp tuning model....")
    tuner.search(
        train_dataset,
        validation_data=eval_dataset,
        validation_steps=eval_batch_size,
        epochs=args.epochs,
        steps_per_epoch=steps_per_epoch,
    )
    best_hps = tuner.get_best_hyperparameters(args.num_best_hps)
    best_hps_list = [best_hps[i].values for i in range(args.num_best_hps)]
    logging.info('best_hps_list: %s', best_hps_list)
    best_hp_values = json.dumps(best_hps_list)
    logging.info('best hyperparameters: %s', best_hp_values)

    storage_client = storage.Client()
    logging.info('writing best results to %s', args.respath)
    bucket = storage_client.get_bucket(args.bucket_name)
    logging.info('using bucket %s: %s, path %s', args.bucket_name, bucket,
                 args.respath)
    blob = bucket.blob(args.respath)
    blob.upload_from_string(best_hp_values)
Example #11
def search(
    epochs: int,
    batch_size: int,
    n_trials: int,
    execution_per_trial: int,
    project: Text,
    do_cleanup: bool,
):
    set_seed(SEED)

    dir_to_clean = os.path.join(SEARCH_DIR, project)
    if do_cleanup and os.path.exists(dir_to_clean):
        shutil.rmtree(dir_to_clean)

    # first 80% of "train" for training, last 20% for validation; the "test" split is held out for final evaluation
    ds_tr, ds_val, ds_test = tfds.load(
        name="mnist",
        split=["train[:80%]", "train[-20%:]", "test"],
        data_dir="mnist",
        shuffle_files=False,
    )

    ds_tr = prepare_dataset(ds_tr,
                            batch_size,
                            shuffle=True,
                            drop_remainder=True)
    ds_val = prepare_dataset(ds_val,
                             batch_size,
                             shuffle=False,
                             drop_remainder=False)
    ds_test = prepare_dataset(ds_test,
                              batch_size,
                              shuffle=False,
                              drop_remainder=False)

    tuner = RandomSearch(
        build_model,
        objective="val_accuracy",
        max_trials=n_trials,
        executions_per_trial=execution_per_trial,
        directory=SEARCH_DIR,
        project_name=project,
    )

    # ? add callbacks
    tuner.search(
        ds_tr,
        epochs=epochs,
        validation_data=ds_val,
    )

    best_model: tf.keras.Model = tuner.get_best_models(num_models=1)[0]
    best_model.build((None, DEFAULTS["num_features"]))
    results = best_model.evaluate(ds_test, return_dict=True)

    tuner.results_summary(num_trials=1)
    best_hyperparams = tuner.get_best_hyperparameters(num_trials=1)
    print(f"Test results: {results}")

    output = {"results": results, "best_hyperparams": best_hyperparams}
    with open("search_results.pickle", "wb") as f:
        pickle.dump(output, f)
Example #12
def train_data(symbol, timeframe):

    df = mt.history(symbol, timeframe, 2)
    print("training", df)

    df.isnull().sum().sum()  # there are no nans
    df.fillna(method="ffill", inplace=True)
    df = df.loc[~df.index.duplicated(keep='first')]

    # indicators
    df = Indicators(df)

    df = df.dropna()
    df = df.fillna(method="ffill")
    df = df.dropna()

    df.sort_index(inplace=True)

    df['target'] = list(map(classify, df['return'], df['return_next']))

    print(df)

    df.dropna(inplace=True)
    df['target'].value_counts()
    df.dropna(inplace=True)
    df = df.astype('float32')

    df = preprocess_df(df)
    train_x, train_y = df
    validation_x, validation_y = df

    train_y = np.asarray(train_y)
    validation_y = np.asarray(validation_y)
    print(('%% of Class0 : %f Sell' %
           (np.count_nonzero(train_y == 0) / float(len(train_y)))))
    print(('%% of Class1 : %f Buy' %
           (np.count_nonzero(train_y == 1) / float(len(train_y)))))

    def build_model(hp):
        model = Sequential()

        model.add(
            LSTM(hp.Int('units', min_value=10, max_value=50, step=1),
                 input_shape=(train_x.shape[1:]),
                 return_sequences=True))
        model.add(Dropout(0.1))
        model.add(BatchNormalization())

        model.add(
            LSTM(units=hp.Int('units', min_value=10, max_value=50, step=1),
                 return_sequences=True))
        model.add(Dropout(0.2))
        model.add(BatchNormalization())

        model.add(
            LSTM(units=hp.Int('units', min_value=10, max_value=50, step=1)))
        model.add(Dropout(0.2))
        model.add(BatchNormalization())

        model.add(
            Dense(hp.Int('units', min_value=10, max_value=50, step=1),
                  activation='relu'))
        model.add(Dropout(0.2))

        model.add(Dense(2, activation='softmax'))

        # Compile model
        model.compile(optimizer=Adam(hp.Choice('learning_rate',
                                               values=[1e-2])),
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return model

    tuner = RandomSearch(build_model,
                         objective='val_accuracy',
                         max_trials=10,
                         executions_per_trial=1,
                         directory='TUN',
                         project_name='IQOTC')

    # tuner.search_space_summary()
    stop_early = EarlyStopping(monitor='val_loss', patience=15)

    tuner.search(train_x,
                 train_y,
                 batch_size=BATCH_SIZE,
                 epochs=EPOCHS,
                 validation_split=0.2,
                 verbose=1,
                 callbacks=[stop_early])

    best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]

    print(f"""
    The optimal number of units layer is {best_hps.get('units')} 
    and the optimal learning rate for the optimizer is {best_hps.get('learning_rate')}.
    """)

    filepath = "ThesisBrain"
    # Build the model with the optimal hyperparameters and train it on the data
    model = tuner.hypermodel.build(best_hps)

    history = model.fit(train_x,
                        train_y,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS,
                        validation_split=0.2,
                        verbose=1)
    val_acc_per_epoch = history.history['val_accuracy']
    best_epoch = val_acc_per_epoch.index(max(val_acc_per_epoch)) + 1
    print(('Best epoch: %d' % (best_epoch, )))
    hypermodel = tuner.hypermodel.build(best_hps)
    scores = model.evaluate(validation_x, validation_y, verbose=0)
    print(("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100)))

    del model
    del history
    # Retrain the model
    hypermodel.fit(validation_x,
                   validation_y,
                   batch_size=BATCH_SIZE,
                   epochs=best_epoch,
                   verbose=1)
    hypermodel.save("models/{}.h5".format(filepath))

    scores = hypermodel.evaluate(validation_x, validation_y, verbose=0)
    print(("%s: %.2f%%" % (hypermodel.metrics_names[1], scores[1] * 100)))
    scores = scores[1] * 100

    return scores
Example #13
def tuneReg(data,
            target,
            max_layers=10,
            min_layers=2,
            min_dense=32,
            max_dense=512,
            executions_per_trial=3,
            max_trials=3,
            epochs=10,
            activation='relu',
            step=32,
            verbose=0,
            test_size=0.2):

    # function build model using hyperparameter
    def build_model(hp):
        model = keras.Sequential()
        for i in range(hp.Int('num_layers', min_layers, max_layers)):
            model.add(
                Dense(units=hp.Int('units_' + str(i),
                                   min_value=min_dense,
                                   max_value=max_dense,
                                   step=step),
                      activation=activation))
        model.add(Dense(1))
        model.compile(optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                      loss='mean_squared_error')
        return model

    # random search for the model
    tuner = RandomSearch(build_model,
                         objective='loss',
                         max_trials=max_trials,
                         executions_per_trial=executions_per_trial)
    # tuner.search_space_summary()
    # del data[target]

    X_train, X_test, y_train, y_test = train_test_split(data,
                                                        target,
                                                        test_size=test_size,
                                                        random_state=49)

    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model
    tuner.search(X_train,
                 y_train,
                 epochs=epochs,
                 validation_data=(X_test, y_test),
                 callbacks=[tf.keras.callbacks.TensorBoard('my_dir')])

    models = tuner.get_best_models(num_models=1)
    hyp = tuner.get_best_hyperparameters(num_trials=1)[0]
    #hyp = tuner.oracle.get_best_trials(num_trials=1)[0].hyperparameters.values
    #best_hps = np.stack(hyp).astype(None)
    history = tuner_hist(data,
                         target,
                         tuner,
                         hyp,
                         epochs=epochs,
                         verbose=verbose,
                         test_size=test_size)
    """
    Return:
        models[0] : best model obtained after tuning
        best_hps : best Hyperprameters obtained after tuning, stored as map
        history : history of the data executed from the given model
    """
    return models[0], hyp, history
Example #14
def tuneClass(X,
              y,
              num_classes,
              max_layers=10,
              min_layers=2,
              min_dense=32,
              max_dense=512,
              executions_per_trial=3,
              max_trials=3,
              activation='relu',
              loss='categorical_crossentropy',
              metrics='accuracy',
              epochs=10,
              step=32,
              verbose=0,
              test_size=0.2):
    # function build model using hyperparameter
    le = preprocessing.LabelEncoder()
    y = tf.keras.utils.to_categorical(le.fit_transform(y),
                                      num_classes=num_classes)

    def build_model(hp):
        model = keras.Sequential()
        for i in range(hp.Int('num_layers', min_layers, max_layers)):
            model.add(
                Dense(units=hp.Int('units_' + str(i),
                                   min_value=min_dense,
                                   max_value=max_dense,
                                   step=step),
                      activation=activation))
        model.add(Dense(num_classes, activation='softmax'))
        model.compile(optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                      loss=loss,
                      metrics=[metrics])
        return model

    # tuners, establish the object to look through the tuner search space
    tuner = RandomSearch(build_model,
                         objective='loss',
                         max_trials=max_trials,
                         executions_per_trial=executions_per_trial,
                         directory='models',
                         project_name='class_tuned')

    # tuner.search_space_summary()

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=test_size,
                                                        random_state=49)

    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model
    tuner.search(X_train,
                 y_train,
                 epochs=epochs,
                 validation_data=(X_test, y_test))
    models = tuner.get_best_models(num_models=1)
    hyp = tuner.get_best_hyperparameters(num_trials=1)[0]
    #hyp = tuner.oracle.get_best_trials(num_trials=1)[0].hyperparameters.values
    #best_hps = np.stack(hyp).astype(None)
    history = tuner_hist(X,
                         y,
                         tuner,
                         hyp,
                         epochs=epochs,
                         verbose=verbose,
                         test_size=test_size)
    """
    Return:
        models[0] : best model obtained after tuning
        best_hps : best Hyperprameters obtained after tuning, stored as array
        history : history of the data executed from the given model
    """
    return models[0], hyp, history
Example #15
    tuner = RandomSearch(trial,
                         objective='val_acc',
                         max_trials=1,
                         executions_per_trial=1,
                         directory=LOG_DIR)

    tuner.search(x=X_train,
                 y=y_train,
                 epochs=100,
                 batch_size=50,
                 shuffle=True,
                 validation_data=(X_val, y_val))

    model = RNN(input_shape=X_train.shape[1:],
                output_shape=n_classes,
                hyperparams=tuner.get_best_hyperparameters()[0].values).run()

    hist = model.fit(X_train,
                     y_train,
                     epochs=100,
                     batch_size=50,
                     shuffle=True,
                     validation_data=(X_val, y_val))

    model.summary()

    pickle.dump(
        hist.history,
        open('histories/' + sys.argv[1] + '_' + sys.argv[2] + '.pickle', 'wb'))
    model.save('models/model_' + sys.argv[1] + '_' + sys.argv[2] + '.h5')
Example #16
def tuneReg(data,
            target,
            max_layers=10,
            min_layers=2,
            min_dense=32,
            max_dense=512,
            executions_per_trial=1,
            max_trials=5,
            epochs=10,
            activation='relu',
            directory='my_dir',
            step=32,
            verbose=0,
            test_size=0.2):
    # function build model using hyperparameter
    def build_model(hp):
        model = keras.Sequential()
        model.add(
            Dense(units=hp.Int('units_0',
                               min_value=min_dense,
                               max_value=max_dense,
                               step=step),
                  input_dim=data.shape[1],
                  activation=activation))
        for i in range(hp.Int('num_layers', min_layers, max_layers)):
            model.add(
                Dense(units=hp.Int('units_' + str(i + 1),
                                   min_value=min_dense,
                                   max_value=max_dense,
                                   step=step),
                      activation=activation))
            model.add(
                Dropout(rate=hp.Float('dropout_' + str(i),
                                      min_value=0.0,
                                      max_value=0.5,
                                      default=0.20,
                                      step=0.05)))
        model.add(Dense(1, activation='linear'))
        lrate = hp.Float('learning_rate',
                         min_value=1e-5,
                         max_value=1e-1,
                         sampling='LOG',
                         default=1e-3)
        # hp.Choice only accepts primitive values (int, float, str, bool),
        # so tune the optimizer by name and map it to an instance
        optimizer_name = hp.Choice(
            'optimizer', values=['adam', 'sgd', 'rmsprop', 'adamax'])
        optimizers = {
            'adam': keras.optimizers.Adam(learning_rate=lrate),
            'sgd': keras.optimizers.SGD(learning_rate=lrate),
            'rmsprop': keras.optimizers.RMSprop(learning_rate=lrate),
            'adamax': keras.optimizers.Adamax(learning_rate=lrate)
        }
        model.compile(optimizer=optimizers[optimizer_name],
                      loss=hp.Choice('loss',
                                     values=[
                                         'mean_squared_logarithmic_error',
                                         'mean_squared_error', 'huber_loss',
                                         'mean_absolute_error',
                                         'cosine_similarity', 'log_cosh'
                                     ],
                                     default='mean_squared_error'),
                      metrics=['accuracy'])
        return model

    # random search for the model
    tuner = RandomSearch(build_model,
                         objective='val_accuracy',
                         max_trials=max_trials,
                         executions_per_trial=executions_per_trial,
                         directory=directory)
    # tuner.search_space_summary()
    # del data[target]

    X_train, X_test, y_train, y_test = train_test_split(data,
                                                        target,
                                                        test_size=test_size,
                                                        random_state=49)

    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model

    tuner.search(X_train,
                 y_train,
                 epochs=epochs,
                 validation_data=(X_test, y_test),
                 callbacks=[tf.keras.callbacks.TensorBoard('my_dir')])

    models = tuner.get_best_models(num_models=1)[0]
    hyp = tuner.get_best_hyperparameters(num_trials=1)[0]
    history = tuner_hist(data,
                         target,
                         tuner,
                         hyp,
                         epochs=epochs,
                         verbose=verbose,
                         test_size=test_size)
    """
    Return:
        models[0] : best model obtained after tuning
        best_hps : best Hyperprameters obtained after tuning, stored as map
        history : history of the data executed from the given model
    """
    return models, hyp, history, X_test, y_test
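Examples #4, #13, #14 and #16 all call a tuner_hist helper that is not included in this listing. For the (data, target, tuner, hp, ...) call signature, a plausible minimal version is sketched below as an assumption, not the project's actual helper; the Example #4 variant that takes image arrays and an img flag is not covered.

from sklearn.model_selection import train_test_split

def tuner_hist(data, target, tuner, hp, epochs=10, verbose=0, test_size=0.2):
    # Rebuild the best model from the tuned hyperparameters, train it on a
    # fresh split, and return the Keras History object.
    model = tuner.hypermodel.build(hp)
    X_train, X_test, y_train, y_test = train_test_split(
        data, target, test_size=test_size, random_state=49)
    history = model.fit(X_train, y_train,
                        epochs=epochs,
                        validation_data=(X_test, y_test),
                        verbose=verbose)
    return history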
Example #17
0
tensorboard_callback = TensorBoard(log_dir, histogram_freq=1, profile_batch=0)

start = datetime.now()
tuner.search(train_imgs,
             train_labels,
             epochs=epochs,
             batch_size=batch_size,
             callbacks=[tensorboard_callback],
             validation_data=(test_imgs, test_labels))
print(f'Time taken to complete {epochs} epochs: {datetime.now() - start}')

tuner.results_summary()

with open(f"tuner_{int(datetime.now())}.pkl", "wb") as f:
    pickle.dump(tuner, f)
""" tuner = pickle.load(open("tuner_1576628824.pkl","rb"))
tuner.get_best_hyperparameters()[0].values
tuner.get_best_models()[0].summary()
 """
'''
# Convolutional Neural Network
cnn = Sequential(
    [
    Conv2D(32, (3,3), activation='relu', input_shape=(32,32,3), padding='same'),
    MaxPooling2D((2,2)),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    MaxPooling2D((2, 2)),
    Conv2D(32, (3, 3), activation='relu', padding='same'),
    Flatten(),
Example #18
class AutoEncoder():
    def __init__(self, df_source_info, df_fluxes, df_wavelengths):
        X = self._prepare_data(df_source_info, df_fluxes, df_wavelengths)
        objids = self.df_quasars['objid'].values
        print(f'objids = {objids}')

        # split fluxes and objids together so the test objids stay aligned with X_test
        X_train, X_test, self.objids_train, self.objids_test = train_test_split(
            X, objids, test_size=0.2)
        
        self.scaler = StandardScaler()
        X_train = self.scaler.fit_transform(X_train)
        X_test = self.scaler.transform(X_test)

        self.X_train = np.expand_dims(X_train, axis=2)
        self.X_test = np.expand_dims(X_test, axis=2)
        
        print(f'self.X_train = {self.X_train}')
        
        self.optimizer = Nadam(lr=0.001)

    
    def _prepare_data(self, df_source_info, df_fluxes, df_wavelengths):
        if "b'" in str(df_source_info['class'][0]):
            df_source_info = remove_bytes_from_class(df_source_info)
    
        self.df_quasars = df_source_info.loc[df_source_info['class'] == 'QSO']
        quasar_objids = self.df_quasars['objid'].to_numpy()
        quasar_fluxes = df_fluxes.loc[df_fluxes['objid'].isin(quasar_objids)]
        
        X = np.delete(quasar_fluxes.values, 0, axis=1)
        X = X[:, 0::8]
        print(f'X.shape = {X.shape}')

        X = X[:, np.mod(np.arange(X[0].size),25)!=0]

        print(f'X.shape {X.shape}')
        wavelengths = df_wavelengths.to_numpy()

        wavelengths = wavelengths[::8]
        self.wavelengths = wavelengths[0:448]
        # plot_spectrum(X[0], wavelengths)
        return X
    
    def build_model(self, hp):

        hyperparameters = {
            'layer_1_filters': hp.Choice('layer_1_filters', values=[16, 32, 64, 128, 256], default=64),
            'layer_1_kernel_size': hp.Choice('layer_1_kernel_size', values=[3, 5, 7, 9, 11]),
            'layer_2_filters': hp.Choice('layer_2_filters', values=[8, 16, 32, 64, 128], default=32),
            'layer_2_kernel_size': hp.Choice('layer_2_kernel_size', values=[3, 5, 7, 9]),
            'layer_3_filters': hp.Choice('layer_3_filters', values=[4, 8, 16, 32], default=32),
            'layer_3_kernel_size': hp.Choice('layer_3_kernel_size', values=[3, 5, 7]),
            'layer_4_filters': hp.Choice('layer_4_filters', values=[4, 8, 12, 16], default=16),
            'layer_4_kernel_size': hp.Choice('layer_4_kernel_size', values=[3, 5]),
            'layer_5_filters': hp.Choice('layer_5_filters', values=[2, 3, 4, 8], default=8),
            'layer_5_kernel_size': hp.Choice('layer_5_kernel_size', values=[3]),
            'optimizer': hp.Choice('optimizer', values=['adam', 'nadam', 'rmsprop']),
            'last_activation': hp.Choice('last_activation', ['tanh'])
        }
        
        # ================================================================================== #
        # ==================================== ENCODER ===================================== #
        # ================================================================================== #
        
        input_layer = Input(shape=(self.X_train.shape[1], 1))

        # encoder
        x = Conv1D(filters=hyperparameters['layer_1_filters'],
                   kernel_size=hyperparameters['layer_1_kernel_size'],
                   activation='relu', 
                   padding='same')(input_layer)

        x = MaxPooling1D(2)(x)
        x = Conv1D(filters=hyperparameters['layer_2_filters'],
                    kernel_size=hyperparameters['layer_2_kernel_size'],
                    activation='relu',
                    padding='same')(x)
        
        x = MaxPooling1D(2)(x)
        x = Conv1D(filters=hyperparameters['layer_3_filters'],
                    kernel_size=hyperparameters['layer_3_kernel_size'],
                    activation='relu',
                    padding='same')(x)

        x = MaxPooling1D(2)(x)
        x = Conv1D(filters=hyperparameters['layer_4_filters'],
                    kernel_size=hyperparameters['layer_4_kernel_size'],
                    activation='relu',
                    padding='same')(x)

        x = MaxPooling1D(2)(x)
        x = Conv1D(filters=hyperparameters['layer_5_filters'],
                    kernel_size=hyperparameters['layer_5_kernel_size'],
                    activation='relu',
                    padding='same')(x)

        encoded = MaxPooling1D(2, padding="same")(x)

        # ================================================================================== #
        # ==================================== DECODER ===================================== #
        # ================================================================================== #

        x = Conv1D(filters=hyperparameters['layer_5_filters'],
                   kernel_size=hyperparameters['layer_5_kernel_size'],
                   activation='relu',
                   padding='same')(encoded)
        
        x = UpSampling1D(2)(x)

        x = Conv1D(filters=hyperparameters['layer_4_filters'],
                   kernel_size=hyperparameters['layer_4_kernel_size'],
                   activation='relu',
                   padding='same')(x)

        x = UpSampling1D(2)(x)

        x = Conv1D(filters=hyperparameters['layer_3_filters'],
                   kernel_size=hyperparameters['layer_3_kernel_size'],
                   activation='relu',
                   padding='same')(x)

        x = UpSampling1D(2)(x)

        x = Conv1D(filters=hyperparameters['layer_2_filters'],
                   kernel_size=hyperparameters['layer_2_kernel_size'],
                   activation='relu',
                   padding='same')(x)

        x = UpSampling1D(2)(x)

        x = Conv1D(filters=hyperparameters['layer_1_filters'],
                   kernel_size=hyperparameters['layer_1_kernel_size'],
                   activation='relu',
                   padding='same')(x)

        x = UpSampling1D(2)(x)
        decoded = Conv1D(1, 1, activation=hyperparameters['last_activation'], padding='same')(x)
        
        self.autoencoder = Model(input_layer, decoded)
        self.autoencoder.summary()
        self.autoencoder.compile(loss='mse', optimizer=hyperparameters['optimizer'])

        return self.autoencoder
    
    def train_model(self, epochs, batch_size=32):
        self.tuner = RandomSearch(self.build_model,
                                  objective='val_loss',
                                  max_trials=50,
                                  executions_per_trial=1,
                                  directory='logs/keras-tuner/',
                                  project_name='autoencoder')

        self.tuner.search_space_summary()

        self.tuner.search(x=self.X_train,
                          y=self.X_train,
                          epochs=24,
                          batch_size=32,
                          validation_data=(self.X_test, self.X_test),
                          callbacks=[EarlyStopping('val_loss', patience=3)])

        self.tuner.results_summary()

    def evaluate_model(self):
        best_model = self.tuner.get_best_models(1)[0]
        best_model.save('best_autoencoder_model')
        best_hyperparameters = self.tuner.get_best_hyperparameters(1)[0]

        print(f'best_model = {best_model}')
        print(f'best_hyperparameters = {best_hyperparameters.values}')
        nth_qso = 24

        X_test = np.squeeze(self.X_test, axis=2)

        preds = best_model.predict(self.X_test)
        preds = self.scaler.inverse_transform(np.squeeze(preds, axis=2))
        original = self.scaler.inverse_transform(X_test)

        qso_ra = self.df_quasars.loc[self.df_quasars['objid'] == self.objids_test[nth_qso]]['ra'].values[0]
        qso_dec = self.df_quasars.loc[self.df_quasars['objid'] == self.objids_test[nth_qso]]['dec'].values[0]
        qso_plate = self.df_quasars.loc[self.df_quasars['objid'] == self.objids_test[nth_qso]]['plate'].values[0]
        qso_z = self.df_quasars.loc[self.df_quasars['objid'] == self.objids_test[nth_qso]]['z'].values[0]

        plotify = Plotify(theme='ugly') 

        _, axs = plotify.get_figax(nrows=2, figsize=(8, 8))
        axs[0].plot(self.wavelengths, original[nth_qso], color=plotify.c_orange)
        axs[1].plot(self.wavelengths, preds[nth_qso], color=plotify.c_orange)
        axs[0].set_title(f'ra = {qso_ra}, dec = {qso_dec}, z = {qso_z}, plate = {qso_plate}', fontsize=14)
        axs[1].set_title(f'Autoencoder recreation')
        axs[0].set_ylabel(r'$F_{\lambda[10^{-17} erg \: cm^{-2}s^{-1} Å^{-1}]}$', fontsize=14)
        axs[1].set_ylabel(r'$F_{\lambda[10^{-17} erg \: cm^{-2}s^{-1} Å^{-1}]}$', fontsize=14)
        axs[1].set_xlabel('Wavelength (Å)')

        plt.subplots_adjust(hspace=0.4)
        # plt.savefig('plots/autoencoder_gaussian', facecolor=plotify.c_background, dpi=180)
        plt.show()

        return preds
plt.xlabel('EPOCH')
plt.ylabel('Accuracy')
plt.show()

random_tuner = RandomSearch(hypermodel,
                            objective='accuracy',
                            max_trials=10,
                            seed=10,
                            project_name='divorce test')

random_tuner.search(X_train.values,
                    y_train.values.flatten(),
                    epochs=10,
                    validation_data=(X_test.values, y_test.values.flatten()))

random_params = random_tuner.get_best_hyperparameters()[0]

random_model = random_tuner.hypermodel.build(random_params)

random_model.fit(X.values, y.values.flatten(), epochs=15)

random_accuracy_df = pd.DataFrame(random_model.history.history)

random_accuracy_df[['loss', 'accuracy']].plot()
plt.title('Loss & Accuracy Per EPOCH For Random Model')
plt.xlabel('EPOCH')
plt.ylabel('Accuracy')
plt.show()

bayesian_tuner = BayesianOptimization(hypermodel,
                                      objective='accuracy',
                  activation=hp.Choice('activation',
                                       values=['relu', 'sigmoid', 'tanh']),
                  name='recurrent_layer'))

    model.add(Dense(units=1, activation='linear', name='output_layer'))

    model.compile(optimizer=SGD(learning_rate=hp.Choice(
        'learning_rate', values=list(np.logspace(-10, -0.2, base=2, num=15))),
                                momentum=hp.Choice('momentum',
                                                   values=list(
                                                       np.logspace(-10,
                                                                   -0.1,
                                                                   base=2,
                                                                   num=15)))),
                  loss='mse',
                  metrics=['mse'])
    return model


tuner = RandomSearch(build_model,
                     objective='loss',
                     max_trials=30,
                     executions_per_trial=1,
                     seed=2020)

tuner.search_space_summary()

tuner.search(X_train, y_train, epochs=30, validation_split=0.2)

tuner.get_best_hyperparameters()
SEED = 10
direc = 'logs'

from kerastuner.tuners import RandomSearch

tuner = RandomSearch(build_model,
                     objective="val_accuracy",
                     max_trials=3,
                     seed=SEED,
                     directory=direc,
                     executions_per_trial=3)

tuner.search_space_summary()

tuner.search(x_train,
             y_train,
             epochs=50,
             batch_size=24,
             validation_data=(x_test, y_test))

tuner.results_summary()

model = tuner.get_best_models()[0]
model.summary()
model.save("HP_model.h5")

print(tuner.get_best_hyperparameters()[0].values)
hps = tuner.oracle.get_best_trials(num_trials=1)[0].hyperparameters

model = build_model(hps)
model.summary()
Example #22

tuner = RandomSearch(
    build_model,
    objective="val_accuracy",
    max_trials=15,  # how many times change the model randomly
    executions_per_trial=1  # how many times to train the model selected
)

tuner.search(x=X_train,
             y=y_train,
             epochs=20,
             batch_size=64,
             validation_data=(X_test, y_test))

best_hps = tuner.get_best_hyperparameters(
    num_trials=1)[0]  # save hyperparameters when the val_accuracy is highest.

print(
    "The hyperparameter search is complete. The optimal number of units in the first densely-connected layer is %d, "
    "the number of layers is %d and the optimal learning rate for the optimizer is %f."
    % (best_hps.get('input_units'), best_hps.get('n_layers'),
       best_hps.get('learning_rate')))

model = tuner.hypermodel.build(
    best_hps)  # build the model with the best hyperparameters
model.save('save.h5')

model.fit(X_train,
          y_train,
          batch_size=64,
          epochs=5,
Example #23

tuner = RandomSearch(
    build_model,  # the build_model function is defined above
    objective="val_accuracy",
    max_trials=15,  # how many different models to sample at random
    executions_per_trial=1  # how many times to train each sampled model
)

tuner.search(x=X_train,
             y=y_train,
             epochs=20,
             batch_size=64,
             validation_data=(X_test, y_test))

best_hps = tuner.get_best_hyperparameters(
    num_trials=1)[0]  # keep the hyperparameters from the trial with the highest val_accuracy

print(
    "The hyperparameter search is complete. The optimal number of units in the first densely-connected layer is %d, "
    "the number of layers is %d and the optimal learning rate for the optimizer is %f."
    % (best_hps.get('input_units'), best_hps.get('n_layers'),
       best_hps.get('learning_rate')))

model = tuner.hypermodel.build(
    best_hps)  # build the model from the best hyperparameters saved above
model.save('save.h5')

model.fit(X_train,
          y_train,
          batch_size=64,
          epochs=5,
Example #24
    def createModel(self):
        self.model_instance += 1
        clear_session()

        features, label = self.getDataset()
        X_train, y_train = self.createLag(features, label)
        X_train = X_train[:, self.lags]

        learning_rate = float(self.hyperparameters["Learning_Rate"].get())
        momentum = float(self.hyperparameters["Momentum"].get())
        optimizers = {
                "Adam": Adam(learning_rate=learning_rate),
                "SGD": SGD(learning_rate=learning_rate, momentum=momentum),
                "RMSprop": RMSprop(learning_rate=learning_rate, momentum=momentum)
                }

        shape = (X_train.shape[1], X_train.shape[2])
        model_choice = self.model_var.get()

        if not self.do_optimization:
            model = Sequential()
            model.add(Input(shape=shape))
            
            if model_choice == 0:
                model.add(Flatten())

            layers = self.no_optimization_choice_var.get()
            for i in range(layers):
                neuron_number = self.neuron_numbers_var[i].get()
                activation_function = self.activation_var[i].get()
                if model_choice == 0:
                    model.add(Dense(neuron_number, activation=activation_function, kernel_initializer=GlorotUniform(seed=0)))
                
                elif model_choice == 1:
                    model.add(Conv1D(filters=neuron_number, kernel_size=2, activation=activation_function, kernel_initializer=GlorotUniform(seed=0)))
                    model.add(MaxPooling1D(pool_size=2))
                
                elif model_choice == 2:
                    if i == layers-1:
                        model.add(LSTM(neuron_number, activation=activation_function, return_sequences=False, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0)))
                        model.add(Dropout(0.2))
                    else:
                        model.add(LSTM(neuron_number, activation=activation_function, return_sequences=True, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0)))
                        model.add(Dropout(0.2))

                elif model_choice == 3:
                    if i == layers-1:
                        model.add(Bidirectional(LSTM(neuron_number, activation=activation_function, return_sequences=False, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0))))
                        model.add(Dropout(0.2))
                    else:
                        model.add(Bidirectional(LSTM(neuron_number, activation=activation_function, return_sequences=True, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0))))
                        model.add(Dropout(0.2))

                elif model_choice == 4:
                    if i == layers-1:
                        model.add(SimpleRNN(neuron_number, activation=activation_function, return_sequences=False, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0)))
                        model.add(Dropout(0.2))
                    else:
                        model.add(SimpleRNN(neuron_number, activation=activation_function, return_sequences=True, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0)))
                        model.add(Dropout(0.2))
                
                elif model_choice == 5:
                    if i == layers-1:
                        model.add(GRU(neuron_number, activation=activation_function, return_sequences=False, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0)))
                        model.add(Dropout(0.2))
                    else:
                        model.add(GRU(neuron_number, activation=activation_function, return_sequences=True, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0)))
                        model.add(Dropout(0.2))
            
            if model_choice == 1:
                model.add(Flatten())
                model.add(Dense(32, kernel_initializer=GlorotUniform(seed=0)))

            model.add(Dense(1, activation=self.output_activation.get(), kernel_initializer=GlorotUniform(seed=0)))
            model.compile(optimizer = optimizers[self.hyperparameters["Optimizer"].get()], loss=self.hyperparameters["Loss_Function"].get())
            
            history = model.fit(X_train, y_train, epochs=self.hyperparameters["Epoch"].get(), batch_size=self.hyperparameters["Batch_Size"].get(), verbose=1, shuffle=False)
            loss = history.history["loss"][-1]
            self.train_loss.set(loss)

        elif self.do_optimization:
            layer = self.optimization_choice_var.get()

            if model_choice == 0:
                def build_model(hp):
                    model = Sequential()
                    model.add(Input(shape=shape))
                    model.add(Flatten())
                    for i in range(layer):
                        n_min = self.neuron_min_number_var[i].get()
                        n_max = self.neuron_max_number_var[i].get()
                        step = int((n_max - n_min)/4)
                        model.add(Dense(units=hp.Int('MLP_'+str(i), min_value=n_min, max_value=n_max, step=step), activation='relu'))
                    model.add(Dense(1))
                    model.compile(optimizer = optimizers[self.hyperparameters["Optimizer"].get()], loss=self.hyperparameters["Loss_Function"].get())
                    return model
                
                name = str(self.model_instance) + ". MLP"

            elif model_choice == 1:
                def build_model(hp):
                    model = Sequential()
                    model.add(Input(shape=shape))
                    for i in range(layer):
                        n_min = self.neuron_min_number_var[i].get()
                        n_max = self.neuron_max_number_var[i].get()
                        step = int((n_max-n_min)/4)
                        model.add(Conv1D(filters=hp.Int("CNN_"+str(i), min_value=n_min, max_value=n_max, step=step), kernel_size=2, activation="relu", kernel_initializer=GlorotUniform(seed=0)))
                        model.add(MaxPooling1D(pool_size=2))
                    
                    model.add(Flatten())
                    model.add(Dense(32, kernel_initializer=GlorotUniform(seed=0)))
                    model.add(Dense(1, kernel_initializer=GlorotUniform(seed=0)))
                    model.compile(optimizer = optimizers[self.hyperparameters["Optimizer"].get()], loss=self.hyperparameters["Loss_Function"].get())
                    return model
                
                name = str(self.model_instance) + ". CNN"

            elif model_choice == 2:
                def build_model(hp):
                    model = Sequential()
                    model.add(Input(shape=shape))
                    for i in range(layer):
                        n_min = self.neuron_min_number_var[i].get()
                        n_max = self.neuron_max_number_var[i].get()
                        step = int((n_max - n_min)/4)
                        model.add(LSTM(units=hp.Int("LSTM_"+str(i), min_value=n_min, max_value=n_max, step=step), activation='relu', return_sequences=True, kernel_initializer=GlorotUniform(seed=0)))
                        if i == layer-1:
                            model.add(LSTM(units=hp.Int("LSTM_"+str(i), min_value=n_min, max_value=n_max, step=step), activation='relu', return_sequences=False, kernel_initializer=GlorotUniform(seed=0)))
                    
                    model.add(Dense(1))
                    model.compile(optimizer = optimizers[self.hyperparameters["Optimizer"].get()], loss=self.hyperparameters["Loss_Function"].get())
                    return model
                
                name = str(self.model_instance) + ". LSTM"
            
            elif model_choice == 3:
                def build_model(hp):
                    model = Sequential()
                    model.add(Input(shape=shape))
                    for i in range(layer):
                        n_min = self.neuron_min_number_var[i].get()
                        n_max = self.neuron_max_number_var[i].get()
                        step = int((n_max - n_min)/4)
                        model.add(Bidirectional(LSTM(units=hp.Int("LSTM_"+str(i), min_value=n_min, max_value=n_max, step=step), activation='relu', return_sequences=True, kernel_initializer=GlorotUniform(seed=0))))
                        if i == layer-1:
                            model.add(Bidirectional(LSTM(units=hp.Int("LSTM_"+str(i), min_value=n_min, max_value=n_max, step=step), activation='relu', return_sequences=False, kernel_initializer=GlorotUniform(seed=0))))
                    
                    model.add(Dense(1, kernel_initializer=GlorotUniform(seed=0)))
                    model.compile(optimizer = optimizers[self.hyperparameters["Optimizer"].get()], loss=self.hyperparameters["Loss_Function"].get())
                    return model

                name = str(self.model_instance) + ". Bi-LSTM"


            tuner = RandomSearch(build_model, objective='loss', max_trials=25, executions_per_trial=2, directory=self.runtime, project_name=name)
            
            tuner.search(X_train, y_train, epochs=self.hyperparameters["Epoch"].get(), batch_size=self.hyperparameters["Batch_Size"].get())
            hps = tuner.get_best_hyperparameters(num_trials = 1)[0]
            model = tuner.hypermodel.build(hps)
            
            history = model.fit(X_train, y_train, epochs=self.hyperparameters["Epoch"].get(), batch_size=self.hyperparameters["Batch_Size"].get(), verbose=1)
            loss = history.history["loss"][-1]
            self.train_loss.set(loss)
            

            for i in range(layer):
                if model_choice == 0:
                    self.best_model_neurons[i].set(model.get_layer(index=i+1).get_config()["units"])
                elif model_choice == 1:
                    self.best_model_neurons[i].set(model.get_layer(index=(2*i)).get_config()["filters"])
                elif model_choice == 2:
                    self.best_model_neurons[i].set(model.get_layer(index=i).get_config()["units"])
                elif model_choice == 3:
                    self.best_model_neurons[i].set(model.get_layer(index=i).get_config()["layer"]["config"]["units"])
        model.summary()
        self.model = model
                     overwrite=True)

tuner.search_space_summary()

tuner.search(train_data,
             train_labels,
             epochs=20,
             validation_data=(test_data, test_labels))

tuner.results_summary()

# get the best model
best_model = tuner.get_best_models(num_models=1)[0]

# get the optimal hyperparameters
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]

print(f"""
The hyperparameter search is complete. 
- optimal number of units in dense layer 0 is {best_hps.get('units_0')}
- optimal number of units in dense layer 1 is {best_hps.get('units_1')}
- optimal number of units in dense layer 2 is {best_hps.get('units_2')}
- optimal number of units in dense layer 3 is {best_hps.get('units_3')}
- optimal number of units in dense layer 4 is {best_hps.get('units_4')}
- optimal number of units in dense layer 5 is {best_hps.get('units_5')}
- optimal activation is {best_hps.get('dense_activation')}
- optimal dropout is {best_hps.get('dropout')}
- the optimal learning rate for the optimizer is {best_hps.get('learning_rate')}.
""")

# Build the model with the optimal hyperparameters and train it on the data
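The listing breaks off here. A typical continuation, assumed from the pattern of the earlier examples rather than taken from the source, would rebuild and retrain the best model:

# Assumed continuation: rebuild the best model and retrain it; the epoch count is illustrative.
model = tuner.hypermodel.build(best_hps)
history = model.fit(train_data,
                    train_labels,
                    epochs=20,
                    validation_data=(test_data, test_labels))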