Example #1
def train_resnet50():

    # generating data from existing images
    train_datagenerator, test_datagenerator = data_generator()

    # callback: reduce the learning rate when val_loss plateaus
    lr_reduction = ReduceLROnPlateau(monitor='val_loss',
                                     factor=0.1,
                                     patience=3,
                                     min_lr=1e-5)

    # the hypermodel build function comes from model.py
    tuner = RandomSearch(project_name=os.path.join(LOGS, 'trial_2/resnet_50'),
                         max_trials=3,
                         executions_per_trial=5,
                         hypermodel=resnet_50,  # was vgg_16, which contradicted train_resnet50
                         objective='val_accuracy')
    tuner.search(train_datagenerator,
                 epochs=10,
                 callbacks=[lr_reduction],
                 validation_data=test_datagenerator)
    best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]

    model = tuner.hypermodel.build(best_hps)
    # in TF2, Model.fit accepts generators directly (fit_generator is deprecated)
    model.fit(train_datagenerator,
              epochs=EPOCHS,
              validation_data=test_datagenerator)

    return model
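
The build function passed as hypermodel above lives in model.py and is not shown; a minimal sketch of what such a function could look like (the backbone choice, search space, and class count below are assumptions, not the original code):

from tensorflow import keras
from tensorflow.keras import layers

def resnet_50(hp):
    # hypothetical hypermodel: frozen pretrained backbone + tunable head
    base = keras.applications.ResNet50(include_top=False, pooling='avg',
                                       input_shape=(224, 224, 3))
    base.trainable = False
    x = layers.Dense(hp.Int('dense_units', 64, 512, step=64),
                     activation='relu')(base.output)
    outputs = layers.Dense(2, activation='softmax')(x)
    model = keras.Model(base.input, outputs)
    model.compile(optimizer=keras.optimizers.Adam(
                      hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model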
Example #2
    def test_tuner_search(self):  # enclosing test method; the name is assumed, not in the original snippet
        x_train = np.random.random((100, 28, 28))
        y_train = np.random.randint(10, size=(100, 1))
        x_test = np.random.random((20, 28, 28))
        y_test = np.random.randint(10, size=(20, 1))

        def build_model(hp):
            model = tf.keras.models.Sequential([
                tf.keras.layers.Flatten(input_shape=(28, 28)),
                tf.keras.layers.Dense(128, activation='relu'),
                tf.keras.layers.Dropout(hp.Choice('dropout_rate', values=[0.2, 0.4])),
                tf.keras.layers.Dense(10, activation='softmax')
            ])

            model.compile(
                optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])

            return model

        # in the original snippet the three statements below were unreachable,
        # sitting after `return model` inside build_model; they belong here
        tuner = RandomSearch(build_model, objective='accuracy', max_trials=1, executions_per_trial=1, seed=1)

        tuner.search(x_train, y_train, epochs=1)

        self.assertEqual(0.4, tuner.get_best_hyperparameters(1)[0].get('dropout_rate'))
Example #3
    def tune(self):
        """ TODO: actually this should be def tune(..) - will have to reconcile/fix the nomenclature at some point """
        tuner = RandomSearch(self.model,
                             objective='val_accuracy',
                             max_trials=50,
                             executions_per_trial=2,
                             directory='my_dir',
                             project_name='helloworld')

        # preprocess the data
        data = self.data_reduced
        LOG.info(f"BUILD: data from keras: {data.keys()}")

        x = data["x_train"]
        y = data["y_train"]
        x_val = data["x_test"]
        y_val = data["y_test"]

        print(f"label shapes: {y.shape} {y_val.shape}")
        # call the tuner on that

        try:
            tuner.search(x, y, epochs=3, validation_data=(x_val, y_val))
        except Exception as ex:
            LOG.error(f"Failed to tuner.search: {ex}")
        models = tuner.get_best_models(num_models=2)

        # self.tuner_model.fit(data["x_train"],data["y_train"])
        LOG.info(f"Finished tuning model. Summary:")
Example #4
def build_model(X_train, Y_train, X_test, Y_test):
    hyperModel = RegressionHyperModel((X_train.shape[1], ))

    tuner_rs = RandomSearch(hyperModel,
                            objective='mse',
                            max_trials=135,
                            executions_per_trial=1,
                            directory='param_opt_checkouts',
                            project_name='GDW')
    tuner_rs.search(X_train,
                    Y_train,
                    validation_data=(X_test, Y_test),
                    epochs=160)
    best_model = tuner_rs.get_best_models(num_models=1)[0]

    #metrics = ['loss', 'mse', 'mae', 'mape', 'cosine_proximity']
    #_eval = best_model.evaluate(X_test, Y_test)
    #print(_eval)
    #for i in range(len(metrics)):
    #    print(f'{metrics[i]} : {_eval[i]}')

    # history = best_model.fit(X_train, Y_train, validation_data = (X_test, Y_test), epochs=50)

    # best_model.save('./models_ANN/best_model')

    # save_model(best_model)
    tuner_rs.results_summary()
    load_model().summary()  # summary() prints directly; wrapping it in print() just prints None
    predict(best_model)
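
Several examples here (#4, #6, #7, #9, #12) pass HyperModel subclasses such as RegressionHyperModel, CNNHyperModel, or LSTMHyperModel that are defined elsewhere; a minimal sketch of the pattern using kerastuner's HyperModel base class (the architecture below is an assumption, not the original):

from kerastuner import HyperModel
from tensorflow import keras

class RegressionHyperModel(HyperModel):
    # minimal sketch: a tunable dense regressor, compiled with an 'mse'
    # metric so the tuner objective 'mse' is available
    def __init__(self, input_shape):
        self.input_shape = input_shape

    def build(self, hp):
        model = keras.Sequential()
        model.add(keras.layers.Dense(hp.Int('units', 32, 256, step=32),
                                     activation='relu',
                                     input_shape=self.input_shape))
        model.add(keras.layers.Dense(1))
        model.compile(optimizer='adam', loss='mse', metrics=['mse'])
        return model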
Example #5
def main(args):
    tv = FLAGS.tv
    vv = FLAGS.vv
    bs = FLAGS.bs

    project_name = f'tv{tv}-vv{vv}-bs{bs}'
    print(f'Project Name: {project_name}')
    print()
    tuner = RandomSearch(
        build_hyper_conv_estimator,
        objective='val_loss',
        max_trials=20,
        executions_per_trial=3,
        directory='hyper_search',
        project_name=project_name,
    )

    batch_size = 64
    batches = 4000
    workers = 2
    verbose = 2

    tuner.search_space_summary()
    dataset = TFSeqRandomDataGenerator(batch_size, batches)
    valid_dataset = TFSeqRandomDataGenerator(batch_size, 4000, version=1)
    tuner.search(dataset,
                 validation_data=valid_dataset,
                 epochs=10,
                 workers=workers,
                 use_multiprocessing=True,
                 verbose=verbose)
Example #6
def CNN_Hyper():
    training_set = tf.keras.preprocessing.image_dataset_from_directory(
        DATA_PATH + "processed/training",
        seed=957,
        image_size=IMAGE_SIZE,
        batch_size=BATCH_SIZE,
    )
    validation_set = tf.keras.preprocessing.image_dataset_from_directory(
        DATA_PATH + "processed/validation",
        seed=957,
        image_size=IMAGE_SIZE,
        batch_size=BATCH_SIZE,
    )

    test_set = tf.keras.preprocessing.image_dataset_from_directory(
        DATA_PATH + "processed/testing",
        seed=957,
        image_size=IMAGE_SIZE,
        batch_size=BATCH_SIZE,
    )

    training_set = training_set.prefetch(buffer_size=32)
    validation_set = validation_set.prefetch(buffer_size=32)

    hyperModel = CNNHyperModel(IMAGE_SIZE + (3, ), CLASS_COUNT, "softmax")

    MAX_TRIALS = 20
    EXECUTION_PER_TRIAL = 1
    N_EPOCH_SEARCH = 25

    tuner = RandomSearch(hyperModel,
                         objective='val_accuracy',
                         seed=957,
                         max_trials=MAX_TRIALS,
                         executions_per_trial=EXECUTION_PER_TRIAL,
                         directory='random_search',
                         project_name='Stanford-Dogs-40_1')

    tuner.search_space_summary()

    tuner.search(training_set,
                 epochs=N_EPOCH_SEARCH,
                 validation_data=validation_set)

    # Show a summary of the search
    tuner.results_summary()

    # Retrieve the best model.
    best_model = tuner.get_best_models(num_models=1)[0]

    # Evaluate the best model.
    loss, accuracy = best_model.evaluate(test_set)
    print("Loss: ", loss)
    print("Accuracy: ", accuracy)
    best_model.summary()
    # Save model
    best_model.save('CNN_Tuned_Best_Model')


# https://www.sicara.ai/blog/hyperparameter-tuning-keras-tuner
Example #7
def tuneCNN(
        X_train,
        X_test,
        height,
        width,
        num_classes,
        patience=1,
        executions_per_trial=1,
        seed=42,
        max_trials=3,
        objective='val_accuracy',
        directory='my_dir',
        epochs=10,
        verbose=0,
        test_size=0.2):
    # creates hypermodel object based on the num_classes and the input shape
    hypermodel = CNNHyperModel(input_shape=(
        height, width, 3), num_classes=num_classes)

    # instantiate the tuner that will explore the search space
    tuner = RandomSearch(
        hypermodel,
        objective=objective,
        seed=seed,
        max_trials=max_trials,
        executions_per_trial=executions_per_trial,
        directory=directory,
    )


    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model

    tuner.search(X_train,
                 validation_data=X_test,
                 callbacks=[tf.keras.callbacks.EarlyStopping(patience=patience)],
                 epochs=epochs,
                 verbose=verbose)

    # best hyperparameters
    hyp = tuner.get_best_hyperparameters(num_trials=1)[0]
    #hyp = tuner.oracle.get_best_trials(num_trials=1)[0].hyperparameters.values
    #best_hps = np.stack(hyp).astype(None)
    history = tuner_hist(
        X_train,
        X_test,
        tuner,
        hyp,
        img=1,
        epochs=epochs,
        verbose=verbose,
        test_size=test_size)

    """
    Return:
        models[0] : best model obtained after tuning
        best_hps : best Hyperprameters obtained after tuning, stored as array
        history : history of the data executed from the given model
    """
    return tuner.get_best_models(1)[0], hyp, history
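
The tuner_hist helper called above is defined elsewhere; judging from the call site, a minimal sketch (its signature and behaviour are assumptions) might be:

def tuner_hist(train_data, val_data, tuner, hyp, img=0, epochs=10,
               verbose=0, test_size=0.2):
    # hypothetical helper: rebuild the best model from hyp, train it, and
    # return the Keras History object
    model = tuner.hypermodel.build(hyp)
    history = model.fit(train_data, validation_data=val_data,
                        epochs=epochs, verbose=verbose)
    return history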
Example #8
    def _fit(self, X_train, y_train, X_test, y_test, X_val, y_val):
        tuner = RandomSearch(self._build_model,
                             objective='val_accuracy',
                             max_trials=self.max_trials,
                             executions_per_trial=1,
                             directory='logs/keras-tuner/',
                             project_name='cnn')

        tuner.search_space_summary()

        tuner.search(x=X_train,
                     y=y_train,
                     epochs=self.epochs,
                     batch_size=self.batch_size,
                     verbose=0,
                     validation_data=(X_val, y_val),
                     callbacks=[EarlyStopping('val_accuracy', patience=4)])
        tuner.results_summary()  # prints directly and returns None
        model = tuner.get_best_models(num_models=1)[0]
        model.summary()

        # Evaluate Best Model #
        _, train_acc = model.evaluate(X_train, y_train, verbose=0)
        _, test_acc = model.evaluate(X_test, y_test, verbose=0)
        print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
Example #9
def tuneCNN(X, y, num_classes):

    # creates hypermodel object based on the num_classes and the input shape
    hypermodel = CNNHyperModel(input_shape=(224, 224, 3),
                               num_classes=num_classes)

    # instantiate the tuner that will explore the search space
    tuner = RandomSearch(
        hypermodel,
        objective='val_accuracy',
        seed=42,
        max_trials=3,
        executions_per_trial=3,
        directory='random_search',
    )

    X_train, X_test, y_train, y_test = train_test_split(np.asarray(X),
                                                        np.asarray(y),
                                                        test_size=0.33,
                                                        random_state=42)

    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model
    tuner.search(X_train,
                 y_train,
                 validation_data=(X_test, y_test),
                 callbacks=[tf.keras.callbacks.EarlyStopping(patience=1)])

    # returns the best model
    return tuner.get_best_models(1)[0]
Example #10
    def search_bestCNN(self,
                       X,
                       Y,
                       testX,
                       testY,
                       epochs=50,
                       max_trials=20,
                       batch_size=64,
                       project_name='A1'):
        tuner = RandomSearch(self._build_CNN,
                             objective='val_accuracy',
                             max_trials=max_trials,
                             executions_per_trial=1,
                             directory='tunerlog',
                             project_name=project_name)
        tuner.search(x=X,
                     y=Y,
                     epochs=epochs,
                     batch_size=batch_size,
                     validation_data=(testX, testY),
                     callbacks=[
                         tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                          patience=5)
                     ],
                     verbose=2)
        tuner.search_space_summary()
        tuner.results_summary()  # prints directly and returns None
        print('best_hyperparameters')
        print(tuner.get_best_hyperparameters()[0].values)
        return tuner.get_best_models()
Example #11
def fit_hier_embedding(X, y, result_dir, project):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
    y_train = to_categorical(y_train, output_dim)
    y_test = to_categorical(y_test, output_dim)

    X_train1 = X_train[['Rating', 'CocoaPercent']].values
    X_train2 = X_train.drop(['Rating', 'CocoaPercent'], axis=1).values
    X_test1 = X_test[['Rating', 'CocoaPercent']].values
    X_test2 = X_test.drop(['Rating', 'CocoaPercent'], axis=1).values

    dim1 = X_train1.shape[1]
    dim2 = X_train2.shape[1]

    hp = HyperParameters()

    # note: the builder ignores the hp instance the tuner passes in and
    # reuses the outer HyperParameters object
    bm = lambda hp_: tune_optimizer_model(hp, dim1, dim2)

    print(dim1, dim2)
    tuner = RandomSearch(bm,
                         objective='val_accuracy',
                         max_trials=MAX_TRIALS,
                         executions_per_trial=EXECUTIONS_PER_TRIAL,
                         directory=result_dir,
                         project_name=project,
                         seed=32)

    TRAIN_EPOCHS = 1000

    tuner.search(x=[X_train1, X_train2],
                 y=y_train,
                 epochs=TRAIN_EPOCHS,
                 validation_data=([X_test1, X_test2], y_test))
    tuner.results_summary()
Example #12
def main():

    dataset = makeHistoricalData(fixed_data, temporal_data, h, r, 'death',
                                 'mrmr', 'country', 'regular')

    numberOfSelectedCounties = len(dataset['county_fips'].unique())
    new_dataset = clean_data(dataset, numberOfSelectedCounties)
    X_train, y_train, X_val, y_val, X_test, y_test, y_train_date, y_test_date, y_val_date, val_naive_pred, test_naive_pred = preprocess(
        new_dataset)
    X_train, y_train, X_val, y_val, X_test, y_test, scalar = data_normalize(
        X_train, y_train, X_val, y_val, X_test, y_test)

    hypermodel = LSTMHyperModel(n=X_train.shape[2])

    tuner = RandomSearch(hypermodel,
                         objective='mse',
                         seed=1,
                         max_trials=60,
                         executions_per_trial=4,
                         directory='parameter_tuning',
                         project_name='lstm_model_tuning')

    tuner.search_space_summary()

    print()
    input("Press Enter to continue...")
    print()

    N_EPOCH_SEARCH = 50
    tuner.search(X_train, y_train, epochs=N_EPOCH_SEARCH, validation_split=0.2)

    print()
    input("Press Enter to show the summary of search...")
    print()

    # Show a summary of the search
    tuner.results_summary()

    print()
    input("Press Enter to retrive the best model...")
    print()

    # Retrieve the best model.
    best_model = tuner.get_best_models(num_models=1)[0]

    print()
    input("Press Enter to show best model summary...")
    print()

    best_model.summary()

    print()
    input("Press Enter to run the best model on test dataset...")
    print()

    # Evaluate the best model.
    loss, accuracy = best_model.evaluate(X_test, y_test)
    print("loss = " + str(loss) + ", acc = " + str(accuracy))
Example #13
def fixed_result_tuner(fixed_model_tmp_path):
    tmp_dir = str(fixed_model_tmp_path / "tmp")
    results_dir = str(fixed_model_tmp_path / "results")
    export_dir = str(fixed_model_tmp_path / "export")

    # Random data to feed the model.

    x_train = []
    y_train = []

    for idx in range(100):
        if idx % 2 == 0:
            x_train.append([0, 1])
            y_train.append([0, 1])
        else:
            x_train.append([1, 0])
            y_train.append([1, 0])

    for idx in range(10):
        if idx % 2 == 0:
            x_train.append([0, 1])
            y_train.append([1, 0])
        else:
            x_train.append([1, 0])
            y_train.append([0, 1])

    x_train = np.array(x_train, dtype=np.float32)
    y_train = np.array(y_train, dtype=np.float32)

    # Initialize the hypertuner by passing the model function (model_fn) and
    # key search constraints: maximize val_acc (objective), spend at most 100
    # epochs across the whole search (epoch_budget) and at most 10 epochs per
    # model (max_epochs). Note this is the legacy pre-1.0 Keras Tuner API.
    tuner = RandomSearch(fixed_model_fn,
                         objective='val_acc',
                         epoch_budget=100,
                         max_epochs=10,
                         results_dir=results_dir,
                         tmp_dir=tmp_dir,
                         export_dir=export_dir)

    # display search overview
    tuner.summary()

    # You can use http://keras-tuner.appspot.com to track results on the web,
    # and get notifications. To do so, grab an API key on that site, and fill
    # it here.
    # tuner.enable_cloud(api_key=api_key)

    # Perform the model search. The search function has the same prototype as
    # keras.Model.fit(); similarly, search_generator() mirrors fit_generator().
    tuner.search(x_train, y_train, validation_data=(x_train, y_train))

    return tuner
Example #14
def tuneClass(X,
              y,
              num_classes,
              max_layers=10,
              min_layers=2,
              min_dense=32,
              max_dense=512,
              executions_per_trial=3,
              max_trials=1,
              activation='relu',
              loss='categorical_crossentropy',
              metrics='accuracy'):
    # build_model below constructs the network from the hyperparameter search space
    le = preprocessing.LabelEncoder()
    y = tf.keras.utils.to_categorical(le.fit_transform(y),
                                      num_classes=num_classes)

    def build_model(hp):
        model = keras.Sequential()
        for i in range(hp.Int('num_layers', min_layers, max_layers)):
            model.add(
                layers.Dense(units=hp.Int('units_' + str(i),
                                          min_value=min_dense,
                                          max_value=max_dense,
                                          step=32),
                             activation=activation))
        model.add(layers.Dense(num_classes, activation='softmax'))
        model.compile(optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                      loss=loss,
                      metrics=[metrics])
        return model

    # instantiate the tuner that will explore the search space
    tuner = RandomSearch(build_model,
                         objective='loss',
                         max_trials=max_trials,
                         executions_per_trial=executions_per_trial,
                         directory='models',
                         project_name='class_tuned')

    # tuner.search_space_summary()

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=49)

    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model
    tuner.search(X_train, y_train, epochs=5, validation_data=(X_test, y_test))
    models = tuner.get_best_models(num_models=1)
    return models[0]
Example #15
def get_best_nn(data, num_output=1, **tuner_kw):
    """
    Find the "best" model based on the `MyHyperModel` class.

    Parameters
    ----------
    data: numpy.array or similar
        The train and validation data to be used by the hyperparameter tuner.
    num_output: int, optional
        The number of outputs for our NN. Defaults to 1, for regression.
    tuner_kw: dictionary
        A dictionary of parameters to be passed to the `RandomSearch` tuner.

    Returns
    -------
    The model built with the "optimised" hyperparameters, plus those
    hyperparameters.
    """
    # Load encoded data
    enc_data = data_encode(data, encoder='CatBoostEncoder')
    x_train_enc, y_train = enc_data.get('train_data')
    x_val_enc, y_val = enc_data.get('test_data')

    # Create an instance of the `MyHyperModel` class
    hyper_model = MyHyperModel(num_output=num_output,
                               nun_features=int(x_train_enc.shape[1]))

    # Default tuner params
    default_tuner_params = {
        'objective': 'val_loss',
        'max_trials': 10,
        'directory':
        'keras_tuner_output',  # Directory for logs, checkpoints, etc
        'project_name': 'sgsc'
    }  # Default is utils/keras_tuner_output

    # Update tuner params
    tuner_params = {**default_tuner_params, **tuner_kw}

    # Initialise tuner and run it
    tuner = RandomSearch(hyper_model, **tuner_params)
    # TODO: decide whether to pass an explicit seed; without one the tuner
    # chooses its own
    tuner.search(
        x_train_enc,
        y_train,
        epochs=5,  # Default number of epochs
        validation_data=(x_val_enc, y_val),
        verbose=0)

    # Get best model
    best_hp = tuner.get_best_hyperparameters()[0]
    best_model = tuner.hypermodel.build(best_hp)

    return best_model, best_hp
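
Because tuner_kw is merged over default_tuner_params, callers can override any RandomSearch argument; a hypothetical call (the variable and values are assumed, not from the original) might look like:

# run a longer search into a custom directory, overriding the defaults
best_model, best_hp = get_best_nn(raw_data,
                                  num_output=1,
                                  max_trials=25,
                                  directory='experiments',
                                  project_name='sgsc_long_run')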
Example #16
def find_best_NN(x_train, y_train):
  tuner = RandomSearch(build_model, objective="loss", max_trials=10, executions_per_trial=1)
  print("\n\n\n")
  print('[INFO] start searching')
  tuner.search(x_train, y_train, batch_size=100, epochs=10, validation_split=0.2)
  print("\n\n\nRESULTS SUMMARY")
  tuner.results_summary()
  print("\n\n\n")
  print("\n\n\nHERE IS THE BEST MODEL\n\n\n")
  best_params = tuner.get_best_hyperparameters()[0]
  best_model = tuner.hypermodel.build(best_params)
  best_model.summary()
  return best_model
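
Both this example and Example #22 call a build_model that is not shown; a minimal sketch of a compatible builder (the layer sizes and search space are assumptions) that also compiles an 'mae' metric so the "val_mae" objective in Example #22 exists:

from tensorflow import keras

def build_model(hp):
    # hypothetical builder: tunes width and depth of a small regressor
    model = keras.Sequential()
    for i in range(hp.Int('num_layers', 1, 4)):
        model.add(keras.layers.Dense(hp.Int(f'units_{i}', 32, 256, step=32),
                                     activation='relu'))
    model.add(keras.layers.Dense(1))
    model.compile(optimizer='adam', loss='mse', metrics=['mae'])
    return model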
Example #17
def run_fn(fn_args):

  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  train_dataset = input_fn(fn_args.train_files, tf_transform_output, batch_size=100)
  eval_dataset = input_fn(fn_args.eval_files, tf_transform_output, batch_size=100)

  log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), 'logs')
  tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, update_freq='batch')

  if True:
    print("Use normal Keras model")
    mirrored_strategy = tf.distribute.MirroredStrategy()
    with mirrored_strategy.scope():
      model = build_keras_model(None)
    model.fit(
        train_dataset,
        epochs=1,
        steps_per_epoch=fn_args.train_steps,
        validation_data=eval_dataset,
        validation_steps=fn_args.eval_steps,
        callbacks=[tensorboard_callback])
  else:
    print("Use normal Keras Tuner")
    tuner = RandomSearch(
        build_keras_model,
        objective='val_binary_accuracy',
        max_trials=5,
        executions_per_trial=3,
        directory=fn_args.serving_model_dir,
        project_name='tuner')
    tuner.search(
        train_dataset,
        epochs=1,
        steps_per_epoch=fn_args.train_steps, # or use fewer steps to find good HPs quickly, then fit the best model fully
        validation_steps=fn_args.eval_steps,
        validation_data=eval_dataset,
        callbacks=[tensorboard_callback, tf.keras.callbacks.EarlyStopping()])
    tuner.search_space_summary()
    tuner.results_summary()
    best_hparams = tuner.oracle.get_best_trials(1)[0].hyperparameters.get_config()
    model = tuner.get_best_models(1)[0]

  signatures = {
      'serving_default': get_serve_tf_examples_fn(model, tf_transform_output).get_concrete_function(
          tf.TensorSpec(shape=[None],
                        dtype=tf.string,
                        name='input_example_tensor')),
  }

  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
Example #18
def tuneReg(data,
            target,
            max_layers=10,
            min_layers=2,
            min_dense=32,
            max_dense=512,
            executions_per_trial=3,
            max_trials=1):
    print("entered1")

    # function build model using hyperparameter

    def build_model(hp):
        model = keras.Sequential()
        for i in range(hp.Int('num_layers', min_layers, max_layers)):
            model.add(
                layers.Dense(units=hp.Int('units_' + str(i),
                                          min_value=min_dense,
                                          max_value=max_dense,
                                          step=32),
                             activation='relu'))
        model.add(layers.Dense(1))  # linear output for regression; softmax on a single unit always emits 1.0
        model.compile(optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                      loss='mean_squared_error')
        return model

    # random search for the model
    tuner = RandomSearch(build_model,
                         objective='loss',
                         max_trials=max_trials,
                         executions_per_trial=executions_per_trial)

    # tuner.search_space_summary()
    # del data[target]

    X_train, X_test, y_train, y_test = train_test_split(data,
                                                        target,
                                                        test_size=0.2,
                                                        random_state=49)

    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model
    tuner.search(X_train,
                 y_train,
                 epochs=5,
                 validation_data=(X_test, y_test),
                 callbacks=[tf.keras.callbacks.TensorBoard('my_dir')])

    models = tuner.get_best_models(num_models=1)
    return models[0]
Example #19
def keras_tuner(x_train, y_train, x_test, y_test):
    from kerastuner.tuners import RandomSearch
    tuner = RandomSearch(build_model,
                         objective='val_accuracy',
                         max_trials=5,
                         executions_per_trial=3,
                         directory='./test',
                         project_name='helloworld')

    tuner.search_space_summary()

    tuner.search(x_train, y_train, epochs=5, validation_data=(x_test, y_test))

    tuner.results_summary()  # prints directly and returns None
Example #20
def search(
    epochs: int,
    n_trials: int,
    execution_per_trial: int,
    project: Text = "test",
    cleanup: bool = False,
):
    start_time = datetime.now()

    results_path = os.path.join(SEARCH_DIR, project)
    if cleanup and os.path.exists(results_path):
        shutil.rmtree(results_path)

    ds_tr = input_fn(
        "data/train_covertype.csv", shuffle=True, batch_size=DEFAULTS["batch_size"]
    )
    ds_val = input_fn(
        "data/val_covertype.csv", shuffle=False, batch_size=DEFAULTS["batch_size"]
    )

    # Keras expects integer step counts; np.floor returns a float
    num_train_steps = int(N_TR_SAMPLES // DEFAULTS["batch_size"])
    num_valid_steps = int(N_VAL_SAMPLES // DEFAULTS["batch_size"])

    # RandomSearch, BayesianOptimization
    tuner = RandomSearch(
        build_model,
        objective="val_loss",
        max_trials=n_trials,
        executions_per_trial=execution_per_trial,
        directory=SEARCH_DIR,
        project_name=project,
    )

    # tuner.search_space_summary()

    tuner.search(
        ds_tr,
        epochs=epochs,
        validation_data=ds_val,
        steps_per_epoch=num_train_steps,
        validation_steps=num_valid_steps,
    )

    # models = tuner.get_best_models(num_models=1)

    tuner.results_summary(num_trials=2)

    print(f"Total runtime: {(datetime.now() - start_time).seconds / 60:.2f} mins")
Example #21
def search_hp(neumf, dataset):

    trainset = tf.data.TFRecordDataset(
        join('datasets', dataset) + '.trainset.tfrecord').repeat(-1).map(
            parse_function).shuffle(batch_size).batch(batch_size).prefetch(
                tf.data.experimental.AUTOTUNE)
    testset = tf.data.TFRecordDataset(
        join('datasets', dataset) + '.testset.tfrecord').repeat(-1).map(
            parse_function).shuffle(batch_size).batch(batch_size).prefetch(
                tf.data.experimental.AUTOTUNE)
    tuner = RandomSearch(neumf,
                         objective='val_accuracy',
                         max_trials=100,
                         directory='my_dir',
                         project_name='neumf')
    # note: with repeat(-1) datasets, search() also needs steps_per_epoch and
    # validation_steps to bound each epoch
    tuner.search(trainset, epochs=5, validation_data=testset)
    tuner.get_best_models(1)[0].save('neumf.h5')  # save the tuned model, not the untrained hypermodel
Example #22
def find_best_NN(x_train, y_train):
  # create a tuner that can pick an optimal model architecture
  tuner = RandomSearch(build_model, objective="val_mae", max_trials=40, executions_per_trial=1,)
  print("\n\n\n")
  # the automatic hyperparameter search starts here
  print('[INFO] start searching')
  tuner.search(x_train, y_train, batch_size=500, epochs=150, validation_split=0.3)
  # pick the best model
  print("\n\n\nRESULTS SUMMARY")
  tuner.results_summary()
  print("\n\n\n")
  # get the best model
  print("\n\n\nHERE IS THE BEST MODEL\n\n\n")
  best_params = tuner.get_best_hyperparameters()[0]
  best_model = tuner.hypermodel.build(best_params)
  best_model.summary()
  return best_model
Example #23
def keras_hp_search(
    model_dir,
    epochs = 3,
    dataset_source: DATASET_SOURCE_TYPE = DATASET_SOURCE_TYPE.gcs,
    dataset_size: DATASET_SIZE_TYPE = DATASET_SIZE_TYPE.tiny,
    embeddings_mode: EMBEDDINGS_MODE_TYPE = EMBEDDINGS_MODE_TYPE.hashbucket,
    distribution_strategy: DistributionStrategyType = None):

    def build_model(hp):
        feature_columns = create_feature_columns(embeddings_mode)
        feature_layer = tf.keras.layers.DenseFeatures(feature_columns, name="feature_layer")
        Dense = tf.keras.layers.Dense
        kernel_regularizer=tf.keras.regularizers.l2(0.001)
        model = tf.keras.Sequential()
        model.add(feature_layer)
        model.add(Dense(hp.Choice('layer1', values=[50, 100, 200]), activation=tf.nn.relu, kernel_regularizer=kernel_regularizer))
        model.add(Dense(hp.Choice('layer2', values=[50, 100, 200]), activation=tf.nn.relu, kernel_regularizer=kernel_regularizer))
        model.add(Dense(1, activation=tf.nn.sigmoid, kernel_regularizer=kernel_regularizer))

        logging.info('compiling sequential keras model')
        # Compile Keras model
        model.compile(
          optimizer=tf.optimizers.SGD(learning_rate=0.05),
          loss=tf.keras.losses.BinaryCrossentropy(),
          metrics=['accuracy'])
        return model

    training_ds = criteo_nbdev.data_reader.get_dataset(dataset_source, dataset_size, DATASET_TYPE.training, embeddings_mode).repeat(epochs)
    eval_ds = criteo_nbdev.data_reader.get_dataset(dataset_source, dataset_size, DATASET_TYPE.validation, embeddings_mode).repeat(epochs)

    tuner = RandomSearch(
        build_model,
        objective='val_loss',
        max_trials=30,
        executions_per_trial=1,
        directory=model_dir)

    tuner.search_space_summary()
    tuner.search(training_ds,
                 validation_data=eval_ds,
                 epochs=3,
                 verbose=2)
Example #24
def KerasTuner(XTrain, YTrain, XValidation, YValidation):
    tuner = RandomSearch(buildModel,
                         objective='mse',
                         max_trials=30,
                         executions_per_trial=10,
                         directory='KerasTuner',
                         project_name=f'KerasTuner-{constants.NAME}')

    tuner.search_space_summary()

    tuner.search(XTrain,
                 YTrain,
                 epochs=5,
                 validation_data=(XValidation, YValidation))

    models = tuner.get_best_models(num_models=1)

    tuner.results_summary()

    return models
Example #25
def train_models(x_train, x_test, y_train, y_test, model_name, epochs,
                 batch_size, params):
    # Get the class object from the models file and create instance
    model = getattr(models, model_name)(**params)
    tuner = RandomSearch(
        model,
        objective=kerastuner.Objective("val_f1_m", direction="max"),
        max_trials=5,
        executions_per_trial=1,
        directory='random_search',
        project_name='sentiment_analysis_' + str(model_name),
        distribution_strategy=tf.distribute.MirroredStrategy())
    tuner.search_space_summary()
    tuner.search(x_train,
                 to_categorical(y_train),
                 epochs=epochs,
                 validation_data=(x_test, to_categorical(y_test)))
    return tuner.get_best_models(
        num_models=1)[0], tuner.oracle.get_best_trials(
            num_trials=1)[0].hyperparameters
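
The objective here is the custom metric val_f1_m, which only exists if the models compile an f1_m metric; a common Keras-backend implementation (an assumption about what the models module provides) looks like:

import tensorflow.keras.backend as K

def f1_m(y_true, y_pred):
    # batch-wise F1 from rounded predictions; compile the model with
    # metrics=[f1_m] so the tuner can monitor 'val_f1_m'
    y_pred = K.round(K.clip(y_pred, 0, 1))
    tp = K.sum(K.cast(y_true * y_pred, 'float32'), axis=0)
    fp = K.sum(K.cast((1 - y_true) * y_pred, 'float32'), axis=0)
    fn = K.sum(K.cast(y_true * (1 - y_pred), 'float32'), axis=0)
    precision = tp / (tp + fp + K.epsilon())
    recall = tp / (tp + fn + K.epsilon())
    return K.mean(2 * precision * recall / (precision + recall + K.epsilon()))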
Example #26
    def random_search(self):
        tuner = RandomSearch(
            self.build_model,
            'mean_squared_error',
            self.max_trials,  # more than 2 and it crashes
            overwrite=True,
            directory=self.kt_dir
            # executions_per_trial=self.max_executions_per,
            # project_name=name
        )
        # try:
        tuner.search(x=self.data,
                     y=self.train_labels,
                     epochs=self.epochs,
                     batch_size=self.batch_size,
                     validation_data=(self.test_data, self.test_labels))
        # except ValueError:
        #     print('error')

        return tuner
Example #27
def tune():

    tuner = RandomSearch(tuner_model,
                         objective="val_accuracy",
                         max_trials=100,
                         executions_per_trial=1,
                         directory=LOG_DIR,
                         project_name='final_year_project')

    tuner.search(x=x_train,
                 y=y_train,
                 epochs=3,
                 batch_size=64,
                 validation_data=(x_test, y_test))

    with open("tuner.pkl", "wb") as f:
        pickle.dump(tuner, f)

    tuner = pickle.load(open("tuner.pkl", "rb"))

    print(tuner.get_best_hyperparameters()[0].values)
    tuner.results_summary()  # prints directly and returns None
    tuner.get_best_models()[0].summary()
Example #28
def run_tuner(hypermodel, hp):
    # load dataset
    train_dataset, test_dataset = load_data()

    # init tensorboard here so each run will have folder,
    # which we can rename based on trial_id
    tb_callback = get_tensorboard(TUNER_SETTINGS['log_dir'])

    tuner = RandomSearch(
        hypermodel,
        objective=TUNER_SETTINGS['objective'],
        max_trials=TUNER_SETTINGS['max_trials'],
        metrics=['accuracy'],
        loss='sparse_categorical_crossentropy',
        hyperparameters=hp,
        executions_per_trial=TUNER_SETTINGS['executions_per_trial'],
        directory=TUNER_SETTINGS['log_dir'],
        project_name=project_name)

    tuner.search(train_dataset,
                 validation_data=test_dataset,
                 batch_size=TUNER_SETTINGS['batch_size'],
                 callbacks=TUNER_SETTINGS['callbacks'] + [tb_callback],
                 epochs=TUNER_SETTINGS['epochs'])
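
TUNER_SETTINGS is an external dict; from the keys used above, a plausible definition (the values are placeholders, not the original) would be:

import tensorflow as tf

TUNER_SETTINGS = {
    'log_dir': 'logs/tuner',        # shared by the tuner and TensorBoard
    'objective': 'val_accuracy',
    'max_trials': 20,
    'executions_per_trial': 2,
    'batch_size': 32,
    'epochs': 10,
    'callbacks': [tf.keras.callbacks.EarlyStopping(patience=3)],
}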
Example #29
    def createModel(self):
        self.model_instance += 1
        clear_session()

        features, label = self.getDataset()
        X_train, y_train = self.createLag(features, label)
        X_train = X_train[:, self.lags]

        learning_rate = float(self.hyperparameters["Learning_Rate"].get())
        momentum = float(self.hyperparameters["Momentum"].get())
        optimizers = {
                "Adam": Adam(learning_rate=learning_rate),
                "SGD": SGD(learning_rate=learning_rate, momentum=momentum),
                "RMSprop": RMSprop(learning_rate=learning_rate, momentum=momentum)
                }

        shape = (X_train.shape[1], X_train.shape[2])
        model_choice = self.model_var.get()

        if not self.do_optimization:
            model = Sequential()
            model.add(Input(shape=shape))
            
            if model_choice == 0:
                model.add(Flatten())

            layers = self.no_optimization_choice_var.get()
            for i in range(layers):
                neuron_number = self.neuron_numbers_var[i].get()
                activation_function = self.activation_var[i].get()
                if model_choice == 0:
                    model.add(Dense(neuron_number, activation=activation_function, kernel_initializer=GlorotUniform(seed=0)))
                
                elif model_choice == 1:
                    model.add(Conv1D(filters=neuron_number, kernel_size=2, activation=activation_function, kernel_initializer=GlorotUniform(seed=0)))
                    model.add(MaxPooling1D(pool_size=2))
                
                elif model_choice == 2:
                    if i == layers-1:
                        model.add(LSTM(neuron_number, activation=activation_function, return_sequences=False, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0)))
                        model.add(Dropout(0.2))
                    else:
                        model.add(LSTM(neuron_number, activation=activation_function, return_sequences=True, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0)))
                        model.add(Dropout(0.2))

                elif model_choice == 3:
                    if i == layers-1:
                        model.add(Bidirectional(LSTM(neuron_number, activation=activation_function, return_sequences=False, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0))))
                        model.add(Dropout(0.2))
                    else:
                        model.add(Bidirectional(LSTM(neuron_number, activation=activation_function, return_sequences=True, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0))))
                        model.add(Dropout(0.2))

                elif model_choice == 4:
                    if i == layers-1:
                        model.add(SimpleRNN(neuron_number, activation=activation_function, return_sequences=False, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0)))
                        model.add(Dropout(0.2))
                    else:
                        model.add(SimpleRNN(neuron_number, activation=activation_function, return_sequences=True, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0)))
                        model.add(Dropout(0.2))
                
                elif model_choice == 5:
                    if i == layers-1:
                        model.add(GRU(neuron_number, activation=activation_function, return_sequences=False, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0)))
                        model.add(Dropout(0.2))
                    else:
                        model.add(GRU(neuron_number, activation=activation_function, return_sequences=True, kernel_initializer=GlorotUniform(seed=0), recurrent_initializer=Orthogonal(seed=0)))
                        model.add(Dropout(0.2))
            
            if model_choice == 1:
                model.add(Flatten())
                model.add(Dense(32, kernel_initializer=GlorotUniform(seed=0)))

            model.add(Dense(1, activation=self.output_activation.get(), kernel_initializer=GlorotUniform(seed=0)))
            model.compile(optimizer = optimizers[self.hyperparameters["Optimizer"].get()], loss=self.hyperparameters["Loss_Function"].get())
            
            history = model.fit(X_train, y_train, epochs=self.hyperparameters["Epoch"].get(), batch_size=self.hyperparameters["Batch_Size"].get(), verbose=1, shuffle=False)
            loss = history.history["loss"][-1]
            self.train_loss.set(loss)

        elif self.do_optimization:
            layer = self.optimization_choice_var.get()

            if model_choice == 0:
                def build_model(hp):
                    model = Sequential()
                    model.add(Input(shape=shape))
                    model.add(Flatten())
                    for i in range(layer):
                        n_min = self.neuron_min_number_var[i].get()
                        n_max = self.neuron_max_number_var[i].get()
                        step = int((n_max - n_min)/4)
                        model.add(Dense(units=hp.Int('MLP_'+str(i), min_value=n_min, max_value=n_max, step=step), activation='relu'))
                    model.add(Dense(1))
                    model.compile(optimizer = optimizers[self.hyperparameters["Optimizer"].get()], loss=self.hyperparameters["Loss_Function"].get())
                    return model
                
                name = str(self.model_instance) + ". MLP"

            elif model_choice == 1:
                def build_model(hp):
                    model = Sequential()
                    model.add(Input(shape=shape))
                    for i in range(layer):
                        n_min = self.neuron_min_number_var[i].get()
                        n_max = self.neuron_max_number_var[i].get()
                        step = int((n_max-n_min)/4)
                        model.add(Conv1D(filters=hp.Int("CNN_"+str(i), min_value=n_min, max_value=n_max, step=step), kernel_size=2, activation="relu", kernel_initializer=GlorotUniform(seed=0)))
                        model.add(MaxPooling1D(pool_size=2))
                    
                    model.add(Flatten())
                    model.add(Dense(32, kernel_initializer=GlorotUniform(seed=0)))
                    model.add(Dense(1, kernel_initializer=GlorotUniform(seed=0)))
                    model.compile(optimizer = optimizers[self.hyperparameters["Optimizer"].get()], loss=self.hyperparameters["Loss_Function"].get())
                    return model
                
                name = str(self.model_instance) + ". CNN"

            elif model_choice == 2:
                def build_model(hp):
                    model = Sequential()
                    model.add(Input(shape=shape))
                    for i in range(layer):
                        n_min = self.neuron_min_number_var[i].get()
                        n_max = self.neuron_max_number_var[i].get()
                        step = int((n_max - n_min)/4)
                        model.add(LSTM(units=hp.Int("LSTM_"+str(i), min_value=n_min, max_value=n_max, step=step), activation='relu', return_sequences=True, kernel_initializer=GlorotUniform(seed=0)))
                        if i == layer-1:
                            model.add(LSTM(units=hp.Int("LSTM_"+str(i), min_value=n_min, max_value=n_max, step=step), activation='relu', return_sequences=False, kernel_initializer=GlorotUniform(seed=0)))
                    
                    model.add(Dense(1))
                    model.compile(optimizer = optimizers[self.hyperparameters["Optimizer"].get()], loss=self.hyperparameters["Loss_Function"].get())
                    return model
                
                name = str(self.model_instance) + ". LSTM"
            
            elif model_choice == 3:
                def build_model(hp):
                    model = Sequential()
                    model.add(Input(shape=shape))
                    for i in range(layer):
                        n_min = self.neuron_min_number_var[i].get()
                        n_max = self.neuron_max_number_var[i].get()
                        step = int((n_max - n_min)/4)
                        # same fix as the LSTM branch: only the last layer
                        # returns a single vector
                        return_seq = i < layer - 1
                        model.add(Bidirectional(LSTM(units=hp.Int("LSTM_"+str(i), min_value=n_min, max_value=n_max, step=step), activation='relu', return_sequences=return_seq, kernel_initializer=GlorotUniform(seed=0))))
                    
                    model.add(Dense(1, kernel_initializer=GlorotUniform(seed=0)))
                    model.compile(optimizer = optimizers[self.hyperparameters["Optimizer"].get()], loss=self.hyperparameters["Loss_Function"].get())
                    return model

                name = str(self.model_instance) + ". Bi-LSTM"


            tuner = RandomSearch(build_model, objective='loss', max_trials=25, executions_per_trial=2, directory=self.runtime, project_name=name)
            
            tuner.search(X_train, y_train, epochs=self.hyperparameters["Epoch"].get(), batch_size=self.hyperparameters["Batch_Size"].get())
            hps = tuner.get_best_hyperparameters(num_trials = 1)[0]
            model = tuner.hypermodel.build(hps)
            
            history = model.fit(X_train, y_train, epochs=self.hyperparameters["Epoch"].get(), batch_size=self.hyperparameters["Batch_Size"].get(), verbose=1)
            loss = history.history["loss"][-1]
            self.train_loss.set(loss)
            

            for i in range(layer):
                if model_choice == 0:
                    self.best_model_neurons[i].set(model.get_layer(index=i+1).get_config()["units"])
                elif model_choice == 1:
                    self.best_model_neurons[i].set(model.get_layer(index=(2*i)).get_config()["filters"])
                elif model_choice == 2:
                    self.best_model_neurons[i].set(model.get_layer(index=i).get_config()["units"])
                elif model_choice == 3:
                    self.best_model_neurons[i].set(model.get_layer(index=i).get_config()["layer"]["config"]["units"])
        model.summary()
        self.model = model
Example #30
def build_model(hp):
    # NOTE: the start of this function was truncated in the original snippet;
    # a minimal MNIST-style layer stack is assumed here so the example parses.
    model = keras.Sequential([
        keras.layers.Flatten(input_shape=(28, 28)),
        keras.layers.Dense(hp.Int('units', 32, 512, step=32), activation='relu'),
        keras.layers.Dense(10, activation='softmax')
    ])
    model.compile(optimizer=keras.optimizers.Adam(
        hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model


tuner = RandomSearch(build_model,
                     objective='val_accuracy',
                     max_trials=5,
                     directory='output',
                     project_name='models')

# Search for the best model
tuner.search(train_images, train_labels, epochs=3, validation_split=0.1)

model = tuner.get_best_models(num_models=1)[0]

model.summary()

# Train the data with the best model
model.fit(train_images,
          train_labels,
          batch_size=100,
          epochs=10,
          validation_split=0.1,
          initial_epoch=3)

# Check the test data
m = 0