Example no. 1
def CNN_Hyper():
    training_set = tf.keras.preprocessing.image_dataset_from_directory(
        DATA_PATH + "processed/training",
        seed=957,
        image_size=IMAGE_SIZE,
        batch_size=BATCH_SIZE,
    )
    validation_set = tf.keras.preprocessing.image_dataset_from_directory(
        DATA_PATH + "processed/validation",
        seed=957,
        image_size=IMAGE_SIZE,
        batch_size=BATCH_SIZE,
    )

    test_set = tf.keras.preprocessing.image_dataset_from_directory(
        DATA_PATH + "processed/testing",
        seed=957,
        image_size=IMAGE_SIZE,
        batch_size=BATCH_SIZE,
    )

    training_set = training_set.prefetch(buffer_size=32)
    validation_set = validation_set.prefetch(buffer_size=32)

    hyperModel = CNNHyperModel(IMAGE_SIZE + (3, ), CLASS_COUNT, "softmax")

    MAX_TRIALS = 20
    EXECUTION_PER_TRIAL = 1
    N_EPOCH_SEARCH = 25

    tuner = RandomSearch(hyperModel,
                         objective='val_accuracy',
                         seed=957,
                         max_trials=MAX_TRIALS,
                         executions_per_trial=EXECUTION_PER_TRIAL,
                         directory='random_search',
                         project_name='Stanford-Dogs-40_1')

    tuner.search_space_summary()

    tuner.search(training_set,
                 epochs=N_EPOCH_SEARCH,
                 validation_data=validation_set)

    # Show a summary of the search
    tuner.results_summary()

    # Retrieve the best model.
    best_model = tuner.get_best_models(num_models=1)[0]

    # Evaluate the best model.
    loss, accuracy = best_model.evaluate(test_set)
    print("Loss: ", loss)
    print("Accuracy: ", accuracy)
    best_model.summary()
    # Save model
    best_model.save('CNN_Tuned_Best_Model')


# https://www.sicara.ai/blog/hyperparameter-tuning-keras-tuner
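Several of these examples construct an undefined CNNHyperModel. A minimal sketch of what such a class might look like, assuming Keras Tuner's HyperModel API (the layer sizes and search ranges below are illustrative, not taken from the source):

from kerastuner import HyperModel
from tensorflow import keras

class CNNHyperModel(HyperModel):
    """Hypothetical stand-in for the CNNHyperModel used in these examples."""

    def __init__(self, input_shape, num_classes, activation='softmax'):
        self.input_shape = input_shape
        self.num_classes = num_classes
        self.activation = activation

    def build(self, hp):
        model = keras.Sequential([
            keras.layers.Conv2D(hp.Int('filters', 32, 128, step=32), (3, 3),
                                activation='relu', input_shape=self.input_shape),
            keras.layers.MaxPooling2D(),
            keras.layers.Flatten(),
            keras.layers.Dense(hp.Int('units', 64, 256, step=64), activation='relu'),
            keras.layers.Dense(self.num_classes, activation=self.activation),
        ])
        model.compile(
            optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'])
        return model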
Example no. 2
def build_model(X_train, Y_train, X_test, Y_test):
    hyperModel = RegressionHyperModel((X_train.shape[1], ))

    tuner_rs = RandomSearch(hyperModel,
                            objective='mse',
                            max_trials=135,
                            executions_per_trial=1,
                            directory='param_opt_checkouts',
                            project_name='GDW')
    tuner_rs.search(X_train,
                    Y_train,
                    validation_data=(X_test, Y_test),
                    epochs=160)
    best_model = tuner_rs.get_best_models(num_models=1)[0]

    #metrics = ['loss', 'mse', 'mae', 'mape', 'cosine_proximity']
    #_eval = best_model.evaluate(X_test, Y_test)
    #print(_eval)
    #for i in range(len(metrics)):
    #    print(f'{metrics[i]} : {_eval[i]}')

    # history = best_model.fit(X_train, Y_train, validation_data = (X_test, Y_test), epochs=50)

    # best_model.save('./models_ANN/best_model')

    # save_model(best_model)
    tuner_rs.results_summary()
    load_model().summary()
    predict(best_model)
Example no. 3
def tuneCNN(
        X_train,
        X_test,
        height,
        width,
        num_classes,
        patience=1,
        executions_per_trial=1,
        seed=42,
        max_trials=3,
        objective='val_accuracy',
        directory='my_dir',
        epochs=10,
        verbose=0,
        test_size=0.2):
    # creates hypermodel object based on the num_classes and the input shape
    hypermodel = CNNHyperModel(input_shape=(
        height, width, 3), num_classes=num_classes)

    # tuners, establish the object to look through the tuner search space
    tuner = RandomSearch(
        hypermodel,
        objective=objective,
        seed=seed,
        max_trials=max_trials,
        executions_per_trial=executions_per_trial,
        directory=directory,
    )


    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model

    tuner.search(X_train,
                 validation_data=X_test,
                 callbacks=[tf.keras.callbacks.EarlyStopping(patience=patience)],
                 epochs=epochs,
                 verbose=verbose)

    # best hyperparameters
    hyp = tuner.get_best_hyperparameters(num_trials=1)[0]
    #hyp = tuner.oracle.get_best_trials(num_trials=1)[0].hyperparameters.values
    #best_hps = np.stack(hyp).astype(None)
    history = tuner_hist(
        X_train,
        X_test,
        tuner,
        hyp,
        img=1,
        epochs=epochs,
        verbose=verbose,
        test_size=test_size)

    """
    Return:
        models[0] : best model obtained after tuning
        best_hps : best Hyperparameters obtained after tuning, stored as array
        history : history of the data executed from the given model
    """
    return tuner.get_best_models(1)[0], hyp, history
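Examples 3, 27, 28 and 30 all call an undefined tuner_hist helper. Below is a minimal sketch of what it plausibly does (rebuild the best model from the tuned hyperparameters, refit it, and return the training history); the exact signature and the img flag are assumptions:

from sklearn.model_selection import train_test_split

def tuner_hist(X, y, tuner, hyp, img=0, epochs=10, verbose=0, test_size=0.2):
    # Build a fresh model with the winning hyperparameters and train it,
    # returning the Keras History object the callers expect.
    model = tuner.hypermodel.build(hyp)
    if img:
        # image callers appear to pass pre-batched datasets as (X, y) = (train_set, val_set)
        history = model.fit(X, validation_data=y, epochs=epochs, verbose=verbose)
    else:
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
        history = model.fit(X_train, y_train,
                            validation_data=(X_test, y_test),
                            epochs=epochs,
                            verbose=verbose)
    return history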
Example no. 4
 def search_bestCNN(self,
                    X,
                    Y,
                    testX,
                    testY,
                    epochs=50,
                    max_trials=20,
                    batch_size=64,
                    project_name='A1'):
     tuner = RandomSearch(self._build_CNN,
                          objective='val_accuracy',
                          max_trials=max_trials,
                          executions_per_trial=1,
                          directory='tunerlog',
                          project_name=project_name)
     tuner.search(x=X,
                  y=Y,
                  epochs=epochs,
                  batch_size=batch_size,
                  validation_data=(testX, testY),
                  callbacks=[
                      tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                       patience=5)
                  ],
                  verbose=2)
     tuner.search_space_summary()
     tuner.results_summary()
     print('best_hyperparameters')
     print(tuner.get_best_hyperparameters()[0].values)
     return tuner.get_best_models()
Example no. 5
def tuneCNN(X, y, num_classes):

    # creates hypermodel object based on the num_classes and the input shape
    hypermodel = CNNHyperModel(input_shape=(224, 224, 3),
                               num_classes=num_classes)

    # tuners, establish the object to look through the tuner search space
    tuner = RandomSearch(
        hypermodel,
        objective='val_accuracy',
        seed=42,
        max_trials=3,
        executions_per_trial=3,
        directory='random_search',
    )

    X_train, X_test, y_train, y_test = train_test_split(np.asarray(X),
                                                        np.asarray(y),
                                                        test_size=0.33,
                                                        random_state=42)

    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model
    tuner.search(X_train,
                 y_train,
                 validation_data=(X_test, y_test),
                 callbacks=[tf.keras.callbacks.EarlyStopping(patience=1)])

    # returns the best model
    return tuner.get_best_models(1)[0]
Example no. 6
    def _fit(self, X_train, y_train, X_test, y_test, X_val, y_val):
        tuner = RandomSearch(self._build_model,
                             objective='val_accuracy',
                             max_trials=self.max_trials,
                             executions_per_trial=1,
                             directory='logs/keras-tuner/',
                             project_name='cnn')

        tuner.search_space_summary()

        tuner.search(x=X_train,
                     y=y_train,
                     epochs=self.epochs,
                     batch_size=self.batch_size,
                     verbose=0,
                     validation_data=(X_val, y_val),
                     callbacks=[EarlyStopping('val_accuracy', patience=4)])
        tuner.results_summary()
        model = tuner.get_best_models(num_models=1)[0]
        model.summary()

        # Evaluate Best Model #
        _, train_acc = model.evaluate(X_train, y_train, verbose=0)
        _, test_acc = model.evaluate(X_test, y_test, verbose=0)
        print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
Example no. 7
    def tune(self):
        """ TODO: actually this should be def tune(..) - will have to reconcile/fix the nomenclature at some point """
        tuner = RandomSearch(self.model,
                             objective='val_accuracy',
                             max_trials=50,
                             executions_per_trial=2,
                             directory='my_dir',
                             project_name='helloworld')

        # preprocess the data
        data = self.data_reduced
        LOG.info(f"BUILD: data from keras: {data.keys()}")

        x = data["x_train"]
        y = data["y_train"]
        x_val = data["x_test"]
        y_val = data["y_test"]

        print(f"label shapes: {y.shape} {y_val.shape}")
        # call the tuner on that

        try:
            tuner.search(x, y, epochs=3, validation_data=(x_val, y_val))
        except Exception as ex:
            LOG.error(f"Failed to tuner.search: {ex}")
        models = tuner.get_best_models(num_models=2)

        # self.tuner_model.fit(data["x_train"],data["y_train"])
        LOG.info(f"Finished tuning model. Summary:")
Example no. 8
def main():

    dataset = makeHistoricalData(fixed_data, temporal_data, h, r, 'death',
                                 'mrmr', 'country', 'regular')

    numberOfSelectedCounties = len(dataset['county_fips'].unique())
    new_dataset = clean_data(dataset, numberOfSelectedCounties)
    X_train, y_train, X_val, y_val, X_test, y_test, y_train_date, y_test_date, y_val_date, val_naive_pred, test_naive_pred = preprocess(
        new_dataset)
    X_train, y_train, X_val, y_val, X_test, y_test, scalar = data_normalize(
        X_train, y_train, X_val, y_val, X_test, y_test)

    hypermodel = LSTMHyperModel(n=X_train.shape[2])

    tuner = RandomSearch(hypermodel,
                         objective='mse',
                         seed=1,
                         max_trials=60,
                         executions_per_trial=4,
                         directory='parameter_tuning',
                         project_name='lstm_model_tuning')

    tuner.search_space_summary()

    print()
    input("Press Enter to continue...")
    print()

    N_EPOCH_SEARCH = 50
    tuner.search(X_train, y_train, epochs=N_EPOCH_SEARCH, validation_split=0.2)

    print()
    input("Press Enter to show the summary of search...")
    print()

    # Show a summary of the search
    tuner.results_summary()

    print()
    input("Press Enter to retrive the best model...")
    print()

    # Retrieve the best model.
    best_model = tuner.get_best_models(num_models=1)[0]

    print()
    input("Press Enter to show best model summary...")
    print()

    best_model.summary()

    print()
    input("Press Enter to run the best model on test dataset...")
    print()

    # Evaluate the best model.
    loss, accuracy = best_model.evaluate(X_test, y_test)
    print("loss = " + str(loss) + ", acc = " + str(accuracy))
Example no. 9
def tuneClass(X,
              y,
              num_classes,
              max_layers=10,
              min_layers=2,
              min_dense=32,
              max_dense=512,
              executions_per_trial=3,
              max_trials=1,
              activation='relu',
              loss='categorical_crossentropy',
              metrics='accuracy'):
    # function build model using hyperparameter
    le = preprocessing.LabelEncoder()
    y = tf.keras.utils.to_categorical(le.fit_transform(y),
                                      num_classes=num_classes)

    def build_model(hp):
        model = keras.Sequential()
        for i in range(hp.Int('num_layers', min_layers, max_layers)):
            model.add(
                layers.Dense(units=hp.Int('units_' + str(i),
                                          min_value=min_dense,
                                          max_value=max_dense,
                                          step=32),
                             activation=activation))
        model.add(layers.Dense(num_classes, activation='softmax'))
        model.compile(optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                      loss=loss,
                      metrics=[metrics])
        return model

    # tuners, establish the object to look through the tuner search space
    tuner = RandomSearch(build_model,
                         objective='loss',
                         max_trials=max_trials,
                         executions_per_trial=executions_per_trial,
                         directory='models',
                         project_name='class_tuned')

    # tuner.search_space_summary()

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=49)

    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model
    tuner.search(X_train, y_train, epochs=5, validation_data=(X_test, y_test))
    models = tuner.get_best_models(num_models=1)
    return models[0]
Example no. 10
def tuneReg(data,
            target,
            max_layers=10,
            min_layers=2,
            min_dense=32,
            max_dense=512,
            executions_per_trial=3,
            max_trials=1):
    print("entered1")

    # function build model using hyperparameter

    def build_model(hp):
        model = keras.Sequential()
        for i in range(hp.Int('num_layers', min_layers, max_layers)):
            model.add(
                layers.Dense(units=hp.Int('units_' + str(i),
                                          min_value=min_dense,
                                          max_value=max_dense,
                                          step=32),
                             activation='relu'))
        # linear output for regression; softmax on a single unit would always output 1.0
        model.add(layers.Dense(1))
        model.compile(optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                      loss='mean_squared_error')
        return model

    # random search for the model
    tuner = RandomSearch(build_model,
                         objective='loss',
                         max_trials=max_trials,
                         executions_per_trial=executions_per_trial)

    # tuner.search_space_summary()
    # del data[target]

    X_train, X_test, y_train, y_test = train_test_split(data,
                                                        target,
                                                        test_size=0.2,
                                                        random_state=49)

    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model
    tuner.search(X_train,
                 y_train,
                 epochs=5,
                 validation_data=(X_test, y_test),
                 callbacks=[tf.keras.callbacks.TensorBoard('my_dir')])

    models = tuner.get_best_models(num_models=1)
    return models[0]
Example no. 11
def run_fn(fn_args):

  tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)

  train_dataset = input_fn(fn_args.train_files, tf_transform_output, batch_size=100)
  eval_dataset = input_fn(fn_args.eval_files, tf_transform_output, batch_size=100)

  log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), 'logs')
  tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, update_freq='batch')

  if True:  # flip to False to run the Keras Tuner branch below instead
    print("Use normal Keras model")
    mirrored_strategy = tf.distribute.MirroredStrategy()
    with mirrored_strategy.scope():
      model = build_keras_model(None)
    model.fit(
        train_dataset,
        epochs=1,
        steps_per_epoch=fn_args.train_steps,
        validation_data=eval_dataset,
        validation_steps=fn_args.eval_steps,
        callbacks=[tensorboard_callback])
  else:
    print("Use normal Keras Tuner")
    tuner = RandomSearch(
        build_keras_model,
        objective='val_binary_accuracy',
        max_trials=5,
        executions_per_trial=3,
        directory=fn_args.serving_model_dir,
        project_name='tuner')
    tuner.search(
        train_dataset,
        epochs=1,
        steps_per_epoch=fn_args.train_steps, # or a few steps just to find good HPs, then fit fully afterwards
        validation_steps=fn_args.eval_steps,
        validation_data=eval_dataset,
        callbacks=[tensorboard_callback, tf.keras.callbacks.EarlyStopping()])
    tuner.search_space_summary()
    tuner.results_summary()
    best_hparams = tuner.oracle.get_best_trials(1)[0].hyperparameters.get_config()
    model = tuner.get_best_models(1)[0]

  signatures = {
      'serving_default': get_serve_tf_examples_fn(model, tf_transform_output).get_concrete_function(
          tf.TensorSpec(shape=[None],
                        dtype=tf.string,
                        name='input_example_tensor')),
  }

  model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
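In the tuner branch above, best_hparams is only extracted, and the model returned by get_best_models is trained only for the short search run. A common follow-up, sketched here under the assumption that build_keras_model accepts the hyperparameters object, is to rebuild the winning model and train it to convergence before saving:

best_hp = tuner.oracle.get_best_trials(1)[0].hyperparameters
model = build_keras_model(best_hp)
model.fit(
    train_dataset,
    steps_per_epoch=fn_args.train_steps,
    validation_data=eval_dataset,
    validation_steps=fn_args.eval_steps,
    callbacks=[tensorboard_callback])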
Example no. 12
def KerasTuner(XTrain, YTrain, XValidation, YValidation):
    tuner = RandomSearch(buildModel,
                         objective='mse',
                         max_trials=30,
                         executions_per_trial=10,
                         directory='KerasTuner',
                         project_name=f'KerasTuner-{constants.NAME}')

    tuner.search_space_summary()

    tuner.search(XTrain,
                 YTrain,
                 epochs=5,
                 validation_data=(XValidation, YValidation))

    models = tuner.get_best_models(num_models=1)

    tuner.results_summary()

    return models
Example no. 13
def train_models(x_train, x_test, y_train, y_test, model_name, epochs,
                 batch_size, params):
    # Get the class object from the models file and create instance
    model = getattr(models, model_name)(**params)
    tuner = RandomSearch(
        model,
        objective=kerastuner.Objective("val_f1_m", direction="max"),
        max_trials=5,
        executions_per_trial=1,
        directory='random_search',
        project_name='sentiment_analysis_' + str(model_name),
        distribution_strategy=tf.distribute.MirroredStrategy())
    tuner.search_space_summary()
    tuner.search(x_train,
                 to_categorical(y_train),
                 epochs=epochs,
                 validation_data=(x_test, to_categorical(y_test)))
    return tuner.get_best_models(
        num_models=1)[0], tuner.oracle.get_best_trials(
            num_trials=1)[0].hyperparameters
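The custom objective above ('val_f1_m', maximized) only works if every candidate model is compiled with a matching f1_m metric. The models file is not shown, but a common Keras implementation of such a metric looks like this (an assumption, not the source's code):

from tensorflow.keras import backend as K

def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())

def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())

def f1_m(y_true, y_pred):
    # harmonic mean of precision and recall; epsilon avoids division by zero
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * (precision * recall) / (precision + recall + K.epsilon())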
Example no. 14
def tune():

    tuner = RandomSearch(tuner_model,
                         objective="val_accuracy",
                         max_trials=100,
                         executions_per_trial=1,
                         directory=LOG_DIR,
                         project_name='final_year_project')

    tuner.search(x=x_train,
                 y=y_train,
                 epochs=3,
                 batch_size=64,
                 validation_data=(x_test, y_test))

    with open("tuner.pkl", "wb") as f:
        pickle.dump(tuner, f)

    tuner = pickle.load(open("tuner.pkl", "rb"))

    print(tuner.get_best_hyperparameters()[0].values)
    tuner.results_summary()
    tuner.get_best_models()[0].summary()
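Pickling a whole tuner, as above, can fail depending on the TensorFlow version, because the tuner holds references to unpicklable objects. Keras Tuner already checkpoints every trial to its directory, so a safer equivalent (in recent versions that support the overwrite flag) is to rebuild the tuner with the same directory and project_name:

tuner = RandomSearch(tuner_model,
                     objective="val_accuracy",
                     max_trials=100,
                     executions_per_trial=1,
                     directory=LOG_DIR,
                     project_name='final_year_project',
                     overwrite=False)  # reloads the existing trials instead of starting over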
Example no. 15
    return model



tuner = RandomSearch(
    build_model,
    objective='val_accuracy',
    max_trials=1,  # how many model variations to test?
    executions_per_trial=1,  # how many trials per variation? (same model could perform differently)
    directory=LOG_DIR)


tuner.search(x=x_train,
             y=y_train,
             verbose=2, # keeps the Jupyter console output compact
             epochs=1,
             batch_size=64,
             #callbacks=[tensorboard],  # if you have callbacks like tensorboard, they go here.
             validation_data=(x_test, y_test))


tuner.results_summary()
tuner.get_best_hyperparameters()[0].values
tuner.get_best_models()[0].summary()

with open(f"tuner_{int(time.time())}.pkl", "wb") as f:
    pickle.dump(tuner, f)


# To reload a saved tuner later:
#tuner = pickle.load(open("tuner_1576628824.pkl","rb"))
Example no. 16
    def buttonClicked1(self):

        train_dir = 'train'
        val_dir = 'val'
        test_dir = 'test'
        img_width, img_height = 150, 150
        input_shape = (img_width, img_height, 3)

        epochs = self.InputEpochs.value()
        Nclasses = self.InputClass.value()
        batch_size = self.InputBatch.value()
        nb_train_samples = self.InputTrain.value()
        nb_validation_samples = self.InputValidation.value()
        nb_test_samples = self.InputTest.value()

        l=0

        def build_model(hp):  
            model = Sequential()

            num_hidden_layers = hp.Int('num_hidden_layers', 1, 3, default=1)
            num_conv_layers = hp.Int('num_conv_layers', 2, 6, default=2)

            model.add(Conv2D(32, (3, 3), input_shape=input_shape))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            for i in range(num_conv_layers):
                filters = hp.Int('filters'+str(i), 32, 64, step=16)
                model.add(Conv2D(filters,(3, 3)))
                model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            model.add(Conv2D(128, (3, 3)))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

            model.add(Flatten())
    
            for j in range(num_hidden_layers):
                model.add(Dense(units=hp.Int('units_hiddenNeurons_'+str(j),
                                             min_value=128,
                                             max_value=1024,
                                             step=64),
                                activation=hp.Choice('activation'+str(j),values=['relu','tanh','elu','selu'])))

            model.add(Dropout(0.5))
            model.add(Dense(Nclasses))
            model.add(Activation('softmax'))
            model.compile(
                loss='categorical_crossentropy',
                optimizer=hp.Choice('optimizer', values=['adam','rmsprop','SGD'],default='adam'),
                metrics=['accuracy'])
            return model

        tuner = RandomSearch(
            build_model,
            objective='val_accuracy',
            max_trials=15,
            directory='test_directory')

        tuner.search_space_summary()

        datagen = ImageDataGenerator(rescale=1. / 255)

        train_generator = datagen.flow_from_directory(
            train_dir,
            target_size=(img_width, img_height),
            batch_size=batch_size,
            class_mode='categorical')

        val_generator = datagen.flow_from_directory(
            val_dir,
            target_size=(img_width, img_height),
            batch_size=batch_size,
            class_mode='categorical')

        test_generator = datagen.flow_from_directory(
            test_dir,
            target_size=(img_width, img_height),
            batch_size=batch_size,
            class_mode='categorical')

        tuner.search(
            train_generator,
            steps_per_epoch=nb_train_samples // batch_size,
            epochs=epochs,
            validation_data=val_generator,
            validation_steps=nb_validation_samples // batch_size)

        tuner.results_summary()

        models = tuner.get_best_models(num_models=3)

        for model in models:
            model.summary()
            l=l+1
            scores = model.evaluate_generator(test_generator, nb_test_samples // batch_size)
            model.save('bestmodel_'+str(l)+'.h5')
            print("Аккуратность на тестовых данных: %.2f%%" % (scores[1]*100))
Example no. 17
def train_model(config, approach, input_window_size, samples,
                max_nr_dense_hidden_layer, max_nr_lstm_units,
                max_nr_dense_hidden_layer_neurons):
    n_features = samples[0][0].shape[2]

    def build_model(hp):
        model = Sequential()
        # input LSTM layer
        model.add(
            LSTM(hp.Int("input_units",
                        min_value=5,
                        max_value=max_nr_lstm_units,
                        step=3),
                 activation="relu",
                 input_shape=(input_window_size, n_features),
                 name="lstm"))

        # dense hidden layers with dropouts
        for i in range(hp.Int("n_layers", 0, max_nr_dense_hidden_layer)):
            model.add(
                Dense(hp.Int(f"dense_{i}_units",
                             min_value=5,
                             max_value=max_nr_dense_hidden_layer_neurons,
                             step=3),
                      activation="relu",
                      name=f"hidden_{i}"))
            for j in range(hp.Int("dropout_layers", 0, 1)):
                model.add(
                    Dropout(hp.Choice(
                        f"drop_rate_{j}",
                        values=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]),
                            name=f"dropout_{i}{j}"))

        # dense output layer
        model.add(Dense(1, name="output"))
        model.compile(loss="mean_squared_error",
                      optimizer=Adam(learning_rate=hp.Choice(
                          "learning_rate", values=[1e-2, 1e-3, 1e-4])),
                      metrics=[metrics.mae])
        return model

    # training & hyperparameter tuning
    LOG_DIR = f"{int((time.time()))}"

    tuner = RandomSearch(
        build_model,
        objective="val_mean_absolute_error",  # evtl. RMSE
        max_trials=5,
        executions_per_trial=3,
        directory=f'{config.models}{approach}_{input_window_size}/tuning_logs',
        project_name=LOG_DIR)

    tuner.search(x=samples[0][0],
                 y=samples[0][1],
                 epochs=100,
                 batch_size=32,
                 validation_data=(samples[1][0], samples[1][1]),
                 callbacks=[EarlyStopping('val_loss', patience=5)])

    # get best model
    model = tuner.get_best_models(num_models=1)[0]
    model.save(f"{config.models}{approach}_{input_window_size}")

    return model
Example no. 18
def search(
    epochs: int,
    batch_size: int,
    n_trials: int,
    execution_per_trial: int,
    project: Text,
    do_cleanup: bool,
):
    set_seed(SEED)

    dir_to_clean = os.path.join(SEARCH_DIR, project)
    if do_cleanup and os.path.exists(dir_to_clean):
        shutil.rmtree(dir_to_clean)

    # first 80% of "train" for training, last 20% for validation; the "test" split for final eval.
    ds_tr, ds_val, ds_test = tfds.load(
        name="mnist",
        split=["train[:80%]", "train[-20%:]", "test"],
        data_dir="mnist",
        shuffle_files=False,
    )

    ds_tr = prepare_dataset(ds_tr,
                            batch_size,
                            shuffle=True,
                            drop_remainder=True)
    ds_val = prepare_dataset(ds_val,
                             batch_size,
                             shuffle=False,
                             drop_remainder=False)
    ds_test = prepare_dataset(ds_test,
                              batch_size,
                              shuffle=False,
                              drop_remainder=False)

    tuner = RandomSearch(
        build_model,
        objective="val_accuracy",
        max_trials=n_trials,
        executions_per_trial=execution_per_trial,
        directory=SEARCH_DIR,
        project_name=project,
    )

    # ? add callbacks
    tuner.search(
        ds_tr,
        epochs=epochs,
        validation_data=ds_val,
    )

    best_model: tf.keras.Model = tuner.get_best_models(num_models=1)[0]
    best_model.build((None, DEFAULTS["num_features"]))
    results = best_model.evaluate(ds_test, return_dict=True)

    tuner.results_summary(num_trials=1)
    best_hyperparams = tuner.get_best_hyperparameters(num_trials=1)
    print(f"Test results: {results}")

    output = {"results": results, "best_hyperparams": best_hyperparams}
    with open("search_results.pickle", "wb") as f:
        pickle.dump(output, f)
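The pickled best_hyperparams above is a list of HyperParameters objects; their .values dict is usually what you want to persist. A sketch of rebuilding and retraining the winning model from them (names reused from the example above):

best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hp.values)                    # plain dict of the chosen settings
model = tuner.hypermodel.build(best_hp)  # fresh, untrained model with those settings
model.fit(ds_tr, epochs=epochs, validation_data=ds_val)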
Example no. 19
    return model


tuner = RandomSearch(tune_model,
                     objective='val_accuracy',
                     max_trials=10,
                     executions_per_trial=2)

tuner.search(x=x_train,
             y=y_train,
             epochs=3,
             batch_size=32,
             validation_data=(x_test, y_test))

print(tuner.get_best_hyperparameters()[0].values)
tuner.get_best_models()[0].summary()


def create_model():
    model = keras.models.Sequential([
        keras.layers.Dense(160, activation='relu'),
        keras.layers.Dense(224, activation='relu'),
        keras.layers.Dense(256, activation='relu'),
        keras.layers.Dense(96, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid')
    ])

    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
Example no. 20
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer=keras.optimizers.Adam(
        hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
                  loss='binary_crossentropy',
                  metrics=['acc'])
    return model


tuner = RandomSearch(build_model,
                     objective='val_acc',
                     max_trials=5,
                     executions_per_trial=3)

tuner.search_space_summary()

tuner.search(X_train, y_train, epochs=1, validation_data=(X_test, y_test))

models = tuner.get_best_models(num_models=2)

print(models)

print(models[0])

print(models[1])

models[0].summary()

models[1].summary()

tuner.results_summary()
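Note that the objective string has to match a metric the models actually log: compiling with metrics=['acc'] yields 'val_acc', as used here, while metrics=['accuracy'] would require objective='val_accuracy' instead.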
Example no. 21
def tune(cfg):
    # =========
    # Configure
    # =========

    cfg = yaml.full_load(open(cfg))
    # descend into the single top-level config key
    algName = [nm for nm in cfg][0]
    cfg = cfg[algName]

    # ======
    # Logger
    # ======

    logger = get_logger('Tune', 'INFO')

    # =======
    # Dataset
    # =======

    lmdb_dir = cfg['lmdb_dir']
    length = 4000
    train = 2000
    split = length - train

    s = np.arange(0, length)
    np.random.shuffle(s)

    # *** hardcoded shapes *** #
    # np.long was removed in recent NumPy releases; np.int64 is the equivalent
    y = list(
        islice(decaymode_generator(lmdb_dir, "Label", (), np.int64), length))
    X_1 = list(
        islice(decaymode_generator(lmdb_dir, "ChargedPFO", (3, 6), np.float32),
               length))
    X_2 = list(
        islice(
            decaymode_generator(lmdb_dir, "NeutralPFO", (8, 21), np.float32),
            length))
    X_3 = list(
        islice(decaymode_generator(lmdb_dir, "ShotPFO", (6, 6), np.float32),
               length))
    X_4 = list(
        islice(decaymode_generator(lmdb_dir, "ConvTrack", (4, 6), np.float32),
               length))

    y = np.asarray(y)[s]
    X_1, X_2, X_3, X_4 = (np.asarray(X_1)[s], np.asarray(X_2)[s],
                          np.asarray(X_3)[s], np.asarray(X_4)[s])

    y_train = y[:-split]
    X_train_1, X_train_2, X_train_3, X_train_4 = (X_1[:-split], X_2[:-split],
                                                  X_3[:-split], X_4[:-split])

    y_valid = y[-split:]
    X_valid_1, X_valid_2, X_valid_3, X_valid_4 = (X_1[-split:], X_2[-split:],
                                                  X_3[-split:], X_4[-split:])

    # =====
    # Model
    # =====

    # build algs architecture, then print to console
    model_ftn = partial(getattr(ModelModule, cfg['model']), cfg['arch'])
    model = model_ftn()
    model.summary(print_fn=logger.info)

    hp = HyperParameters()

    hp.Fixed("n_layers_tdd_default", 3)
    hp.Fixed("n_layers_fc_default", 3)

    tuner = RandomSearch(
        getattr(ModelModule, cfg['tune_model']),
        hyperparameters=hp,
        tune_new_entries=True,
        objective='val_loss',
        max_trials=20,
        executions_per_trial=2,
        directory=os.path.join(cfg['save_dir'], cfg['tune']),
        project_name=cfg['tune'],
        distribution_strategy=tf.distribute.MirroredStrategy(),
    )

    logger.info('Search space summary: ')
    tuner.search_space_summary()

    logger.info('Now searching ... ')
    tuner.search([X_train_1, X_train_2, X_train_3, X_train_4],
                 y_train,
                 steps_per_epoch=int(train / 200),
                 epochs=20,
                 validation_steps=int(split / 200),
                 validation_data=([X_valid_1, X_valid_2, X_valid_3,
                                   X_valid_4], y_valid),
                 workers=10,
                 verbose=0)

    logger.info('Done! ')
    models = tuner.get_best_models(num_models=8)
    tuner.results_summary()

    logger.info('Saving best models ... ')
    for i, model in enumerate(models):
        arch = model.to_json()
        with open(
                os.path.join(cfg['save_dir'], cfg['tune'],
                             f'architecture-{i}.json'), 'w') as arch_file:
            arch_file.write(arch)
        model.save_weights(
            os.path.join(cfg['save_dir'], cfg['tune'], f'weights-{i}.h5'))
    logger.info('Done! ')
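Reloading one of the architecture/weight pairs saved above is the usual counterpart (a sketch, assuming the same cfg paths):

from tensorflow.keras.models import model_from_json

with open(os.path.join(cfg['save_dir'], cfg['tune'], 'architecture-0.json')) as arch_file:
    model = model_from_json(arch_file.read())
model.load_weights(os.path.join(cfg['save_dir'], cfg['tune'], 'weights-0.h5'))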
Example no. 22
# (snippet truncated in the source: the call defining learning_rate_schedule,
# presumably a ReduceLROnPlateau callback, opens above these arguments)
                                           cooldown=0,
                                           min_lr=5E-3)

callbacks = [stop_condition, learning_rate_schedule]


tuner_rs.search(X_train,
                y_train,
                epochs=epochs,
                batch_size=batch_size,
                validation_split=0.2,
                callbacks=callbacks,
                verbose=1)

# Save the 10 best architectures to a txt file
models = tuner_rs.get_best_models(num_models=10)

idx = 0
with open('fine_tuning/Best_Architectures.txt', 'w') as ff:
    for model in models:
        ss = get_model_summary(model)
        ff.write('\n')
        ff.write(ss)
        ff.write(str(model.get_config()))


best_model = tuner_rs.get_best_models(num_models=1)[0]
loss, mse = best_model.evaluate(X_test, y_test)
best_model.summary()

model_json = best_model.to_json()
Example no. 23
        keras.layers.Dense(units=hp.Int('dense_1_units',
                                        min_value=32,
                                        max_value=128,
                                        step=16),
                           activation='relu'),
        keras.layers.Dense(10, activation='softmax')
    ])

    model.compile(optimizer=keras.optimizers.Adam(
        hp.Choice('learning_rate', values=[1e-2, 1e-3])),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    return model


tuner_search = RandomSearch(build_model,
                            objective='val_accuracy',
                            max_trials=5,
                            directory='output',
                            project_name='mnist_fashion')

tuner_search.search(train_images, train_labels, epochs=3, validation_split=0.1)

model = tuner_search.get_best_models(num_models=1)[0]
model.fit(train_images,
          train_labels,
          epochs=10,
          validation_split=0.1,
          initial_epoch=3)
Example no. 24
def run_fn(fn_args):
    """Train and tune the Keras model.

    Args:
      fn_args: Holds args used to train the model as name/value pairs.
    """

    train_batch_size = 100
    eval_batch_size = 100

    tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
    # numeric_columns =  [tf.feature_column.numeric_column('packed_numeric')]
    numeric_columns = [
        tf.feature_column.numeric_column(key) for key in NUMERIC_FEATURE_KEYS
    ]
    categorical_columns = [
        tf.feature_column.indicator_column(  # pylint: disable=g-complex-comprehension
            tf.feature_column.categorical_column_with_hash_bucket(
                key, hash_bucket_size=CATEGORICAL_FEATURE_BUCKETS[key]))
        for key in CATEGORICAL_FEATURE_KEYS
    ]

    train_data = input_fn(  # pylint: disable=g-long-lambda
        fn_args.train_files,
        tf_transform_output,
        batch_size=train_batch_size)

    eval_data = input_fn(  # pylint: disable=g-long-lambda
        fn_args.eval_files,
        tf_transform_output,
        batch_size=eval_batch_size)

    feature_columns = numeric_columns + categorical_columns

    model = KerasModel(feature_columns)

    tuner = RandomSearch(
        model,
        objective='val_binary_accuracy',
        max_trials=10,
        # Keep tuner files separate from model files, so that pusher can work properly when version <= 0.21.4.
        directory=os.path.dirname(fn_args.serving_model_dir),
        project_name='keras_tuner')

    log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), 'logs')
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                          update_freq='batch')
    # When passing an infinitely repeating dataset, you must specify the `steps_per_epoch` argument.
    tuner.search(train_data,
                 epochs=10,
                 steps_per_epoch=20,
                 validation_steps=fn_args.eval_steps,
                 validation_data=eval_data,
                 callbacks=[tensorboard_callback])
    tuner.search_space_summary()
    best_model = tuner.get_best_models(1)[0]
    signatures = {
        'serving_default':
        get_serving_receiver_fn(best_model,
                                tf_transform_output).get_concrete_function(
                                    tf.TensorSpec(shape=[None],
                                                  dtype=tf.string,
                                                  name='examples'))
    }
    # More about signatures:
    # https://www.tensorflow.org/api_docs/python/tf/saved_model/save?hl=en
    best_model.save(fn_args.serving_model_dir,
                    save_format='tf',
                    signatures=signatures)
Example no. 25
#%%
random_search_tuner.results_summary()

#%%
bayesian_opt_tuner.search(X_train,
                          y_train,
                          epochs=5,
                          validation_data=(X_test, y_test)
                          #validation_split=0.2,verbose=1)
                          )
#%%
bayesian_opt_tuner.results_summary()

#%%[markdown]
### Best models achieved with the random search and Bayesian hyperparameter optimization
rand_searched_models = random_search_tuner.get_best_models(num_models=-1)
bayes_optimized_models = bayesian_opt_tuner.get_best_models(num_models=-1)

#tuner.get_best_hyperparameters()
print('number of random searched models: {}'.format(len(rand_searched_models)))
print('number of bayesian optimized models: {}'.format(
    len(bayes_optimized_models)))

#%%[markdown]
### Get best models:
random_searched_model_best_model = random_search_tuner.get_best_models(
    num_models=1)
bayes_opt_model_best_model = bayesian_opt_tuner.get_best_models(num_models=1)

# %%[markdown]
# ### Evaluation score:
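The notebook cells above assume both tuners were constructed in earlier cells. A sketch of the missing constructors (every argument here is an assumption):

from kerastuner.tuners import RandomSearch, BayesianOptimization

random_search_tuner = RandomSearch(build_model,
                                   objective='val_accuracy',
                                   max_trials=10,
                                   directory='random_search')
bayesian_opt_tuner = BayesianOptimization(build_model,
                                          objective='val_accuracy',
                                          max_trials=10,
                                          directory='bayesian_opt')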
Example no. 26
# (snippet truncated in the source: the RandomSearch constructor that builds
# random_tuner opens above these arguments)
                            max_trials=100,
                            executions_per_trial=1,
                            seed=10,
                            project_name='lstm-kerastuner-uni',
                            directory="C:\\PATH")

# Search for the best parameters of the neural network using the constructed random search tuner
random_tuner.search(X_train,
                    Y_train,
                    epochs=100,
                    validation_data=(X_validate, Y_validate))

#get the best model

random_params = random_tuner.get_best_hyperparameters()[0]
best_model = random_tuner.get_best_models(1)[0]

#Evaluate it on the validation set

print("Evaluation of best performing model:")
print(best_model.evaluate(X_validate, Y_validate))

#Get summary

#random_tuner.results_summary()

#Saving the model

file_name = 'LSTM_BTC_tuned.h5'
best_model.save(file_name)
print("Saved model `{}` to disk".format(file_name))
Example no. 27
def tuneClass(X,
              y,
              num_classes,
              max_layers=10,
              min_layers=2,
              min_dense=32,
              max_dense=512,
              executions_per_trial=3,
              max_trials=3,
              activation='relu',
              loss='categorical_crossentropy',
              metrics='accuracy',
              epochs=10,
              step=32,
              verbose=0,
              test_size=0.2):
    # function build model using hyperparameter
    le = preprocessing.LabelEncoder()
    y = tf.keras.utils.to_categorical(le.fit_transform(y),
                                      num_classes=num_classes)

    def build_model(hp):
        model = keras.Sequential()
        for i in range(hp.Int('num_layers', min_layers, max_layers)):
            model.add(
                Dense(units=hp.Int('units_' + str(i),
                                   min_value=min_dense,
                                   max_value=max_dense,
                                   step=step),
                      activation=activation))
        model.add(Dense(num_classes, activation='softmax'))
        model.compile(optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                      loss=loss,
                      metrics=[metrics])
        return model

    # tuners, establish the object to look through the tuner search space
    tuner = RandomSearch(build_model,
                         objective='loss',
                         max_trials=max_trials,
                         executions_per_trial=executions_per_trial,
                         directory='models',
                         project_name='class_tuned')

    # tuner.search_space_summary()

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=49)

    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model
    tuner.search(X_train,
                 y_train,
                 epochs=epochs,
                 validation_data=(X_test, y_test))
    models = tuner.get_best_models(num_models=1)
    hyp = tuner.get_best_hyperparameters(num_trials=1)[0]
    #hyp = tuner.oracle.get_best_trials(num_trials=1)[0].hyperparameters.values
    #best_hps = np.stack(hyp).astype(None)
    history = tuner_hist(X,
                         y,
                         tuner,
                         hyp,
                         epochs=epochs,
                         verbose=verbose,
                         test_size=test_size)
    """
    Return:
        models[0] : best model obtained after tuning
        best_hps : best Hyperparameters obtained after tuning, stored as array
        history : history of the data executed from the given model
    """
    return models[0], hyp, history
Example no. 28
def tuneReg(data,
            target,
            max_layers=10,
            min_layers=2,
            min_dense=32,
            max_dense=512,
            executions_per_trial=3,
            max_trials=3,
            epochs=10,
            activation='relu',
            step=32,
            verbose=0,
            test_size=0.2):

    # function build model using hyperparameter
    def build_model(hp):
        model = keras.Sequential()
        for i in range(hp.Int('num_layers', min_layers, max_layers)):
            model.add(
                Dense(units=hp.Int('units_' + str(i),
                                   min_value=min_dense,
                                   max_value=max_dense,
                                   step=step),
                      activation=activation))
        model.add(Dense(1))
        model.compile(optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                      loss='mean_squared_error')
        return model

    # random search for the model
    tuner = RandomSearch(build_model,
                         objective='loss',
                         max_trials=max_trials,
                         executions_per_trial=executions_per_trial)
    # tuner.search_space_summary()
    # del data[target]

    X_train, X_test, y_train, y_test = train_test_split(data,
                                                        target,
                                                        test_size=0.2,
                                                        random_state=49)

    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model
    tuner.search(X_train,
                 y_train,
                 epochs=epochs,
                 validation_data=(X_test, y_test),
                 callbacks=[tf.keras.callbacks.TensorBoard('my_dir')])

    models = tuner.get_best_models(num_models=1)
    hyp = tuner.get_best_hyperparameters(num_trials=1)[0]
    #hyp = tuner.oracle.get_best_trials(num_trials=1)[0].hyperparameters.values
    #best_hps = np.stack(hyp).astype(None)
    history = tuner_hist(data,
                         target,
                         tuner,
                         hyp,
                         epochs=epochs,
                         verbose=verbose,
                         test_size=test_size)
    """
    Return:
        models[0] : best model obtained after tuning
        best_hps : best Hyperparameters obtained after tuning, stored as map
        history : history of the data executed from the given model
    """
    return models[0], hyp, history
Example no. 29
# (snippet truncated in the source: build_model opens above; these are the
# closing arguments of its model.compile call)
        hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model


tuner = RandomSearch(build_model,
                     objective='val_accuracy',
                     max_trials=5,
                     directory='output',
                     project_name='models')

# Search for the best model
tuner.search(train_images, train_labels, epochs=3, validation_split=0.1)

model = tuner.get_best_models(num_models=1)[0]

model.summary()

# Train the data with the best model
model.fit(train_images,
          train_labels,
          batch_size=100,
          epochs=10,
          validation_split=0.1,
          initial_epoch=3)

# Check the test data
m = 0
img = test_images[m]
img = np.expand_dims(img, axis=0)
Example no. 30
def tuneReg(data,
            target,
            max_layers=10,
            min_layers=2,
            min_dense=32,
            max_dense=512,
            executions_per_trial=1,
            max_trials=5,
            epochs=10,
            activation='relu',
            directory='my_dir',
            step=32,
            verbose=0,
            test_size=0.2):
    # function build model using hyperparameter
    def build_model(hp):
        model = keras.Sequential()
        model.add(
            Dense(units=hp.Int('units_0',
                               min_value=min_dense,
                               max_value=max_dense,
                               step=step),
                  input_dim=data.shape[1],
                  activation=activation))
        for i in range(hp.Int('num_layers', min_layers, max_layers)):
            model.add(
                Dense(units=hp.Int('units_' + str(i + 1),
                                   min_value=min_dense,
                                   max_value=max_dense,
                                   step=step),
                      activation=activation))
            model.add(
                Dropout(rate=hp.Float('dropout_' + str(i),
                                      min_value=0.0,
                                      max_value=0.5,
                                      default=0.20,
                                      step=0.05)))
        model.add(Dense(1, activation='linear'))
        lrate = hp.Float('learning_rate',
                         min_value=1e-5,
                         max_value=1e-1,
                         sampling='LOG',
                         default=1e-3)
        # hp.Choice only accepts int/float/str/bool values, so choose the
        # optimizer by name and map the name to an instance afterwards
        optimizer_name = hp.Choice('optimizer',
                                   values=['adam', 'sgd', 'rmsprop', 'adamax'])
        optimizers = {
            'adam': keras.optimizers.Adam(learning_rate=lrate),
            'sgd': keras.optimizers.SGD(learning_rate=lrate),
            'rmsprop': keras.optimizers.RMSprop(learning_rate=lrate),
            'adamax': keras.optimizers.Adamax(learning_rate=lrate)
        }
        model.compile(optimizer=optimizers[optimizer_name],
                      loss=hp.Choice('loss',
                                     values=[
                                         'mean_squared_logarithmic_error',
                                         'mean_squared_error', 'huber_loss',
                                         'mean_absolute_error',
                                         'cosine_similarity', 'log_cosh'
                                     ],
                                     default='mean_squared_error'),
                      metrics=['accuracy'])
        return model

    # random search for the model
    tuner = RandomSearch(build_model,
                         objective='val_accuracy',
                         max_trials=max_trials,
                         executions_per_trial=executions_per_trial,
                         directory=directory)
    # tuner.search_space_summary()
    # del data[target]

    X_train, X_test, y_train, y_test = train_test_split(data,
                                                        target,
                                                        test_size=0.2,
                                                        random_state=49)

    # searches the tuner space defined by hyperparameters (hp) and returns the
    # best model

    tuner.search(X_train,
                 y_train,
                 epochs=epochs,
                 validation_data=(X_test, y_test),
                 callbacks=[tf.keras.callbacks.TensorBoard('my_dir')])

    models = tuner.get_best_models(num_models=1)[0]
    hyp = tuner.get_best_hyperparameters(num_trials=1)[0]
    history = tuner_hist(data,
                         target,
                         tuner,
                         hyp,
                         epochs=epochs,
                         verbose=verbose,
                         test_size=test_size)
    """
    Return:
        models[0] : best model obtained after tuning
        best_hps : best Hyperprameters obtained after tuning, stored as map
        history : history of the data executed from the given model
    """
    return models, hyp, history, X_test, y_test
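A hypothetical call, assuming a tuner_hist helper like the one sketched after Example no. 3 is in scope (the data and trial counts are illustrative only):

import numpy as np

X = np.random.rand(200, 8)
y = X @ np.random.rand(8)
model, best_hp, history, X_test, y_test = tuneReg(X, y, max_trials=2, epochs=3)
print(best_hp.values)
print(model.evaluate(X_test, y_test))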