Code Example #1
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from kerastuner import RandomSearch


def tune_with_kerastuner2():
    # model = define_one_block_model()
    tuner = RandomSearch(define_one_block_model,
                         objective='val_accuracy',
                         max_trials=5,
                         executions_per_trial=3,
                         directory="tuner_dir",
                         project_name="cats_vs_dogs_tuner")

    train_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
    # width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
    # prepare iterators
    train_it = train_datagen.flow_from_directory('dataset_dogs_vs_cats/train/',
                                                 class_mode='binary',
                                                 batch_size=16,
                                                 target_size=(200, 200))
    test_it = test_datagen.flow_from_directory('dataset_dogs_vs_cats/test/',
                                               class_mode='binary',
                                               batch_size=16,
                                               target_size=(200, 200))

    tuner.search(train_it,
                 steps_per_epoch=len(train_it),
                 validation_data=test_it,
                 validation_steps=len(test_it),
                 epochs=5,
                 use_multiprocessing=True)

    models = tuner.get_best_models(num_models=3)
    tuner.results_summary()
    return tuner
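
The define_one_block_model builder referenced above is not shown. RandomSearch expects it to accept an hp argument and declare its own search space; below is a minimal sketch of such a builder matching the binary class mode and 200x200 inputs used here (the layer sizes and hyperparameter names are assumptions, not the original function).

from tensorflow import keras


def define_one_block_model(hp):
    # One VGG-style block; filter count and learning rate are tunable.
    model = keras.Sequential([
        keras.layers.Conv2D(hp.Int('filters', min_value=32, max_value=128, step=32),
                            (3, 3), activation='relu', input_shape=(200, 200, 3)),
        keras.layers.MaxPooling2D((2, 2)),
        keras.layers.Flatten(),
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid')  # binary: cats vs. dogs
    ])
    model.compile(optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[1e-3, 1e-4])),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model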
Code Example #2
from numpy import load
from sklearn.model_selection import train_test_split
from tensorflow import keras
from kerastuner import RandomSearch


def tune_with_kerastuner1():
    photos = load('dogs_vs_cats_photos.npy')
    labels = load('dogs_vs_cats_labels.npy')
    (trainX, testX, trainY, testY) = train_test_split(photos,
                                                      labels,
                                                      test_size=0.25,
                                                      random_state=42)

    trainY = keras.utils.to_categorical(trainY, 2)
    testY = keras.utils.to_categorical(testY, 2)

    # NOTE: RandomSearch expects a model-building function that accepts an
    # `hp` argument, not an already-built model, so pass the function itself.
    tuner = RandomSearch(define_three_block_model,
                         objective='val_accuracy',
                         max_trials=5,
                         executions_per_trial=3,
                         directory="tuner_dir",
                         project_name="cats_vs_dogs_tuner")
    tuner.search_space_summary()

    # tuner.search(trainX, trainY,
    #              epochs=5,
    #              validation_data=(testX, testY))

    models = tuner.get_best_models(num_models=2)
    tuner.results_summary()
    return tuner
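
Because the labels are one-hot encoded with to_categorical(..., 2), whatever define_three_block_model builds has to end in a two-unit softmax trained with categorical cross-entropy. A hedged sketch of a compatible builder follows (the input shape and layer sizes are assumptions).

from tensorflow import keras


def define_three_block_model(hp):
    model = keras.Sequential()
    model.add(keras.layers.InputLayer(input_shape=(200, 200, 3)))
    # Three VGG-style blocks with a tunable number of filters each.
    for i in range(3):
        model.add(keras.layers.Conv2D(
            hp.Int('filters_' + str(i), min_value=32, max_value=128, step=32),
            (3, 3), activation='relu', padding='same'))
        model.add(keras.layers.MaxPooling2D((2, 2)))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(128, activation='relu'))
    model.add(keras.layers.Dense(2, activation='softmax'))  # matches the one-hot labels
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model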
Code Example #3
    def fit_model(self, split_number):
        self.log_event('Training with hyperparameter tuning has started.')

        tuner = RandomSearch(self.build_model,
                             objective=self.objective_function,
                             max_trials=self.max_trials,
                             executions_per_trial=self.execution_per_trial,
                             seed=self.random_state,
                             project_name='split_' + str(split_number),
                             directory=os.path.normpath(self.lstm_tuner_save_dir))

        tuner.search(self.train_features, self.train_targets,
                     epochs=self.epochs,
                     batch_size=self.batch_size,
                     verbose=2,
                     validation_data=(self.test_features, self.test_targets))

        model = tuner.get_best_models(num_models=1)[0]

        # summary() prints directly; wrapping it in print() would just print None.
        model.summary()
        keras.utils.plot_model(model,
                               to_file=self.lstm_model_save_dir + self.lstm_model_description + '.png',
                               show_shapes=True,
                               show_layer_names=True)

        # Fit the best model of the split with the data
        history = model.fit(x=self.train_features,
                            y=self.train_targets,
                            batch_size=self.batch_size,
                            epochs=self.epochs,
                            verbose=2,
                            validation_data=(self.test_features, self.test_targets))

        # Save the model
        current_time = datetime.datetime.now()
        model.save(self.lstm_tuner_save_dir + '/split_' + str(split_number) + '/' + self.lstm_model_description + '_' + str(current_time.strftime('%Y-%m-%d_%H-%M-%S')) + '.h5')

        self.lstm_model = model
        self.model_history = history

        hist_df = pd.DataFrame(history.history)
        hist_df.to_csv(self.lstm_tuner_save_dir + '/split_' + str(split_number) + '/best_model_history.csv', index=False, header=True)
        self.best_possible_models.append(hist_df)
        self.print_summary(split_number)
        self.log_event('Training with hyperparameter tuning has finished.')
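
The saved .h5 file can be restored later without re-running the search. A minimal sketch; the path is illustrative, following the naming scheme used in fit_model above.

from tensorflow.keras.models import load_model

# Hypothetical path mirroring the save pattern in fit_model.
best_split_model = load_model('lstm_tuner_dir/split_1/my_lstm_2021-01-01_12-00-00.h5')
best_split_model.summary()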
Code Example #4
                      hp.Choice('learning_rate', values=[1e-4, 1e-3])),
                  metrics=['accuracy'])
    return model


from kerastuner import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters

tuner = RandomSearch(build_model,
                     objective='val_accuracy',
                     max_trials=5,
                     directory='output1',
                     project_name='EMNIST_Balanced_Tuned')
tuner.search(X_train, y_train, epochs=3, validation_data=(X_val, y_val))

model = tuner.get_best_models(num_models=1)[0]
# model.summary()

from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint

# Some callbacks that we're using

MCP = ModelCheckpoint('Best_points.h5',
                      verbose=1,
                      save_best_only=True,
                      monitor='val_accuracy',
                      mode='max')
ES = EarlyStopping(monitor='val_accuracy',
                   min_delta=0)
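
The snippet ends mid-setup, before the callbacks are attached to a training run. A hedged continuation, reusing the X_train/y_train/X_val/y_val arrays already passed to the search (the epoch count and the ReduceLROnPlateau settings are assumptions):

RLP = ReduceLROnPlateau(monitor='val_accuracy',
                        factor=0.5,
                        patience=2,
                        verbose=1)

history = model.fit(X_train, y_train,
                    epochs=20,
                    validation_data=(X_val, y_val),
                    callbacks=[MCP, ES, RLP])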
Code Example #5
    ),
    keras.layers.Flatten(),
    keras.layers.Dense(
        units=hp.Int('dense_1_units', min_value=32, max_value=128, step=16),
        activation='relu'
    ),
    keras.layers.Dense(10, activation='softmax')
  ])
  
  model.compile(optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[1e-2, 1e-3])),
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
  
  return model
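
The top of build_model is cut off above. Given the visible dense_1_units search space and the Fashion-MNIST project name, the missing head is presumably a tunable convolution; a full hedged reconstruction (the conv hyperparameter names are assumptions):

def build_model(hp):
  model = keras.Sequential([
    keras.layers.Conv2D(
        filters=hp.Int('conv_1_filters', min_value=32, max_value=128, step=16),
        kernel_size=hp.Choice('conv_1_kernel', values=[3, 5]),
        activation='relu',
        input_shape=(28, 28, 1)
    ),
    keras.layers.Flatten(),
    keras.layers.Dense(
        units=hp.Int('dense_1_units', min_value=32, max_value=128, step=16),
        activation='relu'
    ),
    keras.layers.Dense(10, activation='softmax')
  ])
  model.compile(optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[1e-2, 1e-3])),
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
  return model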

from kerastuner import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters

tuner_search = RandomSearch(build_model,
                            objective='val_accuracy',
                            max_trials=5,
                            directory='output',
                            project_name="Mnist Fashion")

tuner_search.search(train_images, train_labels, epochs=3, validation_split=0.1)

model = tuner_search.get_best_models(num_models=1)[0]

model.summary()

model.fit(train_images, train_labels, epochs=10, validation_split=0.1, initial_epoch=3)

Code Example #6
    directory='nas_result')

# Display search overview.
tuner.search_space_summary()

# Performs the hypertuning.
tuner.search(x_train,
             y_train,
             epochs=100,
             validation_split=0.1,
             batch_size=128,
             callbacks=[keras.callbacks.EarlyStopping(patience=10)])

# Show the best models, their hyperparameters, and the resulting metrics.
tuner.results_summary()

# Retrieve the best models.
best_models = tuner.get_best_models(num_models=10)

# Evaluate each of the best models on the held-out test set.
for i, best_model in enumerate(best_models):
    loss, accuracy, precision, recall, f1 = best_model.evaluate(x_test, y_test)
    print('*************************----best_model_' + str(i) +
          '----*************************')
    print('loss:', loss)
    print('accuracy:', accuracy)
    print('precision:', precision)
    print('recall:', recall)
    print('f1:', f1)
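
For the five-value evaluate call to unpack cleanly, the (unshown) model-building function must compile with matching metrics. Precision and recall exist as built-in Keras metrics, but F1 is not built into this generation of tf.keras, so one hedged option is a custom batch-wise metric:

import tensorflow.keras.backend as K
from tensorflow import keras


def f1(y_true, y_pred):
    # Batch-wise F1 computed from rounded predictions.
    y_pred = K.round(y_pred)
    tp = K.sum(K.cast(y_true * y_pred, 'float32'), axis=0)
    fp = K.sum(K.cast((1 - y_true) * y_pred, 'float32'), axis=0)
    fn = K.sum(K.cast(y_true * (1 - y_pred), 'float32'), axis=0)
    precision = tp / (tp + fp + K.epsilon())
    recall = tp / (tp + fn + K.epsilon())
    return K.mean(2 * precision * recall / (precision + recall + K.epsilon()))

# Inside the model-building function, the compile step would then read:
# model.compile(optimizer='adam', loss='binary_crossentropy',
#               metrics=['accuracy', keras.metrics.Precision(name='precision'),
#                        keras.metrics.Recall(name='recall'), f1])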
Code Example #7
                    y_train_simple_net,
                    epochs=1000,
                    validation_split=0.2,
                    verbose=0,
                    callbacks=callbacks)

    # get_best_hyperparameters returns a list, so rebuild from the top entry.
    best_hp = tuner_rs.get_best_hyperparameters(num_trials=10)
    model = tuner_rs.hypermodel.build(best_hp[0])

    # run1 = {'units': 8, 'activation': 'relu', 'kernel_initializer': 'random_normal', 'bias_initializer': 'lecun_normal', 'regularizers.l2': 0.001, 'learning_rate': 0.001}
    # run2 = {'units': 8, 'activation': 'elu', 'kernel_initializer': 'random_normal', 'bias_initializer': 'lecun_normal', 'regularizers.l2': 0.001, 'learning_rate': 0.001}
    # run3 = {'units': 8, 'activation': 'elu', 'kernel_initializer': 'random_uniform', 'bias_initializer': 'random_normal', 'regularizers.l2': 0.001, 'learning_rate': 0.001}
    # run4 = {'units': 6, 'activation': 'elu', 'kernel_initializer': 'random_normal', 'bias_initializer': 'random_uniform', 'regularizers.l2': 0.001, 'learning_rate': 0.001}
    # run5 = {'units': 8, 'activation': 'elu', 'kernel_initializer': 'random_uniform', 'bias_initializer': 'lecun_normal', 'regularizers.l2': 0.001, 'learning_rate': 0.001}

    best_model = tuner_rs.get_best_models(num_models=1)[0]
    # loss, mse = best_model.evaluate(x_valid_simple_net, y_valid_simple_net)

    # ----------------------------after finding the best hyper-parameters train the model-----------------------------

    # net_model, model_history, early_stoping = obj1.tuned_net(x_train_simple_net, y_train_simple_net)

    # extract the best model in the training and load
    # saved_model = load_model(r'C:\Users\nirro\Desktop\machine learning\ayyeka\models\model_4\best_model.h5')

    # weigths = early_stoping.best_weights

    # -----------------------------------------make prediction--------------------------------------------------------

    # test_predicted_flow = saved_model.predict(x_test_simple_net)
    # valid_predicted_flow = saved_model.predict(x_valid_simple_net)
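
The commented run1-run5 dictionaries reveal the search space. A hedged reconstruction of a build_model exposing exactly those hyperparameter names (the input width, layer count, and value ranges are assumptions):

from tensorflow import keras
from tensorflow.keras import layers, regularizers


def build_model(hp):
    model = keras.Sequential([
        # Input width is hypothetical; it depends on x_train_simple_net.
        layers.InputLayer(input_shape=(10,)),
        layers.Dense(
            units=hp.Int('units', min_value=4, max_value=8, step=2),
            activation=hp.Choice('activation', values=['relu', 'elu']),
            kernel_initializer=hp.Choice('kernel_initializer',
                                         values=['random_normal', 'random_uniform']),
            bias_initializer=hp.Choice('bias_initializer',
                                       values=['random_normal', 'random_uniform', 'lecun_normal']),
            kernel_regularizer=regularizers.l2(hp.Choice('regularizers.l2', values=[0.001]))),
        layers.Dense(1)  # single-value regression output (flow)
    ])
    model.compile(optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[0.001])),
                  loss='mse',
                  metrics=['mse'])
    return model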
Code Example #8
        keras.layers.Flatten(),
        keras.layers.Dense(units=hp.Int('dense_1_units',
                                        min_value=32,
                                        max_value=128,
                                        step=16),
                           activation='relu'),
        keras.layers.Dense(units=10, activation='softmax')
    ])
    model.compile(optimizer=keras.optimizers.Adam(
        hp.Choice('learning_rate', values=[1e-2, 1e-3])),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model


from kerastuner import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters

model_tuning = RandomSearch(build_model,
                            objective='val_accuracy',
                            max_trials=5)

model_tuning.search(images_train, label_train, epochs=3, validation_split=0.2)

best_model = model_tuning.get_best_models(num_models=1)[0]

best_model.fit(images_train,
               label_train,
               epochs=10,
               validation_split=0.2,
               initial_epoch=3)
Code Example #9
    return model


tuner = RandomSearch(
    build_model,
    objective='val_accuracy',
    max_trials=336,
    directory='test_directory'
)

tuner.search_space_summary()
tuner.search(x_train, y_train, batch_size=256, epochs=70, validation_split=0.1)

tuner.results_summary()

models = tuner.get_best_models(num_models=3)

for m in models:
    m.summary()
#    m.evaluate(x_test, y_test)
#    print()

top = tuner.get_best_models(num_models=1)[0]
history = top.fit(
    x_train, y_train, batch_size=256, epochs=70,
    validation_split=0.1, verbose=1
)

scores = top.evaluate(x_test, y_test, verbose=1)  # returns [loss, accuracy]
print("Score: ", round(scores[1] * 100, 4))
top.save('nums_top_model.h5')
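
A minimal sketch of reloading the saved model later, without repeating the 70-epoch fit:

from tensorflow.keras.models import load_model

top = load_model('nums_top_model.h5')
loss, acc = top.evaluate(x_test, y_test, verbose=0)  # same [loss, accuracy] order as above
print("Restored accuracy:", round(acc * 100, 4))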
Code Example #10
    def start_hyper_parameter_tuning(self, split_number):
        """
            Finds the RNN model that best fits the split data.

            1. Use Keras Tuner to find the best possible configuration for the RNN model.
            2. Train the candidate models.
            3. Find the best-performing model and train it on the split data.
            4. After training:
                4.1 Plot and save the model architecture to the file system.
                4.2 Save the RNN model to the file system.
                4.3 Save the training data (plots and CSV) to the file system.
            5. Append the best-performing model to the best_possible_models list so all models can be compared easily once every split has been trained.

        :param split_number: The number of the split data
        """
        self.log_event('Training with hyperparameter tuning has started.')

        tuner = RandomSearch(
            self.build_model,
            objective=self.hyper_parameters['objective_function'],
            max_trials=self.max_trials,
            executions_per_trial=self.executions_per_trial,
            seed=self.random_state,
            project_name=f'split_{str(split_number)}',
            directory=os.path.normpath(self.path_tuner_directory))

        tuner.search(self.train_features,
                     self.train_targets,
                     epochs=self.epochs,
                     batch_size=self.batch_size,
                     verbose=2,
                     validation_data=(self.test_features, self.test_targets))

        # Collect all trials and the id of the best-performing trial
        trials = tuner.oracle.trials
        best_model = tuner.oracle.get_best_trials()[0].trial_id

        self.model = tuner.get_best_models(num_models=1)[0]

        self.model_history = self.model.fit(
            self.train_features,
            self.train_targets,
            epochs=self.epochs,
            batch_size=self.batch_size,
            verbose=2,
            validation_data=(self.test_features, self.test_targets))

        self.print_hyper_parameter_results(split_number=split_number,
                                           trials=trials,
                                           best_model=best_model)

        keras.utils.plot_model(
            self.model,
            to_file=f'{self.path_model_directory}{self.model_alias}.png',
            show_shapes=True,
            show_layer_names=True)

        current_time = datetime.datetime.now()
        save_path = f'{self.path_tuner_directory}/split_{str(split_number)}/split_{str(split_number)}_{self.model_alias}_{str(current_time.strftime("%Y-%m-%d_%H-%M-%S"))}.h5'
        self.model.save(save_path)

        hist_df = pd.DataFrame(self.model_history.history)
        hist_df.to_csv(
            f'{self.path_tuner_directory}/split_{str(split_number)}/split_{str(split_number)}_{self.model_alias}_best_model_history.csv',
            index=False,
            header=True)
        self.best_possible_models.append(hist_df)

        self.plot_train_summary(plot_title=f'Split {str(split_number)}',
                                split_number=str(split_number))
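
For reference when implementing print_hyper_parameter_results: each trial held by the oracle carries its score and chosen hyperparameter values. A hedged sketch of iterating them:

# Sketch: report every trial's id, score, and chosen hyperparameters.
for trial_id, trial in tuner.oracle.trials.items():
    print(trial_id, trial.score, trial.hyperparameters.values)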