Example #1
    def fit_model(self, split_number):
        self.log_event('Training with hyperparameter tuning started.')

        tuner = RandomSearch(self.build_model,
                             objective=self.objective_function,
                             max_trials=self.max_trials,
                             executions_per_trial=self.execution_per_trial,
                             seed=self.random_state,
                             project_name='split_' + str(split_number),
                             directory=os.path.normpath(self.lstm_tuner_save_dir))

        tuner.search(self.train_features, self.train_targets,
                     epochs=self.epochs,
                     batch_size=self.batch_size,
                     verbose=2,
                     validation_data=(self.test_features, self.test_targets))

        model = tuner.get_best_models(num_models=1)[0]

        model.summary()  # summary() prints directly; wrapping it in print() would also emit "None"
        keras.utils.plot_model(model,
                               to_file=self.lstm_model_save_dir + self.lstm_model_description + '.png',
                               show_shapes=True,
                               show_layer_names=True)

        # Fit the best model of the split with the data
        history = model.fit(x=self.train_features,
                            y=self.train_targets,
                            batch_size=self.batch_size,
                            epochs=self.epochs,
                            verbose=2,
                            validation_data=(self.test_features, self.test_targets))

        # Save the model
        current_time = datetime.datetime.now()
        model.save(self.lstm_tuner_save_dir + '/split_' + str(split_number) + '/' + self.lstm_model_description + '_' + current_time.strftime('%Y-%m-%d_%H-%M-%S') + '.h5')

        self.lstm_model = model
        self.model_history = history

        hist_df = pd.DataFrame(history.history)
        hist_df.to_csv(self.lstm_tuner_save_dir + '/split_' + str(split_number) + '/best_model_history.csv', index=False, header=True)
        self.best_possible_models.append(hist_df)
        self.print_summary(split_number)
        self.log_event('Training with hyperparameter tuning finished.')
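RandomSearch expects a model-building callable that takes a single hp argument and returns a compiled model. The class's actual self.build_model is not shown above; a minimal illustrative sketch, where the layer sizes and input shape are assumptions only:

from tensorflow import keras
from tensorflow.keras import layers

def build_model(hp):
    # hypothetical builder: tunes the LSTM width and the learning rate
    model = keras.Sequential([
        layers.LSTM(hp.Int('lstm_units', min_value=32, max_value=256, step=32),
                    input_shape=(None, 1)),  # assumed (timesteps, features)
        layers.Dense(1)
    ])
    model.compile(optimizer=keras.optimizers.Adam(
                      hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
                  loss='mse')
    return model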
Example #2
def tune_with_kerastuner1():
    photos = load('dogs_vs_cats_photos.npy')
    labels = load('dogs_vs_cats_labels.npy')
    (trainX, testX, trainY, testY) = train_test_split(photos,
                                                      labels,
                                                      test_size=0.25,
                                                      random_state=42)

    trainY = keras.utils.to_categorical(trainY, 2)
    testY = keras.utils.to_categorical(testY, 2)

    # model = define_three_block_model()

    # history = model.fit(photos, labels, batch_size=16, epochs=10, validation_split=0.33, verbose=1, use_multiprocessing=True)

    # RandomSearch expects a model-building callable (or a HyperModel), not an
    # already-built model instance, so pass the factory function itself.
    tuner = RandomSearch(define_three_block_model,
                         objective='val_accuracy',
                         max_trials=5,
                         executions_per_trial=3,
                         directory="tuner_dir",
                         project_name="cats_vs_dogs_tuner")
    tuner.search_space_summary()

    # tuner.search(trainX, trainY,
    #              epochs=5,
    #              validation_data=(testX, testY))

    models = tuner.get_best_models(num_models=2)
    tuner.results_summary()
    return tuner
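Nothing varies between trials unless the builder actually consumes hp. A sketch of how the factory above could expose a hyperparameter (hypothetical; the real define_three_block_model takes no hp argument and tunes nothing):

def define_tunable_block_model(hp):
    model = define_three_block_model()
    # recompile with a tuned learning rate so the search has something to vary
    model.compile(optimizer=keras.optimizers.Adam(
                      hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model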
Example #3
def tune_with_kerastuner2():
    # model = define_one_block_model()
    tuner = RandomSearch(define_one_block_model,
                         objective='val_accuracy',
                         max_trials=5,
                         executions_per_trial=3,
                         directory="tuner_dir",
                         project_name="cats_vs_dogs_tuner")

    train_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
    # width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
    # prepare iterators
    train_it = train_datagen.flow_from_directory('dataset_dogs_vs_cats/train/',
                                                 class_mode='binary',
                                                 batch_size=16,
                                                 target_size=(200, 200))
    test_it = test_datagen.flow_from_directory('dataset_dogs_vs_cats/test/',
                                               class_mode='binary',
                                               batch_size=16,
                                               target_size=(200, 200))

    tuner.search(train_it,
                 steps_per_epoch=len(train_it),
                 validation_data=test_it,
                 validation_steps=len(test_it),
                 epochs=5,
                 use_multiprocessing=True)

    models = tuner.get_best_models(num_models=3)
    tuner.results_summary()
    return tuner
Example #4
def load_results():
    tuner = RandomSearch(FuturePredictionModelHyperparameters(
        window_size=720, num_features=5, future_steps=144),
                         objective='val_loss',
                         max_trials=100,
                         directory='test_dir')
    tuner.reload()
    best = tuner.oracle.get_best_trials(20)
    best.sort(
        key=lambda trial: trial.metrics.get_best_value('val_accuracy')
        if not np.isnan(trial.metrics.get_best_value('val_loss')) else np.nan,
        reverse=True)

    best_accuracy = list(
        map(
            lambda trial: trial.metrics.get_best_value('val_accuracy')
            if not np.isnan(trial.metrics.get_best_value('val_loss'))
            else np.nan, best))
    print(best_accuracy)
Example #5
    def tuning(self, method="random"):
        if method == "random":
            return RandomSearch(self.build_model,
                                objective='mae',
                                max_trials=5,
                                executions_per_trial=2,
                                directory='./tuning',
                                project_name='CNN2')
        if method == "hyperband":
            return Hyperband(self.build_model,
                             objective='mae',
                             max_epochs=100,
                             factor=2,
                             directory='./tuning',
                             project_name='CNN2_hyperband_huber')
Example #6
def search(dt=600,
           window_size=360,
           future_steps=144,
           epochs=50,
           with_time=True,
           batch_size=128,
           max_trials=200):
    bathroom1 = Dataset.parse('dataset/', 'bathroom1')
    kitchen1 = Dataset.parse('dataset/', 'kitchen1')
    combined1 = bathroom1.combine(kitchen1)

    X, y = prepare_data_future_steps(combined1,
                                     window_size=window_size,
                                     dt=dt,
                                     with_time=with_time,
                                     future_steps=future_steps)
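    # (3600 // dt) * 24 samples span one day: the slices below hold out the
    # last four days, two for validation and two for testing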
    X_train = X[:-4 * (3600 // dt) * 24, :, :]
    X_val = X[-4 * (3600 // dt) * 24:-2 * (3600 // dt) * 24, :, :]
    X_test = X[-2 * (3600 // dt) * 24:, :, :]

    # For now only sensor 24
    y_train = y[:-4 * (3600 // dt) * 24, :, 0]
    y_val = y[-4 * (3600 // dt) * 24:-2 * (3600 // dt) * 24, :, 0]
    y_test = y[-2 * (3600 // dt) * 24:, :, 0]

    tuner = RandomSearch(FuturePredictionModelHyperparameters(
        window_size=window_size,
        num_features=X.shape[2],
        future_steps=future_steps),
                         objective='val_loss',
                         max_trials=max_trials,
                         directory='test_dir')

    tuner.search_space_summary()

    tuner.search(x=X_train,
                 y=y_train,
                 epochs=epochs,
                 batch_size=batch_size,
                 validation_data=(X_val, y_val),
                 callbacks=[IsNanEarlyStopper(monitor='loss')])

    tuner.results_summary()
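IsNanEarlyStopper is not defined in this snippet. A minimal sketch matching its apparent purpose (an assumption): stop training as soon as the monitored metric becomes NaN.

import numpy as np
import tensorflow as tf

class IsNanEarlyStopper(tf.keras.callbacks.Callback):
    def __init__(self, monitor='loss'):
        super().__init__()
        self.monitor = monitor

    def on_batch_end(self, batch, logs=None):
        value = (logs or {}).get(self.monitor)
        if value is not None and np.isnan(value):
            self.model.stop_training = True  # abort the trial on NaN loss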
Example #7
def search_model():
    data = get_data()
    data_train, data_val_model, data_val_interpretation, data_test = get_train_val_test_splits(data)

    train_features = np.load("train_features.npy")
    valid_features = np.load("valid_features.npy")
    train_y = data_train["outcome"].values
    valid_y = data_val_model["outcome"].values
    tuner = RandomSearch(
        get_hp_model,
        objective='val_accuracy',
        max_trials=20,
        executions_per_trial=1,
        directory='test',
        project_name='test')

    tuner.search(train_features, y=train_y, batch_size=32, epochs=300,
                 validation_data=(valid_features, valid_y), verbose=2,
                 class_weight=dict(enumerate(utils.get_class_weights(train_y))),
                 # callbacks=[EarlyStopping(patience=30)]
                 )
    tuner.results_summary()
Example #8
    model.add(Flatten())
    model.add(
        Dense(hp.Int('dense1', min_value=300, max_value=800, step=50),
              activation="relu"))
    model.add(Dense(2, activation="softmax"))
    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    return model


tuner_search = RandomSearch(build_model,
                            objective='accuracy',
                            max_trials=5,
                            directory=r"C:\Users\khares\Work\output13")

tuner_search.search(training_set, epochs=5)

model = tuner_search.get_best_models(num_models=1)[0]

model.summary()

best_hyperparameters = tuner_search.get_best_hyperparameters(1)[0].values
best_hyperparameters

model = Sequential()
model.add(
    Conv2D(filters=48,
           kernel_size=5,
Example #9
  model.add(keras.layers.Dense(1, activation='sigmoid'))
  hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])

  model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
                loss='binary_crossentropy',
                metrics=['accuracy'])

  return model

!pip install keras-tuner

from tensorflow import keras
from kerastuner import RandomSearch
tuner_search = RandomSearch(model_builder,
                            objective='val_accuracy',
                            max_trials=5,
                            directory='malaria_output',
                            project_name='Malaria_Detection')

tuner_search.search(x_train, y_train, epochs=3, validation_split=0.1)

model = tuner_search.get_best_models(num_models=1)[0]
model.summary()

from tensorflow.keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3)

model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=50, initial_epoch=3, callbacks=[early_stop])

model.save('malariadisease.h5')
Example #10
            metrics=['accuracy']
        )

        return model


def print_return(v):
    print(v.shape)
    return v


if __name__ == '__main__':
    # tf.keras.backend.set_floatx('float16')

    dt = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    hypermodel = HCNN()
    train, test = get_mnist()
    tuner = RandomSearch(
        hypermodel,
        objective='accuracy',
        max_trials=40,
        directory='models',
        project_name='H-MNIST-' + dt
    )

    print('Created tuner')
    log_dir = "logs/fit/" + dt
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
    tuner.search(*train, epochs=30, validation_data=test, batch_size=32,
                 callbacks=[tensorboard_callback])
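HCNN is not shown here. Tuners that receive an instance rather than a function expect a kerastuner.HyperModel subclass implementing build(self, hp); a minimal illustrative sketch, where the MNIST-shaped input is an assumption:

from tensorflow import keras
from kerastuner import HyperModel

class ExampleHyperModel(HyperModel):
    def build(self, hp):
        model = keras.Sequential([
            keras.layers.Conv2D(hp.Int('filters', min_value=32, max_value=128, step=32),
                                kernel_size=3, activation='relu',
                                input_shape=(28, 28, 1)),  # assumed MNIST shape
            keras.layers.Flatten(),
            keras.layers.Dense(10, activation='softmax')
        ])
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return model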
Example #11
hypermodel = Hyperinceptiontime(input_shape=x_train.shape[1:],
                                classes=NUM_CLASSES)

# Initialize the hypertuner

# tuner = RandomSearch(
#     hypermodel,
#     objective='val_loss',
#     max_trials=2,
#     project_name='AF_inceptiontime',
#     directory='test_directory')

tuner = RandomSearch(
    hypermodel,
    objective='val_loss',
    max_trials=20,
    # hyperband_iterations=20,
    project_name='UCI_inceptiontime',
    directory='nas_result')

# Display search overview.
tuner.search_space_summary()

# Performs the hypertuning.
tuner.search(x_train,
             y_train,
             epochs=100,
             validation_split=0.1,
             batch_size=128,
             callbacks=[keras.callbacks.EarlyStopping(patience=10)])
Example #12
def hyper_tuner(month_path, log_path, time_step, train_data, valid_data,
                test_data, item, item_index, item_name):

    train_x, valid_x, test_x, train_y, valid_y, test_y = data_split_norm(
        train_data, valid_data, test_data, item_index, time_step)

    hyper_model = MyHyperModel(input_shape=train_x.shape[-2:])

    project_name = item_name + "_" + str(time_step)

    tuner = RandomSearch(hyper_model,
                         objective='val_loss',
                         max_trials=150,
                         executions_per_trial=5,
                         directory=log_path,
                         project_name=project_name)

    callbacks = [
        tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                         min_delta=1e-3,
                                         patience=15,
                                         restore_best_weights=True),
        tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                             factor=0.1,
                                             patience=5,
                                             verbose=0,
                                             mode='auto',
                                             min_delta=0.0001),
        ClearTrainingOutput()
    ]

    tuner.search_space_summary()

    tuner.search(train_x,
                 train_y,
                 epochs=200,
                 batch_size=256,
                 validation_data=(valid_x, valid_y),
                 callbacks=callbacks,
                 verbose=2)

    # Get the optimal hyperparameters
    best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]

    model = tuner.hypermodel.build(best_hps)
    history = model.fit(train_x,
                        train_y,
                        epochs=200,
                        batch_size=256,
                        validation_data=(valid_x, valid_y),
                        callbacks=callbacks,
                        verbose=2)
    models_path = os.path.join(month_path, "models")
    if not os.path.exists(models_path):
        os.mkdir(models_path)
    model_name = item + "_model.h5"
    model_path = os.path.join(models_path, model_name)
    model.save(model_path)
    # delete the tuner to free memory
    del tuner

    layers_path = os.path.join(month_path, item)
    if not os.path.exists(layers_path):
        os.mkdir(layers_path)
    plot_path_1 = os.path.join(layers_path, "loss_epoch.png")
    plot_path_2 = os.path.join(layers_path, "measure_predict.png")
    csv_path = os.path.join(layers_path, "measure_predict.csv")

    history = history.history

    plt.plot(history['loss'], linewidth=2, label='Train')
    plt.plot(history['val_loss'], linewidth=2, label='Test')
    plt.legend(loc='upper right')
    plt.ylabel('Mean Relative Error [$' + item + '$]')
    plt.xlabel('Epoch')
    plt.savefig(plot_path_1, dpi=300, bbox_inches="tight")
    plt.close()

    test_predictions = model.predict(test_x)
    plt.figure(figsize=(20, 6))
    plt.plot(test_y, label='measure')
    plt.plot(test_predictions, label='predict')
    plt.legend(loc='upper right')
    if item == 'PH':  # 'is' tests identity, not string equality
        plt.ylabel(item)
    else:
        plt.ylabel(item + '(mg/L)')
    plt.xlabel('Test set')
    plt.savefig(plot_path_2, dpi=300, bbox_inches="tight")
    plt.close()

    test_predictions = test_predictions.flatten()  # flatten() to 1-D
    valid_predictions = model.predict(valid_x).flatten()

    train_fvu, train_rmse, train_mre = (history['loss'][-1],
                                        history['root_mean_squared_error'][-1],
                                        history['mean_absolute_percentage_error'][-1])

    valid_fvu, valid_rmse, valid_mre = model.evaluate(valid_x,
                                                      valid_y,
                                                      verbose=0)
    valid_nse, valid_cc = calc_nse_cc(valid_y, valid_predictions)
    test_fvu, test_rmse, test_mre = model.evaluate(test_x, test_y, verbose=0)

    test_nse, test_cc = calc_nse_cc(test_y, test_predictions)

    measure_predict_data = pd.DataFrame()
    measure_predict_data['predict'] = test_predictions
    measure_predict_data['measure'] = test_y
    measure_predict_data.to_csv(csv_path, encoding='utf-8')
    try:
        hidden_layer_1 = best_hps.get('units_0')
    except:
        hidden_layer_1 = 0
    try:
        hidden_layer_2 = best_hps.get('units_1')
    except:
        hidden_layer_2 = 0

    model_info = {
        'output': item,
        "time_step": time_step,
        'hidden_layer_1': hidden_layer_1,
        'hidden_layer_2': hidden_layer_2,
        'hidden_layer_3': best_hps.get('units_n'),
        'train_fvu': train_fvu,
        'train_rmse': train_rmse,
        'train_mre': train_mre,
        "valid_fvu": valid_fvu,
        'valid_nse': valid_nse,
        'valid_rmse': valid_rmse,
        'valid_mre': valid_mre,
        'valid_cc': valid_cc,
        "test_fvu": test_fvu,
        'test_nse': test_nse,
        'test_rmse': test_rmse,
        'test_mre': test_mre,
        'test_cc': test_cc
    }
    return model_info
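calc_nse_cc is not defined in the snippet. A plausible sketch, inferred from the name alone (an assumption): the Nash-Sutcliffe efficiency plus the Pearson correlation coefficient between measured and predicted series.

import numpy as np

def calc_nse_cc(measured, predicted):
    measured = np.asarray(measured).flatten()
    predicted = np.asarray(predicted).flatten()
    nse = 1.0 - (np.sum((measured - predicted) ** 2)
                 / np.sum((measured - np.mean(measured)) ** 2))
    cc = np.corrcoef(measured, predicted)[0, 1]
    return nse, cc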
Example #13
green = (0,255,0)
blue = (0,0,255)
black = (0,0,0)
white = (255,255,255)

pygame.init()
display=pygame.display.set_mode((display_width,display_height))
clock=pygame.time.Clock()


training_data_x, training_data_y = generate_training_data(display,clock)

"""Создание тюнера"""
tuner = RandomSearch(
    build_model,
    objective='val_accuracy',
    max_trials=30,
    directory='C:\\Users\\agolovanov\\PycharmProjects\\Snake_tuner\\model'  # directory where trained networks are saved
)

tuner.search_space_summary()

tuner.search(np.array(training_data_x).reshape(-1, 7),
             np.array(training_data_y).reshape(-1, 3),
             batch_size=2048,
             epochs=7,
             validation_split=0.2,
             verbose=1)
tuner.results_summary()
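build_model is not shown; the reshape calls imply 7 input features and 3 output classes, so a minimal sketch under those assumptions:

from tensorflow import keras

def build_model(hp):
    model = keras.Sequential([
        keras.layers.Dense(hp.Int('units', min_value=8, max_value=64, step=8),
                           activation='relu', input_shape=(7,)),  # 7 features assumed
        keras.layers.Dense(3, activation='softmax')  # 3 classes assumed
    ])
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model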
Example #14
    model.compile(optimizer=keras.optimizers.Adam(
        hp.Choice('learning_rate', values=[1e-2, 1e-3])),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    return model


#!pip install keras-tuner

from kerastuner import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters

tuner = RandomSearch(build_model,
                     objective='val_accuracy',
                     max_trials=2,
                     directory='output',
                     project_name="CIFAR102")

# model.fit(X_train, y_train)
tuner.search(X_train, y_train, epochs=10, validation_split=0.2)

model = tuner.get_best_models(num_models=1)[0]

model.summary()

model.fit(X_train, y_train, epochs=10, validation_split=0.1)

model.evaluate(X_test, y_test)

#Predictions
Example #15
        keras.layers.Flatten(),
        keras.layers.Dense(units=hp.Int('dense_1_units',
                                        min_value=32,
                                        max_value=128,
                                        step=16),
                           activation='relu'),
        keras.layers.Dense(units=10, activation='softmax')
    ])
    model.compile(optimizer=keras.optimizers.Adam(
        hp.Choice('learning_rate', values=[1e-2, 1e-3])),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model


from kerastuner import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters

model_tuning = RandomSearch(build_model,
                            objective='val_accuracy',
                            max_trials=5)

model_tuning.search(images_train, label_train, epochs=3, validation_split=0.2)

best_model = model_tuning.get_best_models(num_models=1)[0]

best_model.fit(images_train,
               label_train,
               epochs=10,
               validation_split=0.2,
               initial_epoch=3)
Example #16
                                  min_value=0.0,
                                  max_value=0.5,
                                  default=0.25,
                                  step=0.05)))
        model.add(Dense(4, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        return model


LOG_DIR = f"{int(time.time())}"

tuner = RandomSearch(build_model,
                     objective="val_accuracy",
                     max_trials=1,
                     executions_per_trial=1,
                     directory=LOG_DIR)

tuner.search(x=X_train,
             y=y_train,
             epochs=150,
             batch_size=64,
             validation_split=0.1)
print(tuner.get_best_hyperparameters()[0].values)
tuner.results_summary()  # results_summary() prints directly and returns None

best_model = tuner.get_best_models(num_models=1)[0]
NN_pred = best_model.predict(X_test)
pred = list()
for i in range(len(NN_pred)):
Example #17
        model.add(layers.Dropout(rate=0.5))
        model.add(layers.Dense(6, activation='softmax'))

        model.compile(optimizer=Optimizer.Adam(learning_rate=hp.Choice(
            'learning_rate', values=[1e-2, 1e-3, 1e-4])),
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])

        return model


# Initialize the model
hypermodel = RegressionHyperModel()

# Run the random search
tuner_rs = RandomSearch(hypermodel,
                        objective='val_accuracy',
                        max_trials=5,
                        executions_per_trial=1)

tuner_rs.search_space_summary()
tuner_rs.search(train_ds, train_classes, epochs=15, validation_split=0.30)

# Get the best model
best_model = tuner_rs.get_best_models(num_models=1)[0]

# Evaluate
val_ds, val_classes = getImages(
    '../input/intel-image-classification/seg_test/seg_test/', 150)
best_model.evaluate(val_ds, val_classes, verbose=1)
Example #18
        filters=hp.Int('conv_2_filter', min_value=32, max_value=128, step=16),
        kernel_size=hp.Choice('conv_2_kernel', values=[3, 5]),
        activation='relu'
    ))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(
        units=hp.Int('dense_1_units', min_value=32, max_value=128, step=16),
        activation='relu'
    ))
    model.add(keras.layers.Dense(10, activation="softmax")
              )

    model.compile(optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[1e-2, 1e-3])),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy']
                  )

    return model


tuner_search = RandomSearch(build_model, objective='val_accuracy', max_trials=3, directory='output',
                            project_name="Fashion_MNIST-Using-Keras_Tuner")

tuner_search.search(train_images, train_labels, epochs=3, validation_split=0.3)

best_model = tuner_search.get_best_models(num_models=1)[0]

best_model.summary()

best_model.fit(train_images, train_labels, epochs=5, validation_split=0.3, initial_epoch=3)
Example #19
        train_level, train_flow)
    x_valid_simple_net, y_valid_simple_net = obj1.organized_simple_net_data(
        valid_level, valid_flow)
    x_test_simple_net, y_test_simple_net = obj1.organized_simple_net_data(
        test_level, test_flow)

    # -------------------------find hyper-parameters with keras tuner----------------------------

    log_dir = os.path.normpath(
        r'C:\Users\nirro\Desktop\machine learning\ayyeka\models\model_4')
    input_shape = x_train_simple_net.shape[1]
    hypermodel = RegressionHyperModel((input_shape, ))

    tuner_rs = RandomSearch(hypermodel,
                            objective='mse',
                            seed=42,
                            max_trials=30,
                            executions_per_trial=2,
                            directory=log_dir)
    callbacks = [
        tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                         mode='min',
                                         patience=100,
                                         restore_best_weights=True,
                                         min_delta=0.00001,
                                         verbose=1)
    ]
    tuner_rs.search(x_train_simple_net,
                    y_train_simple_net,
                    epochs=1000,
                    validation_split=0.2,
                    verbose=0,
Example #20
def hyper_tuner(log_path, time_step, train_data, valid_data, test_data, item,
                item_index, item_name):
    keras.backend.clear_session()

    train_x, valid_x, test_x, train_y, valid_y, test_y = data_split_norm(
        train_data, valid_data, test_data, item_index, time_step)

    hyper_model = MyHyperModel(input_shape=train_x.shape[-2:])

    project_name = item_name + "_" + str(time_step)

    tuner = RandomSearch(hyper_model,
                         objective='val_loss',
                         max_trials=150,
                         executions_per_trial=10,
                         directory=log_path,
                         project_name=project_name)

    callbacks = [
        tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                         min_delta=1e-3,
                                         patience=15,
                                         restore_best_weights=True),
        tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                             factor=0.1,
                                             patience=5,
                                             verbose=0,
                                             mode='auto',
                                             min_delta=0.0001),
        ClearTrainingOutput()
    ]

    tuner.search_space_summary()

    tuner.search(train_x,
                 train_y,
                 epochs=200,
                 batch_size=256,
                 validation_data=(valid_x, valid_y),
                 callbacks=callbacks,
                 verbose=2)

    # Get the optimal hyperparameters
    best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]

    try:
        hidden_layer_1 = best_hps.get('units_0')
    except:
        hidden_layer_1 = 0
    try:
        hidden_layer_2 = best_hps.get('units_1')
    except:
        hidden_layer_2 = 0

    model_info = {
        'output': item,
        "time_step": time_step,
        'hidden_layer_1': hidden_layer_1,
        'hidden_layer_2': hidden_layer_2,
        'hidden_layer_3': best_hps.get('units_n'),
    }

    # delete the tuner to free memory
    del tuner

    return model_info
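ClearTrainingOutput, used here and in Example #12, is not defined in these snippets. The Keras Tuner tutorial it appears to come from uses it to clear notebook output between trials; a sketch of that version, assuming an IPython environment:

import IPython
import tensorflow as tf

class ClearTrainingOutput(tf.keras.callbacks.Callback):
    def on_train_end(self, *args, **kwargs):
        IPython.display.clear_output(wait=True)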
Example #21
        activation='relu'
    ),
    # the Dense layer above can use anywhere from 32 to 128 nodes; the tuner
    # tries combinations to find the best count
    keras.layers.Dense(10, activation='softmax')
    # the last Dense layer, with 10 output nodes
  ])
  
  model.compile(optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[1e-2, 1e-3])),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
  
  return model

from kerastuner import RandomSearch  # random search tries to find the best hyperparameter values for the problem
from kerastuner.engine.hyperparameters import HyperParameters

# run random search
# to decide which conv layers and how many filters to use

tuner_search = RandomSearch(build_model, objective='val_accuracy', max_trials=5,
                            directory='CatsVsDogs', project_name='Mnist Fashion')

# search for the best parameters from build_model
tuner_search.search(train_images, train_labels, epochs=3, validation_split=0.1)  # same signature as fit()

model = tuner_search.get_best_models(num_models=1)[0]

model.summary()

# continue training the best model found above

model.fit(train_images, train_labels, epochs=10, validation_split=0.1, initial_epoch=3)
Example #22
            keras.layers.Conv2D(
                filters=hp.Int('conv_2_filter', min_value=32, max_value=256, step=16),
                kernel_size=hp.Choice('conv_2_kernel', values=[3, 5]),
                activation='relu'
            ),
            keras.layers.Flatten(),
            keras.layers.Dense(
                units=hp.Int('dense_1_units', min_value=32, max_value=256, step=16),
                activation='relu'
            ),
            keras.layers.Dense(10, activation='softmax')
  ])
  model.compile(optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[1e-2, 1e-3])),
                loss='sparse_categorical_crossentropy', metrics=['accuracy'])
  return model

from kerastuner import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters

tuner_search = RandomSearch(build_model, objective='val_accuracy', max_trials=5,
                            directory='/users/siddharthsmac/desktop', project_name='Mnist Fashion')

tuner_search.search(train_images, train_labels, epochs=3, validation_split=0.1)

model = tuner_search.get_best_models(num_models=1)[0]

model.summary()

model.fit(train_images, train_labels, epochs=10, validation_split=0.1, initial_epoch=3)

Example #23
from kerastuner import RandomSearch

# Import the Cifar10 dataset.
NUM_CLASSES = 10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = to_categorical(y_train, NUM_CLASSES)
y_test = to_categorical(y_test, NUM_CLASSES)

# Import a hypertunable version of Xception.
hypermodel = HyperXception(input_shape=x_train.shape[1:], classes=NUM_CLASSES)

# Initialize the hypertuner: we should find the model that maximizes the
# validation accuracy, using 40 trials in total.
tuner = RandomSearch(hypermodel,
                     objective='val_accuracy',
                     max_trials=40,
                     project_name='cifar10_xception',
                     directory='test_directory')

# Display search overview.
tuner.search_space_summary()

# Performs the hypertuning.
tuner.search(x_train, y_train, epochs=10, validation_split=0.1)

# Show the best models, their hyperparameters, and the resulting metrics.
tuner.results_summary()

# Retrieve the best model.
best_model = tuner.get_best_models(num_models=1)[0]
Example #24
    ),
    keras.layers.Flatten(),
    keras.layers.Dense(
        units=hp.Int('dense_1_units', min_value=32, max_value=128, step=16),
        activation='relu'
    ),
    keras.layers.Dense(10, activation='softmax')
  ])
  
  model.compile(optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[1e-2, 1e-3])),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
  
  return model

from kerastuner import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters

tuner_search = RandomSearch(build_model,
                            objective='val_accuracy',
                            max_trials=5,
                            directory='output',
                            project_name="Mnist Fashion")

tuner_search.search(train_images, train_labels, epochs=3, validation_split=0.1)

model = tuner_search.get_best_models(num_models=1)[0]

model.summary()

model.fit(train_images, train_labels, epochs=10, validation_split=0.1, initial_epoch=3)

Example #25
                                           min_value=32,
                                           max_value=128,
                                           step=16),
                            kernel_size=hp.Int('Conv_2_Kernel', 3, 5),
                            activation='relu'),
        keras.layers.Flatten(),
        keras.layers.Dense(units=hp.Int('dense_units_1',
                                        min_value=30,
                                        max_value=128,
                                        step=16),
                           activation='relu'),
        keras.layers.Dense(10, activation='softmax')
    ])
    model.compile(optimizer=keras.optimizers.Adam(
        hp.Float('learning_rate', 0.1, 0.2)),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model


tuner_search = RandomSearch(build_model,
                            objective='val_accuracy',
                            max_trials=5,
                            executions_per_trial=1,
                            directory='C:/',
                            project_name="mnist_fashion")
tuner_search.search(train_images,
                    data_set_train_label,
                    epochs=3,
                    validation_split=0.1,
                    batch_size=1000)
Example #26
    )

    model.add(Dense(units=hp.Int('units_hidden', min_value=128, max_value=800, step=32), activation=activation_choice))
    model.add(Dense(10, activation='softmax'))

    model.compile(
        optimizer=hp.Choice('optimizer', values=['adam', 'rmsprop', 'SGD']),
        loss='categorical_crossentropy', metrics=['accuracy']
    )

    return model


tuner = RandomSearch(
    build_model,
    objective='val_accuracy',
    max_trials=336,
    directory='test_directory'
)

tuner.search_space_summary()
tuner.search(x_train, y_train, batch_size=256, epochs=70, validation_split=0.1)

tuner.results_summary()

models = tuner.get_best_models(num_models=3)

for m in models:
    m.summary()
#    m.evaluate(x_test, y_test)
#    print()
Example #27
    x = layers.Flatten()(x)
    outputs = layers.Dense(10, activation='softmax')(x)

    # Build model
    model = keras.Model(inputs, outputs)
    model.compile(optimizer=Adam(lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model


# Initialize the tuner by passing the `build_model` function
# and specifying key search constraints: maximize val_acc (objective),
# and the number of trials to do. More efficient tuners like UltraBand() can
# be used.
tuner = RandomSearch(build_model, objective='val_accuracy', max_trials=TRIALS,
                     project_name='hello_world_tutorial_results')

# Display search space overview
tuner.search_space_summary()

# Perform the model search. The search function has the same signature
# as `model.fit()`.
tuner.search(x_train, y_train, batch_size=128, epochs=EPOCHS,
             validation_data=(x_val, y_val))

# Display the best models, their hyperparameters, and the resulting metrics.
tuner.results_summary()

# Retrieve the best model and display its architecture
best_model = tuner.get_best_models(num_models=1)[0]
best_model.summary()
Example #28
  # FC layers
  add_fc_layer(model, dense_units=hp.Int('dense_1_units', min_value=32, max_value=128, step=8), dropout_rate=0.5)
  # add_fc_layer(model, 128, 0.4)
  add_fc_layer(model, dense_units=hp.Int('dense_2_units', min_value=32, max_value=128, step=8), dropout_rate=0.3)
  # output layer
  model.add(Dense(units=1, activation='sigmoid'))

  model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy'])

  return model

from kerastuner import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters

tuner_search = RandomSearch(create_model,
                            objective='val_accuracy',
                            max_trials=5,
                            directory=basepath,
                            project_name='Tuner_Folder')

tuner_search.search(train_X, train_y, epochs=3, validation_split=0.2)

model = tuner_search.get_best_models(num_models=1)[0]

model.summary()

model_history = model.fit(train_X, train_y, epochs=10, validation_split=0.1, initial_epoch=3,
                          callbacks=[
                              tf.keras.callbacks.ModelCheckpoint('models/model_{val_accuracy:.3f}.h5',
                                                                 save_best_only=True,
                                                                 save_weights_only=False,
                                                                 monitor='val_accuracy')
                          ])

model.save('model_final.h5')
Example #29
    model.add(Dropout(.3))
    model.add(Dense(units=num_classes, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(
                      hp.Choice('learning_rate', values=[1e-4, 1e-3])),
                  metrics=['accuracy'])
    return model


from kerastuner import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters

tuner = RandomSearch(build_model,
                     objective='val_accuracy',
                     max_trials=5,
                     directory='output1',
                     project_name='EMNIST_Balanced_Tuned')
tuner.search(X_train, y_train, epochs=3, validation_data=(X_val, y_val))

model = tuner.get_best_models(num_models=1)[0]
# model.summary()

from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint

model = tuner.get_best_models(num_models=1)[0]

# Some callbacks that we're using

MCP = ModelCheckpoint('Best_points.h5',
                      verbose=1,
Example #30
    set_df_values(df)
    df = clean_data(df)
    print(df.dtypes)
    ohe_cols = cols[1:36]
    # print(ohe_cols)
    df = one_hot_encode(df, colnames=ohe_cols)

    x_train, x_val, y_train, y_val, train_ids, val_ids = split_dataset(
        df, test_size=0.1, seed=42)
    # X_train, Y_train = np.array(x_train), np.array(y_train)
    # X_val, Y_val = np.array(x_val), np.array(y_val)

    model = get_model(input_size=118, output_size=2, magic='tanh', dropout=0.5)
    tuner = RandomSearch(get_tuned_model,
                         objective='val_binary_accuracy',
                         max_trials=7,
                         executions_per_trial=4,
                         directory='project',
                         project_name='Air Quality Index')

    tuner.search_space_summary()  # prints directly; no need to wrap in print()

    x_train = np.asarray(x_train).astype(np.float32)
    y_train = np.asarray(y_train).astype(np.float32)
    x_val = np.asarray(x_val).astype(np.float32)
    y_val = np.asarray(y_val).astype(np.float32)

    tuner.search(x_train, y_train, epochs=5, validation_data=(x_val, y_val))

    tuner.results_summary(num_trials=3)  # 3 best models; prints directly

    test_acc, test_loss = fit_and_evaluate(model,