Example #1
    def setup_early_stopping(params):

        return EarlyStopping(monitor=params["monitor"],
                             mode=params["mode"],
                             patience=params["patience"],
                             verbose=params["verbose"],
                             min_delta=params["min_delta"])
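
A usage sketch (assuming setup_early_stopping is accessible at module level; the dict keys mirror those read above, and the values here are illustrative):

es = setup_early_stopping({
    "monitor": "val_loss",
    "mode": "min",
    "patience": 10,
    "verbose": 1,
    "min_delta": 1e-4,
})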
Example #2
def trainModel(model, x_train, y_train, x_val, y_val, epochs, batch_size):
    earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')
    mcp_save = ModelCheckpoint('temp/.mdl_wts.hdf5', save_best_only=True, monitor='val_loss', mode='min')
    reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, min_delta=1e-4, mode='min')  # 'epsilon' was renamed to 'min_delta'

    history = model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size,
                        validation_data=(x_val, y_val),
                        callbacks=[earlyStopping, mcp_save, reduce_lr_loss])
    return model
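
Because mcp_save writes the best epoch to temp/.mdl_wts.hdf5, a caller would typically reload those weights after training (a sketch, not part of the original example):

model = trainModel(model, x_train, y_train, x_val, y_val, epochs=100, batch_size=32)
model.load_weights('temp/.mdl_wts.hdf5')  # roll back to the best val_loss epoch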
Example #3
def create_model_talos(x_train_ts, y_train_ts, x_test_ts, y_test_ts, params):
    """
    function that builds model, trains, evaluates on validation data and returns Keras history object and model for
    talos scanning. Here I am creating data inside function because data preparation varies as per the selected value of 
    batch_size and time_steps during searching. So we ignore data that's received here as argument from scan method of Talos.
    """
    x_train_ts, y_train_ts, x_test_ts, y_test_ts = data(params)
    BATCH_SIZE = params["batch_size"]
    TIME_STEPS = params["time_steps"]

    lstm_model = Sequential()
    # (batch_size, timesteps, data_dim)
    lstm_model.add(
        LSTM(params["lstm1_nodes"],
             batch_input_shape=(BATCH_SIZE, TIME_STEPS, x_train_ts.shape[2]),
             dropout=0.2,
             recurrent_dropout=0.2,
             stateful=True,
             return_sequences=True,
             kernel_initializer='random_uniform'))
    if params["lstm_layers"] == 2:
        lstm_model.add(LSTM(params["lstm2_nodes"], dropout=0.2))
    else:
        lstm_model.add(Flatten())

    if params["dense_layers"] == 2:
        lstm_model.add(Dense(params["dense2_nodes"], activation='relu'))

    lstm_model.add(Dense(1, activation='sigmoid'))
    if params["optimizer"] == 'rms':
        optimizer = optimizers.RMSprop(lr=params["lr"])
    else:
        optimizer = optimizers.SGD(lr=params["lr"],
                                   decay=1e-6,
                                   momentum=0.9,
                                   nesterov=True)
    lstm_model.compile(loss='mean_squared_error',
                       optimizer=optimizer)  # binary_crossentropy
    early_stp = EarlyStopping(monitor='val_loss',
                              mode='min',
                              verbose=1,
                              patience=10,
                              min_delta=1)  # note: improvements smaller than 1 do not reset patience

    history = lstm_model.fit(x_train_ts,
                             y_train_ts,
                             epochs=params["epochs"],
                             verbose=2,
                             batch_size=BATCH_SIZE,
                             validation_data=(x_test_ts, y_test_ts),
                             callbacks=[csv_logger, early_stp])  # csv_logger is defined elsewhere in this module
    return history, lstm_model
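
The builder above is meant to be handed to Talos. A minimal sketch of the wiring (assuming `import talos as ta`; the grid keys mirror those read above, the values are illustrative, and newer Talos versions also require an experiment_name argument):

import talos as ta

p = {
    "batch_size": [30, 60],
    "time_steps": [30, 60],
    "lstm1_nodes": [70, 100],
    "lstm_layers": [1, 2],
    "lstm2_nodes": [30],
    "dense_layers": [1, 2],
    "dense2_nodes": [20],
    "optimizer": ["rms", "sgd"],
    "lr": [0.001, 0.01],
    "epochs": [30],
}
# dummy_x/dummy_y are small placeholder arrays: create_model_talos rebuilds
# its own data internally and ignores what Scan passes in.
scan = ta.Scan(x=dummy_x, y=dummy_y, params=p, model=create_model_talos)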
Example #4
def getBasicCallbacks(monitor="val_loss", patience_es=200, patience_rlr=80):
    # Two callbacks are used by default for all models:
    # - EarlyStopping (stops training once the monitored metric stops improving)
    # - ReduceLROnPlateau (reduces the learning rate to facilitate continued learning)

    return [
        EarlyStopping(
            monitor=monitor, min_delta=0.00001, patience=patience_es,
            mode='auto', restore_best_weights=True
        ),
        ReduceLROnPlateau(
            monitor=monitor, factor=0.5, patience=patience_rlr, verbose=1,
            min_lr=5e-4,
        ),
    ]
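
Typical usage is to splice the returned list straight into fit (a sketch; the model and data names are illustrative):

history = model.fit(x_train, y_train,
                    validation_split=0.2,
                    epochs=2000,
                    callbacks=getBasicCallbacks(monitor="val_loss"))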
Example #5
def trainAutoEncoder(autoencoder: Model):

    try:
        autoencoder.load_weights(model_weight_name)
    except (OSError, ValueError):
        x,_ = loaddata.loadImages()
        autoencoder.fit(x, x,
                        epochs=100,
                        batch_size=128,
                        shuffle=True,
                        validation_split=0.2,
                        callbacks=[EarlyStopping(patience=20), TensorBoard(log_dir='/tmp/autoencoder')])
        autoencoder.save_weights(model_weight_name)
    return autoencoder  # return on both paths, not only after retraining
Example #6
def DNN(X_train, X_test, Y_train, Y_test, lmbd, eta, epochs=1000):
    dnn = Sequential([
        Dense(1024,
              input_shape=(X_train.shape[1], ),
              activation='relu',
              kernel_regularizer=l2(lmbd)),
        Dense(Y_train.shape[1], activation='softmax'),
    ])
    adam = Adam(learning_rate=eta)
    dnn.compile(loss='categorical_crossentropy',
                optimizer=adam,
                metrics=['acc'])

    history = dnn.fit(X_train,
                      Y_train,
                      epochs=epochs,
                      batch_size=32,
                      validation_split=0.25,
                      callbacks=[
                          EarlyStopping(monitor='val_loss',
                                        min_delta=1e-5,
                                        patience=10,
                                        verbose=0,
                                        mode='auto',
                                        baseline=None,
                                        restore_best_weights=True)
                      ])

    # Plot training & validation accuracy values
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.show()

    # Plot training & validation loss values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.show()

    score = dnn.evaluate(X_test, Y_test, batch_size=32)
    print(score)
    return dnn
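
A minimal call, assuming one-hot labels (lmbd is the L2 penalty strength, eta the Adam learning rate; the data names are illustrative):

from tensorflow.keras.utils import to_categorical

Y_train = to_categorical(y_train)
Y_test = to_categorical(y_test)
dnn = DNN(X_train, X_test, Y_train, Y_test, lmbd=1e-4, eta=1e-3, epochs=1000)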
Example #7
def trainModel(model, x, y, save_weights_name):

    model.fit(
        x,
        y,
        epochs=250,
        batch_size=128,
        shuffle=True,
        validation_split=0.2,
        callbacks=[EarlyStopping(patience=20)]
    )

    model.save_weights(save_weights_name)

    return model
Example #8
 def useNetwork(self, train_base, val_base, epochs=10000, patience=1000):
     if os.path.isfile(self.name + '.hf5'):
         self.network.load_weights(self.name + ".hf5")
     else:
         checkpoint = ModelCheckpoint(self.name + ".hf5")  # renamed: 'Model' shadowed keras.Model
         early = EarlyStopping(patience=patience)  # honor the patience argument
         self.network.fit(x=train_base[0],
                          y=train_base[1],
                          batch_size=20,
                          epochs=epochs,
                          callbacks=[early, checkpoint],
                          validation_data=val_base,  # validation_data supersedes validation_split, so the split is dropped
                          shuffle=True,
                          use_multiprocessing=True)
Example #9
def train_model(model_name, month, day, bp_type, epochs, X_train, y_train, X_test, y_test):

    model = Sequential()
    # model.add(Dense(units=100, input_shape=(1, X_train.shape[2])))
    # model.add(Dense(units=64))
    # model.add(Dense(units=32))
    # model.add(Bidirectional(LSTM(units=50, input_shape=(1, X_train.shape[2]), return_sequences=True)))
    model.add(LSTM(units=128, dropout=0.5, recurrent_dropout=0.5))
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))

    # sgd = SGD(learning_rate=0.00002, momentum=0.9, nesterov=True)
    model.compile(loss='mse', optimizer='adam', metrics=['mean_absolute_error'])


    earlyStopping = EarlyStopping(monitor='val_mean_absolute_error', patience=15, verbose=0, mode='min')
    mcp_save = ModelCheckpoint('./Models/{}_{}_{}.h5'.format(model_name, month, day), save_best_only=True, monitor='val_mean_absolute_error', mode='min')  # path matches the load_model call below
    history = model.fit(X_train, y_train, epochs=epochs, batch_size=32, validation_data=(X_test, y_test), callbacks=[mcp_save, earlyStopping])


    # The best weights are already on disk via the ModelCheckpoint callback
    # model.save("model.h5")
    print("Saved model to disk")


    plt.plot(history.history['mean_absolute_error'])
    plt.plot(history.history['val_mean_absolute_error'])
    plt.title('{} Blood Pressure Model Error (RNN)'.format(bp_type))
    plt.ylabel('Mean Absolute Error (mmHg)')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Testing'], loc='best')
    plt.savefig('./Models/{}_{}_{}.png'.format(model_name, month, day), dpi=600)
    print('Saved graph to disk')

    plt.close('all')
    del model
    model = load_model('./Models/{}_{}_{}.h5'.format(model_name, month, day))
    plt.plot(y_test.reshape(-1, 1))
    # X_train = X_train.reshape(-1, 2100, 1)
    plt.plot(model.predict(X_test).reshape(-1, 1))
    plt.title('{} Blood Pressure'.format(bp_type))
    plt.xlabel('Data point')
    plt.ylabel('{} BP (mmHg)'.format(bp_type))
    plt.legend(['Ground Truth', 'Prediction'])
    plt.show()
    return model, history
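
The commented-out layers and the LSTM defaults imply inputs shaped (samples, 1, n_features), so a caller would reshape flat feature matrices first (a sketch; variable names and arguments are illustrative):

import numpy as np

X_train = X_train.reshape(len(X_train), 1, -1)
X_test = X_test.reshape(len(X_test), 1, -1)
model, history = train_model('lstm', 6, 1, 'Systolic', 100,
                             X_train, y_train, X_test, y_test)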
Example #10
def get_model(tr_x, tr_y, vali_x, vali_y, mem_num):
    model = models.Sequential()
    model.add(layers.LSTM(mem_num, input_shape=(1, 1)))
    model.add(layers.Dense(1, activation='linear'))

    model.compile(loss='mse', optimizer='adam')

    early_stop = EarlyStopping(monitor="val_loss", min_delta=0, patience=2, mode="auto", baseline=0.1,
                               restore_best_weights=True)

    history = model.fit(tr_x, tr_y, epochs=1500,
                        validation_data=(vali_x, vali_y),
                        shuffle=False,
                        callbacks=[early_stop])
    # Drop the first 500 epochs from the curves; this assumes training ran at
    # least that long (early stopping with patience=2 may end it sooner).
    tr_loss = history.history['loss'][500:]
    vali_loss = history.history['val_loss'][500:]

    return tr_loss, vali_loss
Example #11
 def train_model(self):
     #callbacks
     es = EarlyStopping(monitor='val_loss',
                        patience=5,
                        restore_best_weights=True)
     check_point = ModelCheckpoint('Log/best_sequencial.h5',
                                   monitor='val_loss',
                                   save_best_only=True,
                                   mode='min')
     #train
     hist = self.model.fit(self.data[0],
                           self.data[1],
                           validation_data=(self.data[2], self.data[3]),
                           batch_size=128,
                           epochs=50,
                           callbacks=[es, check_point])
     print(self.model.summary())
     return hist
Example #12
def train(args):
    data_path = args.data
    epochs = args.epochs
    early_stop = args.early_stop
    batch_size = args.batch_size
    weights = args.weights

    model = model_architecture()
    model.summary()

    data_gen = ImageDataGenerator(preprocessing_function=preprocess_input)
    train_it = data_gen.flow_from_directory(data_path + '/train', target_size = (image_shape,image_shape), batch_size = batch_size)
    val_it = data_gen.flow_from_directory(data_path + '/val', target_size = (image_shape,image_shape), batch_size = 20)
    test_it = data_gen.flow_from_directory(data_path + '/test', target_size = (image_shape,image_shape), batch_size = 20)

    model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])

    if weights is not None:
        model.load_weights(weights)

    os.makedirs('logs', exist_ok=True)
    filepath="logs/weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5"
    checkpoint = ModelCheckpoint(filepath, period=2)
    callbacks = [checkpoint]
    if early_stop:
        early_stopping = EarlyStopping(patience = 2, restore_best_weights = True)
        callbacks.append(early_stopping)

    model.fit_generator(train_it, epochs=epochs, callbacks=callbacks, validation_data=val_it)

    os.makedirs('model', exist_ok=True)
    model_json = model.to_json()
    with open("model/model_final.json", "w") as json_file:
        json_file.write(model_json)

    model.save_weights('model/model_final.hdf5')

    print("MODEL TRAINED")

    test_loss, test_acc = model.evaluate_generator(test_it)
    print("Test Results: ")
    print("Loss: " + str(test_loss))
    print("Test: " + str(test_acc))
Example #13
def lstm_model(batch_size, time_steps, sym, lr, epochs, dropout=.3):
    cols = ['open', 'high', 'low', 'close', 'volume']
    mat = get_data(sym).loc[:, cols].values
    x_train_ts, y_train_ts, x_test_ts, y_test_ts, scaler = data(
        mat, batch_size, time_steps)
    lstm = Sequential()
    lstm.add(
        LSTM(70,
             batch_input_shape=(batch_size, time_steps, x_train_ts.shape[2]),
             dropout=0.1,
             recurrent_dropout=dropout,
             stateful=True,
             kernel_initializer='random_uniform'))
    lstm.add(Flatten())
    lstm.add(Dense(1, activation='sigmoid'))
    opt = op.RMSprop(learning_rate=lr)
    lstm.compile(loss='mean_squared_error',
                 optimizer=opt,
                 metrics=['accuracy'])

    csv_log = CSVLogger(sym + "_log.log", append=True)
    early_stp = EarlyStopping(monitor='val_loss',
                              mode='min',
                              verbose=1,
                              patience=15,
                              min_delta=1)  # note: improvements smaller than 1 do not reset patience
    checkpoint = ModelCheckpoint(str(sym) + '_best_model.h5',
                                 monitor='val_loss',
                                 mode='min',
                                 save_best_only=True,
                                 verbose=1)

    history = lstm.fit(x_train_ts,
                       y_train_ts,
                       epochs=epochs,
                       verbose=2,
                       batch_size=batch_size,
                       validation_data=(x_test_ts, y_test_ts),
                       callbacks=[csv_log, early_stp, checkpoint])

    return lstm, history, scaler, x_test_ts, y_test_ts
Example #14
    def fit(self):

        # Prepare data
        self.dl = dataloader(dataset=self.dataset,
                             norm_method=self.norm_method,
                             val_fold=self.val_fold,
                             crop=self.crop,
                             inv_thresh=self.inv_thresh,
                             custom_data=self.custom_data,
                             verbose=2)

        # Init network
        self.autoencoder, self.encoder, self.decoder = temporal_autoencoder_v3(input_dim=1,
                                                                               timesteps=self.dl.data_train.shape[1],
                                                                               n_filters=self.n_filters,
                                                                               kernel_size=self.kernel_size,
                                                                               strides=self.strides,
                                                                               pool_size=self.pool_size,
                                                                               n_units=self.n_units,
                                                                               latent_dim=self.latent_dim)
        # DTC model (autoencoder only)
        self.model = Model(inputs=self.autoencoder.input, outputs=self.autoencoder.output)

        # Compile model
        self.model.compile(loss=self.loss, optimizer=self.optimizer)

        # Prepare callbacks
        early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5, min_delta=0.000)
        filepath = "model-ae-epoch-{epoch:02d}-{val_loss:.2f}.hdf5"
        checkpoint = ModelCheckpoint(filepath, monitor='val_loss', mode='min', verbose=1, save_best_only=False, period=10)

        # Fit model
        self.history = self.model.fit(x=self.dl.data_train,
                                      y=self.dl.data_train,
                                      validation_data=(self.dl.data_val, self.dl.data_val),
                                      epochs=self.epochs,  # 'nb_epoch' is the long-deprecated Keras 1 spelling
                                      batch_size=self.batch_size,
                                      verbose=2,
                                      callbacks=[early_stop, checkpoint]).history
Example #15
rmse_list = []
mae_list = []
loss_list = []
val_loss_list = []
min_epochs = 500  # number of epochs to show on the diagram

# separate inputs(X) and outputs(Y) from dataset
dataset = np.genfromtxt('dataset.data', delimiter='\t', dtype='float')
X = dataset[:, 0:N]
Y = dataset[:, N:N + M]

# create k-fold cross validation model with fold shuffle
kfold = KFold(n_splits=folds, shuffle=True)

#create early stopping callback
es = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=5, verbose=1, mode='min', baseline=None, restore_best_weights=True)

# start cross validation
for fold, (train, test) in enumerate(kfold.split(X, Y)):  # enumerate returns (i,values[i]) where values = kfold.split(X,Y) = (train,test)

    print(f'Fold number: {str(fold)}')

    # create network architecture one level at a time (sequentially)
    net = Sequential()
    net.add(Dense(hid_neurons_1, input_dim=N, activation='sigmoid'))  # hidden layer 1
    net.add(Dense(hid_neurons_2, input_dim=hid_neurons_1, activation='sigmoid'))  # hidden layer 2
    net.add(Dense(hid_neurons_3, input_dim=hid_neurons_2, activation='sigmoid'))  # hidden layer 3
    net.add(Dense(M, activation='tanh'))  # output layer

    # specify error functions
    def rmse(Y_pred, Y_true):
Example #16
    args.parsed_data_prefix,
    drop_fraction=args.drop_fraction,
    test_drop_fraction=args.test_drop_fraction,
    validate_fraction=args.validate_fraction,
    test_fraction=args.test_fraction)

# Label data comes in the form [visitors score, home score].
# Condense to just a spread.
y_train = np.asarray([points[0] - points[1] for points in y_train], dtype=int)
y_validate = np.asarray([points[0] - points[1] for points in y_validate],
                        dtype=int)
y_test = np.asarray([points[0] - points[1] for points in y_test], dtype=int)
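
# ShuffleCallback is defined elsewhere in this project. A minimal sketch of the
# idea (an assumption, not the original implementation): reshuffle each game's
# roster rows in place at the start of every epoch, so the model cannot latch
# onto player ordering. Assumes axis 1 of x_train indexes interchangeable slots.
from tensorflow.keras.callbacks import Callback

class ShuffleCallback(Callback):
    def __init__(self, x):
        super().__init__()
        self.x = x

    def on_epoch_begin(self, epoch, logs=None):
        for sample in self.x:          # each sample is a (players, features) view
            np.random.shuffle(sample)  # in-place shuffle along the player axis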

# Early stopping with patience
early_stopper = EarlyStopping(monitor='val_loss',
                              verbose=1,
                              patience=args.patience)
model_checkpoint = ModelCheckpoint(args.model_path,
                                   monitor='val_loss',
                                   mode='min',
                                   save_best_only=True,
                                   verbose=1)
callbacks = [early_stopper, model_checkpoint]
if args.roster_shuffle:
    print('Roster shuffling (data augmentation) enabled.')
    callbacks.append(ShuffleCallback(x_train))

# Define and train the model
input_shape = x_train[0].shape
model = Sequential()
model.add(
Example #17
    def train(self, model: Sequential, *args) -> Tuple[History, Sequential]:
        '''
        Trains the model.
        :param model: Sequential model built beforehand, or an already trained model
        :param args: a single value, the batch size
        :return: Sequential model --> the trained model
        :return: History.history --> train and validation loss and metric curves across epochs
        '''

        try:

            if model is None:
                raise CustomError.ErrorCreationModel(config.ERROR_NO_MODEL)

            # OPTIMIZER
            #opt = SGD(learning_rate=config.LEARNING_RATE, decay=config.DECAY, nesterov=True, momentum=0.9)
            opt = Adam(learning_rate=config.LEARNING_RATE, decay=config.DECAY)

            # COMPILE
            model.compile(optimizer=opt,
                          loss=config.LOSS_CATEGORICAL,
                          metrics=[config.ACCURACY_METRIC])

            # apply the configured strategies; if data augmentation is among them, it yields a train generator
            train_generator = None

            # get data
            X_train = self.data.X_train
            y_train = self.data.y_train

            if self.StrategyList:  # if strategylist is not empty
                for i, j in zip(self.StrategyList,
                                range(len(self.StrategyList))):
                    if isinstance(i, DataAugmentation.DataAugmentation):
                        train_generator = self.StrategyList[j].applyStrategy(
                            self.data)
                    else:
                        X_train, y_train = self.StrategyList[j].applyStrategy(
                            self.data)

            es_callback = EarlyStopping(monitor=config.VALIDATION_LOSS,
                                        patience=5,
                                        restore_best_weights=True)
            decrease_callback = ReduceLROnPlateau(monitor=config.LOSS,
                                                  patience=1,
                                                  factor=0.7,
                                                  mode='min',
                                                  verbose=1,
                                                  min_lr=0.000001)
            decrease_callback2 = ReduceLROnPlateau(
                monitor=config.VALIDATION_LOSS,
                patience=1,
                factor=0.7,
                mode='min',
                verbose=1,
                min_lr=0.000001)

            if train_generator is None:  #NO DATA AUGMENTATION

                history = model.fit(
                    x=X_train,
                    y=y_train,
                    batch_size=args[0],
                    epochs=config.EPOCHS,
                    validation_data=(self.data.X_val, self.data.y_val),
                    shuffle=True,
                    callbacks=[
                        es_callback, decrease_callback, decrease_callback2
                    ],
                    #class_weight=config.class_weights
                    verbose=config.TRAIN_VERBOSE)

                return history, model

            #ELSE APPLY DATA AUGMENTATION
            history = model.fit_generator(
                generator=train_generator,
                validation_data=(self.data.X_val, self.data.y_val),
                epochs=config.EPOCHS,
                steps_per_epoch=X_train.shape[0] // args[0],  # integer number of steps per epoch
                shuffle=True,
                #class_weight=config.class_weights,
                verbose=config.TRAIN_VERBOSE,
                callbacks=[es_callback, decrease_callback, decrease_callback2])

            return history, model

        except Exception:  # a bare except would also swallow KeyboardInterrupt and SystemExit
            raise CustomError.ErrorCreationModel(config.ERROR_ON_TRAINING)
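
A usage sketch (hypothetical wiring; `trainer` is an instance of this class with its data and strategy list already set, and `model` is a Sequential built elsewhere):

history, trained_model = trainer.train(model, 64)  # args[0] is the batch size
print(min(history.history['val_loss']))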
Example #18
cv_score_ann = []
all_history = []
y_pred_ann_all = []
y_test_ann_all = []
for train_index, test_index in skf.split(X, y_attack_cat):
    print("TRAIN:", train_index, "TEST:", test_index)

    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y_attack_cat[train_index], y_attack_cat[test_index]

    s = time.perf_counter()  # time.clock() was removed in Python 3.8

    es = EarlyStopping(monitor='val_loss',
                       mode='min',
                       patience=5,
                       restore_best_weights=True,
                       verbose=1)
    rlrop = ReduceLROnPlateau(monitor='val_loss',
                              mode='min',
                              patience=5,
                              factor=0.2,
                              min_lr=1e-6,
                              verbose=1)

    history = model.fit(X_train,
                        y_train,
                        epochs=50,
                        batch_size=512,
                        validation_split=0.2,
                        verbose=1,
Example #19
def basic_nn(X_train, y_train, X_test, y_test, class_lookup, dropout_rate,
             val_split, log1, labels_test):
    # adjust labels and determine shape
    y_train = to_categorical(y_train, len(class_lookup))
    y_test = to_categorical(y_test, len(class_lookup))
    input_shape = X_train.shape[1:]
    # build model
    inputs = Input(shape=input_shape)

    x = Dense(256, activation='relu')(inputs)
    x = Dropout(rate=dropout_rate)(x)
    x = Dense(128, activation='relu')(x)
    x = Dense(64, activation='relu')(x)
    #x = Dense(32, activation='relu')(x)

    # for binary models
    if len(class_lookup) == 2:
        outputs = Dense(len(class_lookup), activation='sigmoid')(x)

        model = Model(inputs, outputs)

        # compile & fit model
        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(learning_rate=lr_schedule(0)),
                      metrics=['accuracy'])

    # for more than 2 classes
    else:
        outputs = Dense(len(class_lookup), activation='softmax')(x)

        model = Model(inputs, outputs)

        # compile & fit model
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(learning_rate=lr_schedule(0)),
                      metrics=['accuracy'])

    model.summary()

    # NN callbacks
    if use_callbacks:
        es = EarlyStopping(monitor='val_loss',
                           patience=75,
                           restore_best_weights=True)
        mc = ModelCheckpoint(results_dir + '/models/' + cur_datetime +
                             '_BasicNN.h5',
                             monitor='val_loss',
                             save_best_only=True)
        lr_scheduler = LearningRateScheduler(lr_schedule)
        lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                       cooldown=0,
                                       patience=5,
                                       min_lr=0.5e-6)
        cb_list = [es, mc, lr_scheduler, lr_reducer]
        log1.write(
            "Callbacks: EarlyStopping, ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau \n\n"
        )

        # create .ini file with parameters
        params = configparser.ConfigParser(
            interpolation=configparser.ExtendedInterpolation())
        params['Preprocessing'] = {
            'sel_WL': sel_WL,
            'range_low': range_low,
            'range_high': range_high,
            'method': preprocessing_method
        }
        with open(results_dir + '/models/' + cur_datetime + '_BasicNN.ini',
                  'w') as configfile:
            params.write(configfile)
        log1.write("ini-file created. \n\n")

    # log model architecture and parameters
    log1.write(
        "Basic Neural Net \n #epochs: %i \n Batch size: %i \n Initial learning rate: %.5f \n Dropout rate: %.1f \n\n"
        % (epochs, batch_size, lr_schedule(0), dropout_rate))
    model.summary(print_fn=lambda x: log1.write(x + '\n'))
    log1.write("\n")

    history = model.fit(x=X_train,
                        y=y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        callbacks=cb_list,
                        validation_split=val_split,
                        shuffle=True)

    # Plot training & validation accuracy values
    plt.plot(history.history['accuracy'])
    plt.plot(history.history['val_accuracy'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    #plt.show()
    plt.savefig(results_dir + '/output/' + cur_datetime +
                '_basicNN_accuracy.png')
    plt.close()

    # Plot training & validation loss values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    #plt.show()
    plt.savefig(results_dir + '/output/' + cur_datetime + '_basicNN_loss.png')
    plt.close()

    # calculate loss and accuracy on test data set
    score = model.evaluate(x=X_test, y=y_test)
    print('loss: {}, accuracy: {}'.format(score[0], score[1]))

    # log the results
    log1.write("#epochs trained for: %i \n" % (len(history.history['loss'])))
    log1.write("Loss: %.2f \nAccuracy: %.3f \n\n" % (score[0], score[1]))

    return score
Example #20
        y.append(label)

    X_data = np.nan_to_num(np.array(X_data), nan=0)  # the second positional argument is `copy`, so `nan` must be passed by name
    y = np.array(y)
    print(("===X_data==>", X_data.shape))
    print("y shape", y.shape)

    x_train, x_test, y_train, y_test = train_test_split(X_data, y, shuffle=True, test_size=0.20)

    model = mymodel()
    model.compile(optimizer=Adam(learning_rate=0.001, decay=1e-8), loss="binary_crossentropy",
                  metrics=['accuracy', f1_m, precision_m, recall_m])
    generator2 = generate_data(x_train, y_train, batch_size)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, mode='min')
    terminate_on_nan = TerminateOnNaN()
    early_stopping = EarlyStopping(monitor='val_loss', patience=5, mode='auto')

    class_weights = dict(enumerate(class_weight.compute_class_weight(
        class_weight='balanced', classes=np.unique(y_train), y=y_train)))  # Keras expects a dict; sklearn now requires keyword arguments
    model.fit_generator(
        generator2,
        steps_per_epoch=math.ceil(len(x_train) / batch_size),
        epochs=no_epochs,
        shuffle=True,
        class_weight=class_weights,
        verbose=1,
        validation_data=(x_test, y_test),
        callbacks=[terminate_on_nan, reduce_lr, early_stopping])

    y_pred = model.predict(x_test)
    y_pred = np.array(y_pred)
    print(y_pred)
Example #21
model.add(Conv1D(filters, kernelsize, padding='valid', activation='relu'))
model.add(MaxPooling1D())
model.add(Flatten())
model.add(Dense(hiddendims, activation='relu'))
model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
print(model.summary())

# Callback to stop training when the validation accuracy stops improving
callback = EarlyStopping(monitor='val_accuracy',
                         min_delta=0,
                         patience=5,
                         verbose=0,
                         mode='max',
                         baseline=None,
                         restore_best_weights=False)

# Split the training data into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split(x_tr,
                                                    y_tr,
                                                    test_size=0.2,
                                                    random_state=42)
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)

# Train the model
starttime = time()
r = model.fit(X_train,
Example #22
def resnet_fc(X_train, y_train, X_test, y_test, batch_size, epochs, val_split,
              num_classes, num_filters, log1):
    # adjust labels and determine shape
    y_train = to_categorical(y_train, num_classes)
    y_test = to_categorical(y_test, num_classes)
    input_shape = X_train.shape[1:]
    # build and compile
    model = build_resnet_fc(input_shape,
                            num_classes=num_classes,
                            num_filters=num_filters)

    # for binary models
    if num_classes == 2:
        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(learning_rate=lr_schedule(0)),
                      metrics=['accuracy'])
    # for more than 2 classes
    else:
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(learning_rate=lr_schedule(0)),
                      metrics=['accuracy'])

    model.summary()

    # NN callbacks
    if use_callbacks:
        es = EarlyStopping(monitor='val_loss',
                           patience=75,
                           restore_best_weights=True)
        mc = ModelCheckpoint(results_dir + '/models/' + cur_datetime +
                             '_ResNet.h5',
                             monitor='val_loss',
                             save_best_only=True)
        lr_scheduler = LearningRateScheduler(lr_schedule)
        lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                       cooldown=0,
                                       patience=5,
                                       min_lr=0.5e-6)
        cb_list = [es, mc, lr_scheduler, lr_reducer]
        log1.write(
            "Callbacks: EarlyStopping, ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau \n\n"
        )

        # create .ini file with parameters
        params = configparser.ConfigParser(
            interpolation=configparser.ExtendedInterpolation())
        params['Preprocessing'] = {
            'sel_WL': sel_WL,
            'range_low': range_low,
            'range_high': range_high,
            'method': preprocessing_method
        }
        with open(results_dir + '/models/' + cur_datetime + '_ResNet.ini',
                  'w') as configfile:
            params.write(configfile)
        log1.write("ini-file created. \n\n")

    # log model architecture and parameters
    log1.write(
        "ResNet - fully connected \n #epochs: %i \n Batch size: %i \n Initial learning rate: %.5f \n Dropout rate: %.1f \n #filters: %i \n\n"
        % (epochs, batch_size, lr_schedule(0), dropout_rate, num_filters))
    model.summary(print_fn=lambda x: log1.write(x + '\n'))
    log1.write("\n")

    # fit model
    history = model.fit(x=X_train,
                        y=y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        callbacks=cb_list,
                        validation_split=val_split,
                        shuffle=True)

    # Plot training & validation accuracy values
    plt.plot(history.history['accuracy'])
    plt.plot(history.history['val_accuracy'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    #plt.show()
    plt.savefig(results_dir + '/output/' + cur_datetime +
                '_ResNetFC_accuracy.png')
    plt.close()

    # Plot training & validation loss values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    #plt.show()
    plt.savefig(results_dir + '/output/' + cur_datetime + '_ResNetFC_loss.png')
    plt.close()

    score = model.evaluate(x=X_test, y=y_test)
    print('loss: {}, accuracy: {}'.format(score[0], score[1]))

    # log the results
    log1.write("#epochs trained for: %i \n" % (len(history.history['loss'])))
    log1.write("Loss: %.2f \nAccuracy: %.3f \n\n" % (score[0], score[1]))

    return score
Example #23
    partition["train"] = [i for i in files[:number_of_samples]]
    partition["validation"] = [i for i in files[number_of_samples:]]

    for i in range(number_of_samples - 1):
        train_labels[files[i]] = files[i + 1]
    for i in range(number_of_samples, len(files) - 1):
        val_labels[files[i]] = files[i + 1]

    modelcheckpoint = ModelCheckpoint("weights/",
                                      monitor="val_acc",
                                      save_best_only=False,
                                      verbose=1,
                                      save_weights_only=True)
    earlystopping = EarlyStopping(monitor="val_loss",
                                  min_delta=0.000001,
                                  patience=1)
    tensorboard = TensorBoard(log_dir=".logdir/")
    reduce_plateau = ReduceLROnPlateau(monitor="val_loss",
                                       factor=0.3,
                                       patience=1,
                                       min_lr=0.00001)
    val_generator = DataGenerator(partition['validation'],
                                  val_labels,
                                  batch_size=12)
    train_generator = DataGenerator(partition['train'],
                                    train_labels,
                                    batch_size=12)

    model.compile(loss="mean_squared_logarithmic_error", optimizer="adam")
    history = model.fit_generator(generator=train_generator,
Example #24
def RNN(sonnets, sonnetsAsNums, obs_map):
    ''' Runs the RNN model. '''
    # Note: ADJUST indicates parameters that can be adjusted

    sizeOfVocab = len(obs_map)
    # 1. PREPARE THE INPUT
    allSonnetText = ''
    endOfSonnetChar = '$'
    # Concatenate all sonnets, marking each sonnet boundary with '$'.
    for sonnet in sonnets:
        allSonnetText += sonnet[:-1] + endOfSonnetChar + sonnet[-1]
    # Retrieve all the characters in a set.
    chars = sorted(list(set(allSonnetText)))

    # encode the chars
    char_indices = dict((c, i) for i, c in enumerate(chars))
    indices_char = dict((i, c) for i, c in enumerate(chars))

    # Max length of the sequence, thought to be 40
    # CAN ADJUST
    maxlen = 40

    # relevant section of the problem:
    # Semi-redundant sequences (sequences starting every step_th char)
    # training data: sequences of fixed length from the corpus
    # take all possible subsequences of 40 consecutive chars from data set,
    # but for speed, using semi-redundant sequences

    # Train x is sequences of maxlen
    # ADJUST????
    step = 1
    sequences = []
    next_chars = []
    for sonnet in sonnets:
        for i in range(0, len(sonnet) - maxlen, step):
            sequences.append(list(sonnet[i:i + maxlen]))
            next_chars.append(sonnet[i + maxlen])
    # x: (number of sequences, maxlen, number of chars)
    #print(sequences[0])
    #print(len(sequences))
    #print(len(sequences[0]))
    #print(len(sequences[0][0]))
    #print(len(sequences[0][[0]]))
    #x = np.array(sequences)
    #print(x.ndim)
    #print(x)
    #x = np.reshape(x, (x.shape[0], 1, x.shape[1]))
    #print(x.shape)
    #print(len(x[0]))
    #x = np.reshape()
    #y = np.array(next_chars)
    #print(y.shape)
    # predict on 1, maxlen is 40, len(chars)
    x = np.zeros((len(sequences), maxlen, len(chars)), dtype=bool)  # np.bool was removed from NumPy
    # y: (number of sequences, number of chars)
    y = np.zeros((len(sequences), len(chars)), dtype=bool)
    for i, seq in enumerate(sequences):
        for t, char in enumerate(seq):
            #print(seq, ", ", str(i) + ", ", char, ", " + str(t))
            # ith sequence, tth char in sequence, the char's index in dict
            x[i, t, char_indices[char]] = 1
        # ith sequence, the char's index in dict
        #print(next_chars[i])
        #print(char_indices[next_chars[i]])
        y[i, char_indices[next_chars[i]]] = 1

    # 2. CREATE THE MODEL AND FIT THE DATA
    model = Sequential()
    # char based LSTM model, single layer of 100-200 LSTM
    # ADJUST: 100-200 LSTM
    model.add(LSTM(128, input_shape=(maxlen, len(chars))))
    # fully connected dense output layer with softmax nonlinearity
    model.add(Dense(len(chars), activation='softmax'))
    print("optimizer: Adam eta=0.001, temp = 0.75")
    optimizer = Adam(learning_rate=0.001)
    # minimize categorical cross-entropy
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)
    # train for sufficient number of epochs to converge loss try different numbers,
    # graph
    # ADJUST EPOCHS
    # POSSIBLY ADJUST batch_size
    es = EarlyStopping(monitor='loss')
    model.fit(x, y, batch_size=40, epochs=500, callbacks=[es])

    #print(model.predict(x_pred))
    # 3. EXAMPLE 1 WHERE WE TRY TO GENERATE 40 CHARS
    # 40 random characters

    # TODO: Not quite sure how to do this
    # You can see the method
    predictedPoem = predictSonnet(allSonnetText, maxlen, chars, char_indices,
                                  indices_char, model)
    return predictedPoem
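
predictSonnet is defined elsewhere; since the print above mentions a sampling temperature of 0.75, a typical char-RNN sampling helper (a sketch under that assumption, not the author's code) rescales the softmax output before drawing each character:

import numpy as np

def sample_char(preds, temperature=0.75):
    # Sharpen (<1) or flatten (>1) the distribution, then draw one char index.
    preds = np.asarray(preds, dtype=np.float64)
    preds = np.log(preds + 1e-12) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    return int(np.argmax(np.random.multinomial(1, preds, 1)))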
Example #25
     rice[train_index])).repeat().shuffle(10000).batch(bs)
valid_dt = tf.data.Dataset.from_tensor_slices(
    ((weather[valid_index], countyID[valid_index]),
     rice[valid_index])).repeat().batch(bs)
all_dt = tf.data.Dataset.from_tensor_slices(
    ((weather[all_index], countyID[all_index]),
     rice[all_index])).repeat().shuffle(10000).batch(bs)

from model import construct_model

merge_model = construct_model()

cbk = EarlyStopping(monitor='val_mean_squared_error',
                    min_delta=0,
                    patience=100,
                    verbose=1,
                    mode='min',
                    baseline=None,
                    restore_best_weights=True)

model_trained = merge_model.fit(train_dt,
                                epochs=500,
                                steps_per_epoch=len(train_index) // bs,
                                validation_data=valid_dt,
                                validation_steps=1,
                                callbacks=[cbk])

merge_model.evaluate(train_dt, steps=2 * 6)
merge_model.evaluate(valid_dt, steps=1 * 4)

Example #26
    original_dim = x_train.shape[1]
    latent_dim = args.latent_dim

    vae, encoder, decoder = get_model(original_dim, args.width_scale,
                                      latent_dim, args.loss_function)

    vae.summary()
    plot_model(vae, to_file=f'{model_name}.png', show_shapes=True)

    if args.weights:
        vae.load_weights(args.weights)
    else:
        if args.early_stopping:
            es = EarlyStopping(monitor="val_loss",
                               mode="min",
                               verbose=1,
                               patience=args.patience,
                               min_delta=args.min_delta)
            cb = [es]
        else:
            cb = []

        # train the autoencoder
        vae.fit(x_train,
                epochs=args.epochs,
                batch_size=args.batch_size,
                validation_data=(x_test, None),
                callbacks=cb)
        vae.save_weights(f'{model_name}.h5')

    sample_decoder(decoder,
Example #27
    #model.add(Conv3D(filters=64, kernel_size = 26, strides=1, padding= "same", activation = "relu", kernel_initializer="he_normal"))
    model.add(Dropout(0.4))
    model.add(MaxPool3D(pool_size=2, strides=1, padding="same"))
    model.add(Dropout(0.4))
    model.add(Flatten())
    model.add(Dense(units=50, activation="relu"))
    model.add(Dropout(0.4))
    model.add(Dense(units=3, activation="softmax"))
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=['accuracy'])
    return model


cb = Callback()
critor = EarlyStopping(monitor="val_loss", patience=2)
cb_list = [critor]
my_model = model_one()

my_model.fit(X_train[0:75, :, :, :, :],
             one_hot_Y_train[0:75, :],
             epochs=10,
             batch_size=15,
             validation_data=(X_test[0:25, :, :, :, :],
                              one_hot_Y_test[0:25, :]),
             callbacks=cb_list)

evaluation = my_model.evaluate(X_train, one_hot_Y_train)
print(evaluation)

tp = 0
Example #28
def create_train_save_model(x_train, y_train, x_test, y_test):

    # Hyperparameter values were calculated by keras tuners
    model = Sequential()
    model.add(
        Conv2D(64, (3, 3),
               activation='relu',
               input_shape=(28, 28, 1),
               kernel_constraint=max_norm(3),
               bias_constraint=max_norm(3),
               padding='same'))
    model.add(MaxPooling2D((2, 2), padding='same'))
    model.add(Dropout(0.1))

    model.add(
        Conv2D(256, (3, 3),
               activation='relu',
               kernel_constraint=max_norm(3),
               bias_constraint=max_norm(3),
               padding='same'))
    model.add(MaxPooling2D((2, 2), padding='same'))
    model.add(Dropout(0.15))

    model.add(
        Conv2D(256, (3, 3),
               activation='relu',
               kernel_constraint=max_norm(3),
               bias_constraint=max_norm(3),
               padding='same'))
    model.add(MaxPooling2D((2, 2), padding='same'))
    model.add(Dropout(0.2))

    model.add(
        Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               kernel_constraint=max_norm(3),
               bias_constraint=max_norm(3)))
    model.add(MaxPooling2D((2, 2), padding='same'))
    # model.add(Dropout(0.05))

    model.add(Flatten())
    # model.add(Dropout(0.25))  # Dropout for regularization
    model.add(Dense(768, activation='relu', kernel_regularizer=l2(l=0.001)))
    model.add(Dense(len(labels), activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.RMSprop(learning_rate=1e-4),
                  metrics=['acc'])

    # Compute the split points before reslicing; otherwise the validation sets
    # would be carved out of the already-truncated training arrays.
    split_x = math.ceil(0.8 * len(x_train))
    split_y = math.ceil(0.8 * len(y_train))
    x_train, x_val = x_train[:split_x], x_train[split_x:]
    y_train, y_val = y_train[:split_y], y_train[split_y:]

    # tuner = define_random_tuner(num_classes=len(labels))
    # tuner.search(x_train, y_train, epochs=20, validation_data=(x_val, y_val))
    # tuner.results_summary()
    # print('-------------------------------------')
    # best_hp = tuner.get_best_hyperparameters()[0]
    # model = tuner.hypermodel.build(best_hp)
    # print(model.get_config())
    #
    # quit()

    train_data_gen = ImageDataGenerator()
    val_data_gen = ImageDataGenerator()
    train_generator = train_data_gen.flow(x_train,
                                          y_train,
                                          batch_size=batch_size)
    val_generator = val_data_gen.flow(x_val, y_val, batch_size=batch_size)

    early_stopping = EarlyStopping(monitor='val_loss',
                                   patience=7,
                                   verbose=0,
                                   mode='min')
    file_to_save_to = ''
    if number_of_images_per_label == 100000:
        file_to_save_to = 'vanilla_cnn_model_100k.h5'
    elif number_of_images_per_label == 10000:
        file_to_save_to = 'vanilla_cnn_model_10k.h5'
    else:
        file_to_save_to = 'vanilla_cnn_model.h5'
    mcp_save = ModelCheckpoint(file_to_save_to,
                               save_best_only=True,
                               monitor='val_loss',
                               mode='min')
    reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.1,
                                       patience=5,
                                       verbose=1,
                                       min_delta=1e-4,
                                       mode='min')

    # fit_generator is necessary for 100k, where using batches is required due to memory size limitations
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=len(x_train) // batch_size,
        epochs=64,
        validation_data=val_generator,
        validation_steps=len(x_val) // batch_size,
        callbacks=[early_stopping, mcp_save, reduce_lr_loss],
        verbose=2)

    # model.save(file_to_save_to) # not using this because of mcp_save
    print(model.evaluate(x_test, y_test))
    return history
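
Because mcp_save was created with save_best_only=True, the file on disk holds the best validation epoch while the in-memory model holds the last one; a caller would normally reload before reporting test numbers (a sketch; the filename matches the default branch above):

from tensorflow.keras.models import load_model

best = load_model('vanilla_cnn_model.h5')
print(best.evaluate(x_test, y_test))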
Example #29
           kernel_initializer="he_normal"))
model.add(
    Conv1D(filters=21,
           kernel_size=3,
           strides=1,
           padding="same",
           activation="linear",
           kernel_initializer="he_normal"))
model.add(TimeDistributed(Dense(4, activation="softmax")))

np.random.seed(1000)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=['accuracy'])
stop_criteria = EarlyStopping(monitor="val_loss",
                              mode="min",
                              verbose=1,
                              patience=10)
best_model_path = "./my_model" + ".h5"
best_model = ModelCheckpoint(best_model_path,
                             monitor="val_loss",
                             verbose=2,
                             save_best_only=True)
my_model = load_model("./my_model.h5")
my_model.fit(all_one_hot_x_train,
             all_one_hot_y_train,
             epochs=20,
             batch_size=20,
             validation_split=0.05,
             callbacks=[stop_criteria, best_model])  # include the checkpoint so the best epoch is actually saved

evaluation = model.evaluate(all_one_hot_x_train, all_one_hot_y_train)
model = Model(inputs=input, outputs=output)  # 'input='/'output=' are the deprecated Keras 1 keyword names
model.summary()

# ======================================================================================================================
# Compile and train model
model.compile(optimizer=RMSprop(learning_rate=learning_rate),
              loss='binary_crossentropy',
              metrics=['accuracy'])
hist = model.fit(x_train,
                 y_train,
                 batch_size=batch_size,
                 epochs=epochs,
                 validation_data=(x_test, y_test),
                 callbacks=[
                     EarlyStopping(monitor='val_loss',
                                   patience=5,
                                   restore_best_weights=True,
                                   min_delta=0.0001),
                     ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.3,
                                       patience=2,
                                       min_delta=0.0001,
                                       min_lr=0.0001)
                 ])
model.save('B.h5')

# ======================================================================================================================
# Visualization
# Plot learning rate & train accuracy
plt.plot(hist.history['lr'], hist.history['accuracy'])
plt.plot(hist.history['lr'], hist.history['val_accuracy'])
plt.title('Hyperparameter')