Example no. 1
from tensorflow.keras.callbacks import EarlyStopping

def fit_model(f_train, l_train, learning_rate, num_epochs):
    #build the model: for the specs see model.py; we increased the number of hidden neurons
    #in order to introduce some overfitting
    model = design_model(f_train, learning_rate)
    #train the model on the training data, stopping early once the validation loss stops improving
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=20)
    history = model.fit(f_train, l_train, epochs=num_epochs, batch_size=16, verbose=0, validation_split=0.2, callbacks=[es])
    return history
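#A quick usage sketch for the snippet above (hypothetical hyperparameter values; design_model,
#features_train and labels_train are assumed to come from model.py, as in the other examples here):
history = fit_model(features_train, labels_train, learning_rate=0.01, num_epochs=200)
#With patience=20, training may stop well before 200 epochs; the number of epochs actually
#run is the length of the recorded loss history.
print("stopped after", len(history.history['loss']), "epochs")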
Example no. 2
import matplotlib.pyplot as plt

def fit_model(f_train, l_train, learning_rate, num_epochs, bs):
    #build the model
    model = design_model(f_train, learning_rate)
    #train the model on the training data, holding out 20% for validation
    history = model.fit(f_train, l_train, epochs=num_epochs, batch_size=bs, verbose=0, validation_split=0.2)
    #plot the learning curves for this learning rate
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='validation')
    plt.title('lrate=' + str(learning_rate))
    plt.legend(loc="upper right")
Example no. 3
def fit_model(f_train, l_train, learning_rate, num_epochs, batch_size, ax):
    model = design_model(f_train, learning_rate)
    #train the model on the training data, holding out 30% for validation
    history = model.fit(f_train, l_train, epochs=num_epochs, batch_size=batch_size, verbose=0, validation_split=0.3)
    # plot learning curves
    ax.plot(history.history['mae'], label='train')
    ax.plot(history.history['val_mae'], label='validation')
    ax.set_title('batch = ' + str(batch_size), fontdict={'fontsize': 8, 'fontweight': 'medium'})
    ax.set_xlabel('# epochs')
    ax.set_ylabel('mae')
    ax.legend()
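#A usage sketch for the ax-based variant above (hypothetical batch sizes; it assumes the model
#was compiled with metrics=['mae'] so that 'mae'/'val_mae' appear in the history, and that
#features_train and labels_train are available as in the other snippets):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
for batch_size, ax in zip([4, 16, 64], axes):
    fit_model(features_train, labels_train, learning_rate=0.01, num_epochs=50, batch_size=batch_size, ax=ax)
fig.tight_layout()
plt.show()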
Example no. 4
from tensorflow.keras.callbacks import EarlyStopping

def fit_model(f_train, l_train, learning_rate, num_epochs):
    #build the model: for the specs see model.py; we increased the number of hidden neurons
    #in order to introduce some overfitting
    model = design_model(f_train, learning_rate)
    #train the model on the training data
    #In the fit_model() method, just before calling model.fit(), create an instance of EarlyStopping that monitors the validation loss (val_loss),
    #seeks the minimal loss (mode='min'), is verbose, and has patience equal to 20. Assign the result to a variable called es.
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=20)
    #Now that you have an instance of EarlyStopping assigned to es, pass it as a callback to the .fit() method.
    history = model.fit(f_train, l_train, epochs=num_epochs, batch_size=16, verbose=0, validation_split=0.2, callbacks=[es])
    return history
Example no. 5
#An overfit model learns patterns specific to the training data that would not apply to new data. For that reason,
#hyperparameters are chosen on a held-out set called the validation set.
#In TensorFlow Keras, the validation split can be specified as a parameter of the .fit() function:

##my_model.fit(data, labels, epochs = 20, batch_size = 1, verbose = 1, validation_split = 0.2)

#where validation_split is a float between 0 and 1, denoting the fraction of the training data to be used as validation data.
#In the example above, 20% of the data would be allocated for validation. It is usually a small fraction of the training data.
#The model will set apart this fraction of the training data, will not train on it, and
#will evaluate the loss and any model metrics on this data at the end of each epoch.
######################################################################################################################################################

#see neural network folder file for more details
from model import design_model, features_train, labels_train 

model = design_model(features_train, learning_rate = 0.01)
#Use the .fit() function to fit the model instance model to the training features features_train and training
#labels labels_train with 40 epochs, batch size set to 8, verbose set to true (1), and validation split set to 33%.

history = model.fit(features_train, labels_train, epochs = 40, batch_size = 8, verbose = 1, validation_split = 0.33)
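#.fit() returns a History object; its history dict records the loss and any metrics computed
#on the validation split at the end of each epoch (a minimal sketch using the history captured above):
print("final validation loss:", history.history['val_loss'][-1])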


########################################################################################################################################################
#Manual Tuning: Learning Rate
#Neural networks are trained with the gradient descent algorithm, and one of the most important hyperparameters
#in network training is the learning rate. The learning rate determines how big a change is applied to the network
#weights as a consequence of the error gradient calculated on a batch of training data.
#A larger learning rate leads to a faster learning process, at the cost of possibly getting stuck in a suboptimal solution (a local minimum).
#A smaller learning rate might produce a good suboptimal or even global solution, but it will take much longer to converge.
#In the extremes, a learning rate that is too large leads to an unstable learning process that oscillates over the epochs,
#while a learning rate that is too small may never converge or may get stuck in a local minimum.
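#A minimal sketch of comparing learning rates side by side (hypothetical rate values;
#fit_model here is the curve-plotting variant from Example no. 2, and features_train and
#labels_train are assumed to come from model.py as above):
import matplotlib.pyplot as plt
for lr in [1.0, 0.1, 0.01, 0.001]:
    plt.figure()
    fit_model(features_train, labels_train, learning_rate=lr, num_epochs=50, bs=16)
plt.show()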
Example no. 6
    loss_and_metrics = model.evaluate(NN_test, y_test[:, 0])
    print "test error is: ", loss_and_metrics
    
elif x == 3:
    print('Training LSTM and NN')
    # Drop the Y and REF columns because the LSTM should not train on them
    X_train = X_train[:, :, 0:1]
    X_test = X_test[:, :, 0:1]
    
    # define the input sizes for the LSTM
    lstm_data_dim = X_train.shape[2]
    nn_data_dim = NN_train.shape[1]
    timesteps = lstm_length

    # construct and compile the model
    model = mod.design_model(lstm_data_dim, nn_data_dim, timesteps)
    start_time = time.time()
    print "Compiling Model ..."
    model.compile(loss="mse", optimizer="rmsprop")
    print("Compile Time : %s seconds --- \n" % (time.time() - start_time))

   
    
    # train the model (epochs replaces the deprecated nb_epoch argument)
    model.fit([X_train, NN_train], y_train, batch_size=my_batch_size, epochs=my_epoch)
    print("Training Time : %s seconds --- \n" % (time.time() - start_time))
    # test the model
    U_hat = model.predict([X_test, NN_test], verbose=1)
    U_hat = U_hat.reshape((len(U_hat)))
    loss_and_metrics = model.evaluate([X_test, NN_test], y_test[:, 0])
    print "test error is: ", loss_and_metrics