def __set_model(self, task, n_layer_units, activation, kernel_regularizer,
                learning_rate, momentum, nesterov, sigma):
    model = Mlp()

    # Regression ('r') uses a linear output; classification ('c') a sigmoid one
    output_activation = "linear"
    if task == 'c':
        output_activation = "sigmoid"

    # Each kernel_initializer below is the Glorot uniform bound
    # sqrt(6 / (fan_in + fan_out)) for that layer
    model.add(n_layer_units[0],
              activation=activation,
              input=self.__input_dim,
              kernel_initializer=np.sqrt(6) / np.sqrt(self.__input_dim + n_layer_units[0]),
              kernel_regularizer=kernel_regularizer)
    for layer in range(1, len(n_layer_units)):
        model.add(n_layer_units[layer],
                  activation=activation,
                  kernel_initializer=np.sqrt(6) / np.sqrt(n_layer_units[layer - 1] + n_layer_units[layer]),
                  kernel_regularizer=kernel_regularizer)
    model.add(self.__out_dim,
              activation=output_activation,
              kernel_initializer=np.sqrt(6) / np.sqrt(self.__out_dim + n_layer_units[-1]),
              kernel_regularizer=kernel_regularizer)

    model.set_optimizer(
        SGD(
            lr=learning_rate,
            momentum=momentum,
            nesterov=nesterov,
            sigma=sigma,
        ))
    return model
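The per-layer kernel_initializer above is the Glorot (Xavier) uniform bound sqrt(6 / (fan_in + fan_out)). As a minimal sketch of what a layer presumably does with that bound (the helper name glorot_uniform and the uniform sampling are assumptions, not part of the Mlp API):

import numpy as np

def glorot_uniform(fan_in, fan_out, rng=None):
    # Hypothetical helper: draw a (fan_in, fan_out) weight matrix
    # uniformly from [-b, b], where b is the Glorot bound
    rng = rng or np.random.default_rng()
    b = np.sqrt(6.0) / np.sqrt(fan_in + fan_out)
    return rng.uniform(-b, b, size=(fan_in, fan_out))

# e.g. the first hidden layer of a 17-input network with 6 units
W = glorot_uniform(17, 6)   # entries lie in roughly [-0.51, 0.51]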
import numpy as np
import time

print("Load Monk DataSet")
X_train, Y_train = load_monk("2", "train")
X_test, Y_test = load_monk("2", "test")

print("Build the model")
tk_reg = 0      # regularization lambda (e.g. 0.000001)
w_start = 0.7   # range for the initial weights
model = Mlp()
model.add(6, input=17, kernel_initializer=w_start, kernel_regularizer=tk_reg)
model.add(6, kernel_initializer=w_start, kernel_regularizer=tk_reg)
model.add(1, kernel_initializer=w_start, kernel_regularizer=tk_reg)
model.set_optimizer(SGD(lr=0.812, momentum=0.8, nesterov=True))

# Batch training (uncomment batch_size for mini-batches)
start_time = time.time()
model.fit(
    X_train,
    Y_train,
    epochs=800,
    #batch_size=31,
    validation_data=[X_test, Y_test],
    verbose=1)
print("--- %s seconds ---" % (time.time() - start_time))

outputNet = model.predict(X_test)
printMSE(outputNet, Y_test, type="test")
printAcc(outputNet, Y_test, type="test")
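printMSE and printAcc are called above but not defined in the snippet. A minimal sketch of what they might compute, assuming binary 0/1 targets and a sigmoid output thresholded at 0.5 (only the names and call signatures come from the example; the bodies are assumptions):

import numpy as np

def printMSE(output, target, type="test"):
    # Mean squared error between network outputs and targets
    mse = np.mean((np.asarray(output) - np.asarray(target)) ** 2)
    print("MSE on %s set: %f" % (type, mse))

def printAcc(output, target, type="test"):
    # Binary accuracy: threshold the sigmoid output at 0.5
    pred = (np.asarray(output) >= 0.5).astype(int)
    acc = 100.0 * np.mean(pred == np.asarray(target).astype(int))
    print("Accuracy on %s set: %.2f%%" % (type, acc))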
np.random.seed(seed=42)

print("Load Monk DataSet")
X_train, Y_train = load_monk("3", "train")
X_test, Y_test = load_monk("3", "test")

print("Build the model")
model = Mlp()
model.add(4, input=17, kernel_initializer=0.003, kernel_regularizer=0.001)
model.add(1, kernel_initializer=0.003, kernel_regularizer=0.001)
model.set_optimizer(
    SGD(
        lr=0.8,
        momentum=0.6,
        nesterov=True
    ))
# model.set_optimizer(
#     NCG(tol=1e-20)
# )
# model.set_optimizer(
#     LBFGS(m=3, c1=1e-4, c2=0.4, tol=1e-20)
# )

# Batch training
model.fit(X_train, Y_train,
          epochs=1000,
          # validation_data and verbose assumed, following the other Monk examples
          validation_data=[X_test, Y_test],
          verbose=1)
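For reference, a minimal sketch of the update rule that SGD(lr, momentum, nesterov=True) conventionally denotes (heavy-ball momentum with a look-ahead gradient); the actual Mlp optimizer internals may differ, and grad_fn is a hypothetical loss-gradient callable:

def sgd_momentum_step(w, v, grad_fn, lr=0.8, momentum=0.6, nesterov=True):
    # One (Nesterov) momentum step; grad_fn(w) returns the loss gradient at w
    if nesterov:
        g = grad_fn(w + momentum * v)   # gradient at the look-ahead point
    else:
        g = grad_fn(w)                  # plain heavy-ball momentum
    v = momentum * v - lr * g           # update the velocity
    return w + v, v                     # new weights and new velocity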
# Specify the range for the weights and the lambda for regularization;
# of course these can be different for each layer
kernel_initializer = 0.003
kernel_regularizer = 0.001

# Add as many layers as needed, with different numbers of units
model.add(4, input=17,
          kernel_initializer=kernel_initializer,
          kernel_regularizer=kernel_regularizer)
model.add(1,
          kernel_initializer=kernel_initializer,
          kernel_regularizer=kernel_regularizer)

es = EarlyStopping(0.00009, 20)  # eps_GL and s_UP

# Choose which optimizer to use in the learning phase
model.set_optimizer(
    SGD(lr=0.83,        # learning rate
        momentum=0.9,   # alpha for the momentum
        nesterov=True,  # whether to use Nesterov momentum
        sigma=None      # sigma for the accelerated Nesterov
    ))

# Start the learning phase
model.fit(X_train, Y_train,
          epochs=600,
          #batch_size=31,
          validation_data=[X_test, Y_test],
          es=es,
          verbose=0)

# Once the model is trained, predictions can be
# performed with the predict method
outputNet = model.predict(X_test)
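The comment "eps_GL and s_UP" suggests the generalization-loss (GL) and successive-increase (UP) stopping criteria in the spirit of Prechelt's "Early Stopping - but when?". A minimal sketch of such a rule, under the assumption that EarlyStopping(eps_GL, s_UP) combines the two (the class below is illustrative, not the project's implementation):

class EarlyStoppingSketch:
    def __init__(self, eps_GL, s_UP):
        self.eps_GL = eps_GL    # threshold on the generalization loss
        self.s_UP = s_UP        # consecutive validation-error increases allowed
        self.best = float("inf")
        self.prev = float("inf")
        self.ups = 0

    def should_stop(self, val_error):
        self.best = min(self.best, val_error)
        gl = val_error / self.best - 1.0   # relative loss over the best epoch
        # count consecutive epochs where the validation error rises
        self.ups = self.ups + 1 if val_error > self.prev else 0
        self.prev = val_error
        return gl > self.eps_GL or self.ups >= self.s_UP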