# first layer: the original line is truncated here and only the trailing
# kernel_regularizer argument survives; a 1/sqrt(fan_in) initializer is
# assumed to mirror the second layer below
model.add(4, input=17, kernel_initializer=1 / np.sqrt(17),
          kernel_regularizer=0.001)
model.add(1, kernel_initializer=1 / np.sqrt(4), kernel_regularizer=0.001)

# model.set_optimizer(
#     SGD(
#         lr=0.8,
#         momentum=0.9,
#         nesterov=True
#     ))

# Batch
# model.set_optimizer(
#     NCG()
# )

model.set_optimizer(LBFGS(m=20, c1=1e-4, c2=0.9, tol=1e-20))

model.fit(
    X_train,
    Y_train,
    epochs=50,
    # batch_size=31,
    validation_data=[X_test, Y_test],
    verbose=1)

outputNet = model.predict(X_test)
printMSE(outputNet, Y_test, type="test")
printAcc(outputNet, Y_test, type="test")
plotHistory(model.history)
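# The reporting helpers called above (printMSE, printAcc, plotHistory) are not
# defined in this section. The sketch below is a hypothetical reconstruction,
# assuming NumPy arrays of continuous outputs/targets and a Keras-style
# history dict; it is not the project's actual implementation.
import numpy as np
import matplotlib.pyplot as plt

def printMSE(output, target, type="test"):
    # mean squared error between network output and targets
    mse = np.mean((np.asarray(output) - np.asarray(target)) ** 2)
    print(f"MSE ({type}): {mse:.6f}")

def printAcc(output, target, type="test", threshold=0.5):
    # accuracy after thresholding the continuous output
    pred = (np.asarray(output) >= threshold).astype(int)
    acc = np.mean(pred == np.asarray(target).astype(int))
    print(f"Accuracy ({type}): {acc:.4f}")

def plotHistory(history):
    # plot every metric series stored in the history dictionary
    for name, values in history.items():
        plt.plot(values, label=name)
    plt.xlabel("epoch")
    plt.legend()
    plt.show()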
print("Build the model") model = Mlp() model.add(4, input=17, kernel_initializer=0.003, kernel_regularizer=reg) model.add(1, kernel_initializer=0.003, kernel_regularizer=reg) ############################# # L-BFGS ############################# c1 = 1e-4 c2 = .9 m = 30 ln_maxiter = 100 ############################# optimizer = LBFGS(m=m, c1=c1, c2=c2, ln_maxiter=ln_maxiter, norm_g_eps=ng_eps, l_eps=l_eps) model.set_optimizer(optimizer) print("Start the optimization process:") model.fit(X_train, Y_train, epochs=max_iter, verbose=verbose) f = model.history["loss_mse_reg"] ############################## # plot ############################## pos_train = (0, 0) figsize = (12, 4) plt.plot(f - f[-1], linestyle='-')
model.add(4, input=17, kernel_initializer=0.003, kernel_regularizer=0.001)
model.add(1, kernel_initializer=0.003, kernel_regularizer=0.001)

# model.set_optimizer(
#     SGD(
#         lr=0.8,
#         momentum=0.6,
#         nesterov=True
#     ))

# model.set_optimizer(
#     NCG(tol=1e-20)
# )

model.set_optimizer(
    LBFGS(m=3, c1=1e-4, c2=0.4, tol=1e-20)
)

# Batch
model.fit(X_train, Y_train,
          epochs=30,
          # batch_size=31,
          validation_data=[X_test, Y_test],
          verbose=1)

outputNet = model.predict(X_test)
printMSE(outputNet, Y_test, type="test")
printAcc(outputNet, Y_test, type="test")
plotHistory(model.history)
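# The memory parameter m varied across these runs (3, 20, 30) bounds how many
# curvature pairs (s_k, y_k) the optimizer keeps, while c1 and c2 are the
# sufficient-decrease and curvature constants of the Wolfe line search.
# Library-independent sketch of the standard two-loop recursion that turns the
# stored pairs into a search direction (illustrative only, not the LBFGS class
# used above):
import numpy as np

def lbfgs_direction(grad, s_list, y_list):
    # Two-loop recursion: approximate -H_k^{-1} grad from at most m stored
    # pairs, where s_list[i] is a past step and y_list[i] the matching
    # gradient difference, both ordered oldest first.
    q = np.asarray(grad, dtype=float).copy()
    rhos = [1.0 / np.dot(y, s) for s, y in zip(s_list, y_list)]
    alphas = []
    # first loop: newest pair to oldest
    for s, y, rho in zip(reversed(s_list), reversed(y_list), reversed(rhos)):
        a = rho * np.dot(s, q)
        q -= a * y
        alphas.append(a)
    # scale the initial Hessian with gamma_k = s^T y / y^T y (standard choice)
    gamma = (np.dot(s_list[-1], y_list[-1]) / np.dot(y_list[-1], y_list[-1])
             if s_list else 1.0)
    r = gamma * q
    # second loop: oldest pair to newest
    for s, y, rho, a in zip(s_list, y_list, rhos, reversed(alphas)):
        beta = rho * np.dot(y, r)
        r += (a - beta) * s
    return -r  # descent direction; the step length is then chosen by the Wolfe line search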