Example #1
    # Builds an Mlp whose layers use a Glorot/Xavier-style uniform
    # initialization bound sqrt(6 / (fan_in + fan_out)); the output activation
    # depends on the task ('c', presumably classification, gets a sigmoid
    # output, anything else a linear one).
    def __set_model(self, task, n_layer_units, activation, kernel_regularizer,
                    learning_rate, momentum, nesterov, sigma):
        model = Mlp()

        output_activation = "linear"
        if task == 'c':
            output_activation = "sigmoid"

        model.add(n_layer_units[0],
                  activation=activation,
                  input=self.__input_dim,
                  kernel_initializer=np.sqrt(6) /
                  np.sqrt(self.__input_dim + n_layer_units[0]),
                  kernel_regularizer=kernel_regularizer)

        for layer in range(1, len(n_layer_units)):
            model.add(n_layer_units[layer],
                      activation=activation,
                      kernel_initializer=np.sqrt(6) /
                      np.sqrt(n_layer_units[layer - 1] + n_layer_units[layer]),
                      kernel_regularizer=kernel_regularizer)

        model.add(self.__out_dim,
                  activation=output_activation,
                  kernel_initializer=np.sqrt(6) /
                  np.sqrt(self.__out_dim + n_layer_units[-1]),
                  kernel_regularizer=kernel_regularizer)

        model.set_optimizer(
            SGD(
                lr=learning_rate,
                momentum=momentum,
                nesterov=nesterov,
                sigma=sigma,
            ))
        return model
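The kernel_initializer passed to each add() above is the Glorot/Xavier uniform bound sqrt(6 / (fan_in + fan_out)). Because the method reads self.__input_dim and self.__out_dim, it presumably lives in a model-selection wrapper class; a minimal call sketch under that assumption (all argument values are illustrative only):

# e.g., inside the host class, for a classification task ('c') with one
# hidden layer of 4 units:
# model = self.__set_model(task='c',
#                          n_layer_units=[4],
#                          activation="sigmoid",
#                          kernel_regularizer=0.001,
#                          learning_rate=0.8,
#                          momentum=0.6,
#                          nesterov=True,
#                          sigma=None)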
Example #2

from isanet.model import Mlp
from isanet.optimizer import SGD
from isanet.datasets.monk import load_monk
from isanet.utils.model_utils import printMSE, printAcc, plotHistory
import numpy as np
import time

print("Load Monk DataSet")
X_train, Y_train = load_monk("2", "train")
X_test, Y_test = load_monk("2", "test")

print("Build the model")
tk_reg = 0  # Tikhonov regularization; e.g. 0.000001 to enable it
w_start = 0.7
model = Mlp()
model.add(6, input=17, kernel_initializer=w_start, kernel_regularizer=tk_reg)
model.add(6, kernel_initializer=w_start, kernel_regularizer=tk_reg)
model.add(1, kernel_initializer=w_start, kernel_regularizer=tk_reg)

model.set_optimizer(SGD(lr=0.812, momentum=0.8, nesterov=True))
# Batch
start_time = time.time()
model.fit(
    X_train,
    Y_train,
    epochs=800,
    #batch_size=31,
    validation_data=[X_test, Y_test],
    verbose=1)
print("--- %s seconds ---" % (time.time() - start_time))
Example #3
import sys

sys.path.insert(0, "./")

from isanet.model import Mlp
from isanet.optimizer import SGD, NCG, LBFGS
from isanet.datasets.monk import load_monk
from isanet.utils.model_utils import printMSE, printAcc, plotHistory
import numpy as np

np.random.seed(seed=42)

print("Load Monk DataSet")
X_train, Y_train = load_monk("3", "train")
X_test, Y_test = load_monk("3", "test")

print("Build the model")
model = Mlp()
model.add(4, input=17, kernel_initializer=0.003, kernel_regularizer=0.001)
model.add(1, kernel_initializer=0.003, kernel_regularizer=0.001)

model.set_optimizer(
    SGD(
        lr=0.8,
        momentum=0.6,
        nesterov=True
    ))

# model.set_optimizer(
#     NCG(tol=1e-20)
# )

# model.set_optimizer(...)
Example #4

import numpy as np
from isanet.model import Mlp
from isanet.optimizer import LBFGS
from isanet.datasets.monk import load_monk

ng_eps = 10e-6  # stopping tolerance, presumably for NCG
l_eps = 1e-6    # stopping tolerance, presumably for L-BFGS
max_iter = 1000
verbose = 0
#############################

#########################################
# Construct the Monk1 objective function
# and define a w0 with the seed
#########################################

# monk, reg, and seed are parameters here (cf. optimize_monk_f in Example #7);
# example values so the fragment runs stand-alone:
monk, reg, seed = "1", 0.001, 1

np.random.seed(seed=seed)
print("Load Monk DataSet")
X_train, Y_train = load_monk(monk, "train")
print("Build the model")
model = Mlp()
model.add(4, input=17, kernel_initializer=0.003, kernel_regularizer=reg)
model.add(1, kernel_initializer=0.003, kernel_regularizer=reg)

#############################
#          L-BFGS
#############################
c1 = 1e-4         # Armijo (sufficient decrease) constant
c2 = .9           # Wolfe curvature constant
m = 30            # L-BFGS memory (number of stored pairs)
ln_maxiter = 100  # max line-search iterations (presumably)
#############################
optimizer = LBFGS(m=m,
                  c1=c1,
                  c2=c2,
                  ln_maxiter=ln_maxiter)
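Following the pattern of the other examples (and of optimize_monk_f in Example #7), the snippet presumably continues by attaching the optimizer and fitting:

model.set_optimizer(optimizer)
model.fit(X_train, Y_train, epochs=max_iter, verbose=verbose)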
Example #5
import sys

sys.path.insert(0, "./")

from isanet.model import Mlp
from isanet.optimizer import SGD, NCG, LBFGS
from isanet.datasets.monk import load_monk
from isanet.utils.model_utils import printMSE, printAcc, plotHistory
import numpy as np

np.random.seed(seed=189)

print("Load Monk DataSet")
X_train, Y_train = load_monk("2", "train")
X_test, Y_test = load_monk("2", "test")

print("Build the model")
model = Mlp()
model.add(4,
          input=17,
          kernel_initializer=1 / np.sqrt(17),
          kernel_regularizer=0.001)
model.add(1, kernel_initializer=1 / np.sqrt(4), kernel_regularizer=0.001)

# model.set_optimizer(
#     SGD(
#         lr = 0.8,
#         momentum = 0.9,
#         nesterov = True
#     ))

model.set_optimizer(NCG(beta_method="pr", c1=1e-4, c2=.9, tol=1e-9))
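Here beta_method="pr" selects the Polak-Ribiere formula for the conjugate-gradient beta, while c1 and c2 are the usual Armijo and Wolfe line-search constants. As a plain-numpy illustration of the textbook formula (not isanet internals):

import numpy as np

def beta_pr(g_new, g_old):
    # Polak-Ribiere: beta = g_new . (g_new - g_old) / ||g_old||^2
    return g_new @ (g_new - g_old) / (g_old @ g_old)

# toy gradients from two successive iterations
g0 = np.array([1.0, -2.0])
g1 = np.array([0.5, 0.1])
print(beta_pr(g1, g0))  # scalar weight on the previous search direction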
Example #6
import sys

sys.path.insert(0, "./")

from isanet.model import Mlp
from isanet.optimizer import SGD, NCG, LBFGS
from isanet.datasets.monk import load_monk
from isanet.utils.model_utils import printMSE, printAcc, plotHistory
import numpy as np

np.random.seed(seed=189)

print("Load Monk DataSet")
X_train, Y_train = load_monk("2", "train")
X_test, Y_test = load_monk("2", "test")

print("Build the model")
model = Mlp()
model.add(4,
          input=17,
          kernel_initializer=1 / np.sqrt(17),
          kernel_regularizer=0.001)
model.add(1, kernel_initializer=1 / np.sqrt(4), kernel_regularizer=0.001)

# model.set_optimizer(
#     SGD(
#         lr = 0.8,
#         momentum = 0.9,
#         nesterov = True
#     ))
# Batch

# model.set_optimizer(...)
Example #7
import time
import numpy as np
from isanet.model import Mlp
from isanet.datasets.monk import load_monk

def optimize_monk_f(monk="1",
                    reg=0,
                    seed=1,
                    optimizer=None,
                    max_iter=1000,
                    verbose=0):
    """Allows you to build a target fusion from a monk dataset,
    and then optimize it with a specific "optimizer".
    The function is built as follows:
                    f_monk = MSE_monk + reg*||w||^2
    Parameters
    ----------

    monk : String
        Allow to specify the monk dataset.
        Accepted values:
            "1" : monk1
            "2" : monk2
            "3" : monk3
    
    reg : float 
        lamda value used in the regualrization term
        reg must be in 0 <= reg <= 1

    seed : int
        Allow to specify a starting point
    
    optimizer : isanet.optimizer
        Allow to specify the optimizer used in the process.
        Must be passed!
    
    max_iter : int
        Define the max iteration in the optimization 
        process as stop criteria.

    verbose : int [0, 1, 2, 3]
        Define the verbosity of the process.

            "0" : no output
            "1" : only iteration output
            "2" : iteration and optimizer log at each iteration
            "3" : same as above, plus the line search log 

    Returns
    -------
        model.history, optimizer.history, (end - start)
            It return the model and optimizer history plus the time of the whole process
    """

    if monk not in ["1", "2", "3"]:
        raise Exception("wrong monk function - accepted only: '1', '2', '3'")
    if optimizer is None:
        raise Exception("an optimizer must be specified")
    np.random.seed(seed=seed)
    X_train, Y_train = load_monk(monk, "train")
    kernel_initializer = {
        "1": [0.003, 0.003],
        "2": [1 / np.sqrt(17), 1 / np.sqrt(4)],
        "3": [0.003, 0.003]
    }
    model = Mlp()
    model.add(4,
              input=17,
              kernel_initializer=kernel_initializer[monk][0],
              kernel_regularizer=reg)
    model.add(1,
              kernel_initializer=kernel_initializer[monk][1],
              kernel_regularizer=reg)

    model.set_optimizer(optimizer)
    start = time.time()
    model.fit(X_train, Y_train, epochs=max_iter, verbose=verbose)
    end = time.time()
    return model.history, optimizer.history, (end - start)
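A minimal usage sketch, reusing the LBFGS settings from the fragment in Example #4 (the hyperparameter values are examples only):

optimizer = LBFGS(m=30, c1=1e-4, c2=0.9, ln_maxiter=100)
model_hist, opt_hist, elapsed = optimize_monk_f(monk="2",
                                                reg=0.001,
                                                seed=42,
                                                optimizer=optimizer,
                                                max_iter=1000,
                                                verbose=1)
print("optimized in %s seconds" % elapsed)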
Example #8
from isanet.model import Mlp
from isanet.optimizer import SGD
from isanet.datasets.iris import load_iris  # module path assumed, by analogy with isanet.datasets.monk
from isanet.utils.model_utils import printMSE, printAcc, plotHistory
import numpy as np
import time

X, Y = load_iris()

X_train = X[:75, :]
Y_train = Y[:75, :]

X_val = X[75:112, :]
Y_val = Y[75:112, :]

X_test = X[112:, :]
Y_test = Y[112:, :]

model = Mlp()
model.add(6,
          input=4,
          kernel_initializer=1 / np.sqrt(4),
          kernel_regularizer=0.006)
model.add(3, kernel_initializer=1 / np.sqrt(6), kernel_regularizer=0.006)

model.set_optimizer(SGD(lr=0.39, momentum=0.8, nesterov=True))

start_time = time.time()
model.fit(
    X_train,
    Y_train,
    validation_data=[X_val, Y_val],
    epochs=1500,
    #batch_size=31,
)
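The call is truncated at this point; by analogy with Example #2 it presumably closes with the elapsed-time report:

print("--- %s seconds ---" % (time.time() - start_time))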
Example #9
import sys

sys.path.insert(0, "./")

from isanet.model import Mlp
from isanet.optimizer import SGD, NCG, LBFGS
from isanet.datasets.monk import load_monk
from isanet.utils.model_utils import printMSE, printAcc, plotHistory
import numpy as np

np.random.seed(seed=206)

print("Load Monk DataSet")
X_train, Y_train = load_monk("1", "train")
X_test, Y_test = load_monk("1", "test")

print("Build the model")
model = Mlp()
model.add(4, input=17, kernel_initializer=0.003, kernel_regularizer=0.001)
model.add(1, kernel_initializer=0.003, kernel_regularizer=0.001)

# model.set_optimizer(
#     SGD(
#         lr = 0.83,
#         momentum = 0.9,
#         nesterov = True
#         # # gain = 0
#     ))

model.set_optimizer(NCG(beta_method="fr", c1=1e-4, c2=.3, restart=3,
                        tol=1e-12))

# model.set_optimizer(...)
# ...

Example #10
from isanet.model import Mlp
from isanet.optimizer import SGD, EarlyStopping
from isanet.datasets.monk import load_monk
import numpy as np

X_train, Y_train = load_monk("1", "train")
X_test, Y_test = load_monk("1", "test")

# create the model
model = Mlp()
# Specify the range for the weights and the lambda for regularization.
# These can of course differ for each layer.
kernel_initializer = 0.003
kernel_regularizer = 0.001

# Add as many layers as you like, with different numbers of units
model.add(4, input=17,
          kernel_initializer=kernel_initializer,
          kernel_regularizer=kernel_regularizer)
model.add(1,
          kernel_initializer=kernel_initializer,
          kernel_regularizer=kernel_regularizer)

es = EarlyStopping(0.00009, 20) # eps_GL and s_UP
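# (Assumed semantics, suggested by the names and Prechelt's early-stopping
#  criteria: eps_GL bounds the generalization loss, i.e. the relative growth
#  of the validation error over its minimum so far, and s_UP is the number of
#  successive validation-error increases tolerated before stopping.)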

# choose which optimizer to use in the learning phase
model.set_optimizer(
    SGD(lr=0.83,        # learning rate
        momentum=0.9,   # alpha for the momentum term
        nesterov=True,  # whether to use Nesterov momentum
        sigma=None      # sigma for the accelerated Nesterov variant
    ))

# start the learning phase
# (fit call completed here in the style of the other examples; the 'es=es'
#  keyword and the epochs value are assumptions, not from the original)
model.fit(X_train,
          Y_train,
          epochs=600,
          validation_data=[X_test, Y_test],
          es=es,
          verbose=1)