# Example 1
# 0
def create_model(hyperparams):
    """Build and compile a NeuralNetwork from a hyperparameter dict.

    Expected keys in *hyperparams*:
        'eta'          -- learning rate
        'alpha'        -- momentum coefficient
        'lambda'       -- L2 regularization strength
        'hidden_nodes' -- hidden-layer width (coerced to int)
        'functions'    -- activation name, used for hidden and output layers
        'kernel_init'  -- name of a class in the kernel_initialization module

    Returns the compiled NeuralNetwork instance.
    """
    # Local import keeps the function self-contained;
    # importlib.import_module is the documented replacement for raw __import__.
    import importlib

    lr = hyperparams['eta']
    momentum = hyperparams['alpha']
    l2_penalty = hyperparams['lambda']
    hidden_dim = int(hyperparams['hidden_nodes'])
    activation = hyperparams['functions']

    # Resolve the weight-initializer class by name from the project module.
    init_module = importlib.import_module('kernel_initialization')
    KernelInit = getattr(init_module, hyperparams['kernel_init'])

    model = NeuralNetwork(loss='mse', metric='mee')
    model.add_layer(hidden_dim,
                    input_dim=INPUT_DIM,
                    activation=activation,
                    kernel_initialization=KernelInit())
    # Output layer is fixed at 2 units — presumably a 2-D regression target;
    # TODO confirm against the dataset this model is trained on.
    model.add_output_layer(2,
                           activation=activation,
                           kernel_initialization=KernelInit())

    model.compile(lr=lr, momentum=momentum, l2=l2_penalty)
    return model
# Example 2
# 0
# Parse the MONK's dataset files into train/test matrices.
# (path_tr, path_ts, dim_in, dim_out, one_hot are expected to be defined
# earlier in the file — not visible in this chunk.)
parser = Monks_parser(path_tr, path_ts)

X_train, Y_train, X_test, Y_test = parser.parse(dim_in, dim_out, one_hot)

#Y_train = change_output_value(Y_train, 0, -1)
#Y_test = change_output_value(Y_test, 0, -1)

#X_train, Y_train, X_val, Y_val = train_test_split(X_train, Y_train, test_size=0.25)

# After one-hot encoding, the input dimensionality equals the encoding width.
dim_in = one_hot
dim_hid = 4  # hidden-layer width (hand-tuned constant)

# Loss is MSE; accuracy is the reported metric for this binary task.
model = NeuralNetwork('mse', 'accuracy')

model.add_layer(dim_hid, input_dim=dim_in, activation='relu', kernel_initialization=RandomUniformInitialization(-0.5, 0.5))
model.add_output_layer(dim_out, activation='sigmoid', kernel_initialization=RandomUniformInitialization(-0.5, 0.5))

# NOTE(review): positional args look like (lr=0.8, momentum=0.7, l2=0.0),
# matching the keyword form used elsewhere in this file — confirm signature.
model.compile(0.8, 0.7, 0.0)
# Full-batch training (batch size = entire training set) for up to 500 epochs,
# evaluating the test set each epoch; tol presumably an early-stopping
# tolerance — TODO confirm against NeuralNetwork.fit.
model.fit(
    X_train, Y_train, 500, X_train.shape[0], ts=(X_test, Y_test),
    verbose=True, tol=1e-2
)

# Final evaluation on both splits; accuracies scaled to percentages.
err_tr, acc_tr = model.evaluate(X_train, Y_train)
err_ts, acc_ts = model.evaluate(X_test, Y_test)
acc_tr = acc_tr*100
acc_ts = acc_ts*100
# Collected for the results dict built below (cut off in this view).
errors = [err_tr, err_ts]
accuracy = [acc_tr, acc_ts]

res = {