Example 1
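Builds a sigmoid classifier (10-15-10 hidden neurons, 2 outputs) with momentum and hinge loss on a toy dataset, then begins an analogous regression setup on cubic data (the snippet is cut off mid-statement).
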
import pandas as pd

from neural_net.neural_net import NeuralNet, Layer
from neural_net.optimizers import momentum
from neural_net.plot import Plotter
from neural_net.regularizations import L1_regularization
from neural_net.weights import uniform_init
from neural_net.prepare_dataset import x_y_split, x_y_split_by_index
# Import paths for the activation and loss below are assumed:
from neural_net.activations import sigmoid
from neural_net.losses import hinge

# Classification
train = pd.read_csv(
    'datasets/neural_net/classification/data.simple.train.100.csv')
x_train, y_train = x_y_split(train, 'cls')
test = pd.read_csv(
    'datasets/neural_net/classification/data.simple.test.100.csv')
x_test, y_test = x_y_split(test, 'cls')

nn = NeuralNet(2, weight_init=uniform_init)\
    .add_layer(Layer(10, sigmoid))\
    .add_layer(Layer(15, sigmoid))\
    .add_layer(Layer(10, sigmoid))\
    .add_layer(Layer(2, sigmoid))\
    .set_optimizer(momentum.set_params({"coef": 0.05}))\
    .set_loss(hinge)
nn.budget.set_epoch_limit(50).set_detection_limit(1.3)
nn.fit(x_train, y_train, x_test, y_test, learning_rate=0.02, batch_size=8)

# Regression
train = pd.read_csv('datasets/neural_net/regression/data.cube.train.100.csv')
x_train, y_train = x_y_split_by_index(train, -1)
test = pd.read_csv('datasets/neural_net/regression/data.cube.test.100.csv')
x_test, y_test = x_y_split_by_index(test, -1)

nn = NeuralNet(1, weight_init=uniform_init, visualize=True)\
Example 2
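An excerpt from a layer-size experiment: it assumes the imports and the `datasets` list from the surrounding script, and trains nets whose second hidden layer has 10, 20, or 50 tanh neurons, ten runs each, with RMSProp and L1 regularization. The snippet is cut off mid-loop; Example 3 shows the same error bookkeeping in full.
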
for dataset in datasets:
    np.random.seed(123)
    nns = []
    output_activation = softmax if dataset.task_type == "classification" else linear
    n_output_neurons = dataset.y_train.shape[1] if dataset.task_type == "classification" else 1
    loss = hinge if dataset.task_type == "classification" else MSE
    error_name = 'Accuracy' if dataset.task_type == "classification" else 'MSE'
    error_subplots = 2 if dataset.task_type == "classification" else 1
    first_layer_size = 10 if dataset.task_type == "classification" else 50
    layer_sizes = [10, 20, 50]
    layers_error = []
    for layer_size in layer_sizes:
        nn = NeuralNet(dataset.x_train.shape[1], weight_init=uniform_init, name=f"layers: {first_layer_size}, {layer_size}",
                       is_regression=dataset.task_type == "regression") \
            .add_layer(Layer(first_layer_size, tanh)) \
            .add_layer(Layer(layer_size, tanh)) \
            .add_layer(Layer(n_output_neurons, output_activation)) \
            .set_optimizer(RMSProp.set_params({"coef": 0.9})) \
            .set_regularization(L1_regularization.set_params({"coef": 0.0001})) \
            .set_loss(loss)
        nn.budget.set_epoch_limit(100).set_detection_limit(3)
        n = 10
        # Final train/test loss and train/test MSE for each of the n runs.
        errors = [np.empty(n) for _ in range(4)]
        for i in range(n):
            nn.fit(dataset.x_train,
                   dataset.y_train,
                   dataset.x_test,
                   dataset.y_test,
                   learning_rate=0.01,
                   batch_size=32)
Example 3
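The same experiment scaffold, this time comparing activation functions (linear, ReLU, sigmoid, tanh) in a single 20-neuron hidden layer; `Dataset`, `names`, and `sizes` are assumed to be defined earlier in the script.
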
datasets = [Dataset(name, task_type, size) for name, task_type in names for size in sizes]

for dataset in datasets:
    np.random.seed(123)
    nns = []
    output_activation = softmax if dataset.task_type == "classification" else linear
    n_output_neurons = dataset.y_train.shape[1] if dataset.task_type == "classification" else 1
    loss = hinge if dataset.task_type == "classification" else MSE
    error_name = 'Accuracy' if dataset.task_type == "classification" else 'MSE'
    error_subplots = 2 if dataset.task_type == "classification" else 1
    activations = [linear, ReLU, sigmoid, tanh]
    activation_error = []
    for activation in activations:
        nn = NeuralNet(dataset.x_train.shape[1], weight_init=uniform_init, name=activation.name,
                       is_regression=dataset.task_type == "regression") \
            .add_layer(Layer(20, activation)) \
            .add_layer(Layer(n_output_neurons, output_activation)) \
            .set_optimizer(RMSProp.set_params({"coef": 0.9})) \
            .set_regularization(L1_regularization.set_params({"coef": 0.0001})) \
            .set_loss(loss)
        nn.budget.set_epoch_limit(100).set_detection_limit(3)
        n = 10
        # Final train/test loss and train/test MSE for each of the n runs.
        errors = [np.empty(n) for _ in range(4)]
        for i in range(n):
            nn.fit(dataset.x_train, dataset.y_train, dataset.x_test, dataset.y_test, learning_rate=0.01, batch_size=32)
            errors[0][i] = nn.get_loss_train()[-1]
            errors[1][i] = nn.get_loss_test()[-1]
            errors[2][i] = nn.get_MSE_train()[-1]
            errors[3][i] = nn.get_MSE_test()[-1]
        nns.append(nn)
        activation_error.append(errors)
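
At this point `activation_error` holds, for each activation, the four per-run error arrays (final train/test loss and train/test MSE). A minimal way to summarize them, appended at the end of the dataset loop body (this aggregation is a sketch, not part of the original snippet):

    # Summarize the runs for this dataset: mean of each recorded error
    # across the n repetitions, per activation.
    labels = ['loss train', 'loss test', 'MSE train', 'MSE test']
    for activation, errors in zip(activations, activation_error):
        summary = ', '.join(f'{label}: {err.mean():.4f}'
                            for label, err in zip(labels, errors))
        print(f'{activation.name}: {summary}')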
Example 4
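A complete end-to-end classification run: a tanh hidden layer feeding a softmax output, RMSProp, L1 regularization, and LogLoss, followed by plotting the test data and error curves.
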
import pandas as pd

from neural_net.neural_net import NeuralNet, Layer
from neural_net.optimizers import RMSProp
from neural_net.regularizations import L1_regularization
from neural_net.weights import uniform_init
from neural_net.prepare_dataset import x_y_split
from neural_net.plot import Plotter
# Import paths for the activations and losses below are assumed:
from neural_net.activations import tanh, softmax
from neural_net.losses import LogLoss, MSE

train = pd.read_csv(
    'datasets/neural_net/classification/data.simple.train.1000.csv')
test = pd.read_csv(
    'datasets/neural_net/classification/data.simple.test.1000.csv')
x_train, y_train = x_y_split(train, 'cls')
x_test, y_test = x_y_split(test, 'cls')

nn = NeuralNet(2, weight_init=uniform_init)\
    .add_layer(Layer(20, tanh))\
    .add_layer(Layer(2, softmax))\
    .set_optimizer(RMSProp.set_params({"coef": 0.1}))\
    .set_regularization(L1_regularization.set_params({"coef": 0.05}))\
    .set_loss(LogLoss)
nn.budget.set_epoch_limit(3).set_detection_limit(1.3)
nn.fit(x_train, y_train, x_test, y_test, learning_rate=0.02, batch_size=32)

print(f'MSE: {MSE.compute_loss(nn.predict(x_train), y_train)}')

plotter = Plotter(x_test, y_test, [nn])
plotter.plot_data_1d()
plotter.plot_measure_results_data(NeuralNet.get_loss_test, "LogLoss")
plotter.plot_measure_results_data(NeuralNet.get_MSE_test, "MSE test")
Example 5
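MNIST-style training: a validation split is held out, pixel values are scaled to [0, 1], and validation accuracy is reported; a second, larger configuration begins where the snippet is cut off.
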
import numpy as np
from sklearn.model_selection import train_test_split

from neural_net.neural_net import NeuralNet, Layer
from neural_net.weights import uniform_init
# Import paths for the activations and loss below are assumed:
from neural_net.activations import sigmoid, softmax
from neural_net.losses import MSE

# Hold out a validation set from the (already loaded) training data.
x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                  y_train,
                                                  train_size=35000,
                                                  random_state=123)


def accuracy_of_model(nn):
    # Fraction of validation samples whose argmax prediction matches the
    # one-hot label; inputs are scaled to [0, 1] as during training.
    res = nn.predict(x_val / 255)
    res = np.argmax(res, axis=1)
    y_numeric = np.argmax(y_val, axis=1)
    return np.sum(y_numeric.transpose() == res) / y_numeric.shape[0]
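
Since `y_numeric` is one-dimensional, the `transpose()` call above is a no-op; an equivalent one-liner (not from the original) is:

    np.mean(np.argmax(nn.predict(x_val / 255), axis=1) == np.argmax(y_val, axis=1))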


np.random.seed(123)
nn = NeuralNet(x_train.shape[1], weight_init=uniform_init, name="mnist", is_regression=False) \
    .add_layer(Layer(180, sigmoid)) \
    .add_layer(Layer(40, sigmoid)) \
    .add_layer(Layer(y_train.shape[1], softmax)) \
    .set_loss(MSE)
nn.budget.set_epoch_limit(500)
nn.fit(x_train / 255,
       y_train,
       x_val / 255,
       y_val,
       learning_rate=0.002,
       batch_size=128)
print(accuracy_of_model(nn))

np.random.seed(123)
nn = NeuralNet(x_train.shape[1], weight_init=uniform_init, name="mnist", is_regression=False) \
    .add_layer(Layer(250, sigmoid)) \