Example #1

Grid search for model selection: every combination of epochs, learning rate, regularization and momentum is trained with k-fold cross-validation, and the mean training loss and mean validation score of each configuration are appended to a results file.
import dataset as ds
from neural_networks import NeuralNetwork
from layers import InputLayer, OutputLayer, DenseLayer

data = ds.MLCupDataset()

# filepath, size, k and the hyperparameter grids (epochs, learning_rates,
# regularizations, momentums) are assumed to be defined before this point.
fp = open(filepath, "w")

config = 0

# Exhaustive grid search over all hyperparameter combinations.
for epoch in epochs:
    for lr in learning_rates:
        for reg in regularizations:
            for alpha in momentums:
                mean_loss = 0
                mean_validation = 0

                # k-fold cross-validation: a fresh model is trained on each fold.
                for i in range(k):
                    model = NeuralNetwork()
                    model.add(InputLayer(10))
                    model.add(DenseLayer(50, fanin=10))
                    model.add(DenseLayer(30, fanin=50))
                    model.add(OutputLayer(2, fanin=30))
                    model.compile(size, epoch, lr / size, None, reg, alpha,
                                  "mean_squared_error")
                    (train, val) = data.kfolds(index=i, k=k)
                    mean_loss += model.fit(train[0], train[1])[-1]
                    mean_validation += model.evaluate(val[0], val[1])

                # One CSV row per configuration: id, hyperparameters, and the
                # k-fold means of the training loss and validation score.
                fp.write("{}, {}, {}, {}, {}, {}, {}\n".format(
                    config, epoch, lr, reg, alpha, mean_loss / k,
                    mean_validation / k))

                config += 1

fp.close()
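
The results file can then be inspected to pick the best configuration. The snippet below is a minimal sketch (not part of the original scripts): it assumes the seven comma-separated columns written above and that a lower mean validation score is better.

import csv

# Load the grid-search results written by the loop above and report the
# configuration with the lowest mean validation score (last column).
with open(filepath) as results:
    rows = [[field.strip() for field in row] for row in csv.reader(results)]

best = min(rows, key=lambda row: float(row[6]))
print("best configuration id:", best[0])
print("epochs, lr, reg, momentum:", best[1:5])
print("mean validation score:", best[6])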
Example #2

The selected configuration (number 322 from the grid search) is retrained on the whole training set, assessed on the model-assessment split, and used to produce predictions for the blind test patterns.

import dataset as ds
from neural_networks import NeuralNetwork
from layers import InputLayer, OutputLayer, DenseLayer
import matplotlib.pyplot as plt


data = ds.MLCupDataset()

# Final model: 10-50-30-2 architecture with sigmoid hidden layers.
model = NeuralNetwork()
model.add(InputLayer(10))
model.add(DenseLayer(50, fanin=10, activation="sigmoid"))
model.add(DenseLayer(30, fanin=50, activation="sigmoid"))
model.add(OutputLayer(2, fanin=30))

# configuration 322, line 324
# Positional arguments follow Example #1: size, epochs, lr / size, None,
# regularization, momentum, loss.
model.compile(1142, 600, 0.03/1142, None, 0.000008, 0.3, "mean_squared_error")

loss = model.fit(data.train_data_patterns, data.train_data_targets)

# Collapse the loss curve by averaging consecutive pairs of recorded values.
final_loss = []
for i in range(0, len(loss) - 1, 2):
    final_loss.append((loss[i] + loss[i + 1]) / 2)

print(final_loss[-1])  # final training loss
print(model.evaluate(data.model_assessment_patterns,
                     data.model_assessment_targets))  # model-assessment score
plt.plot(final_loss)
plt.show()

# Run the model on the blind test patterns.
model._blind_test(data.test_data_patterns)