import numpy as np
from mlp import MultiLayerPerceptron
from mlp_factory import MLP_Factory
# Network customization lives in the lines below
# Options for the second network (sin(x1 - x2 + x3 - x4)) are set in mlp_factory
# Problem to test: "xor" or "sin"
testing = "xor"
# Activation type: "tanh", "relu" or "sigmoid"
# tanh has best overall results
activation_type = "tanh"
learning_rate = {
    "sigmoid": 0.8,
    "tanh": 0.5,
    "relu": 0.2
}
learning_rate_change = {
    "sigmoid": 1.1,
    "tanh": 0.9,
    "relu": 0.9
}
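# The rate for the chosen activation is rescaled by the factor above every
# 100 epochs (see the training loop below), so over 1000 epochs tanh's rate
# decays from 0.5 to roughly 0.5 * 0.9**10 ~ 0.17, while sigmoid's grows
# to roughly 0.8 * 1.1**10 ~ 2.1.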
max_epochs = 1000
factory = MLP_Factory()
# Network shape for the XOR test (the sin test's shape comes from mlp_factory)
number_inputs = 2
number_hidden_units = 4
number_outputs = 1
inputs = {
    "xor": np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
    "sin": np.zeros((200, 4))  # placeholder, filled by generate_sin_values below
}
target = {
    "xor": np.array([0, 1, 1, 0]),
    "sin": np.zeros((200, 1))  # placeholder, filled by generate_sin_values below
}
if testing == "sin":
    factory.generate_sin_values(inputs[testing], target[testing])
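# A sketch of what generate_sin_values presumably does (the real
# implementation lives in mlp_factory.py); the sampling range here is an
# assumption:
#
#   def generate_sin_values(inputs, target):
#       inputs[:] = np.random.uniform(-1, 1, inputs.shape)
#       target[:, 0] = np.sin(inputs[:, 0] - inputs[:, 1]
#                             + inputs[:, 2] - inputs[:, 3])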
output = np.zeros(target[testing].shape)
options = {
    "xor": {
        "number_inputs": number_inputs,
        "number_hidden_units": number_hidden_units,
        "number_outputs": number_outputs,
        "activation_type": activation_type
    },
    "sin": factory.sin_finder(activation_type)
}
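# sin_finder presumably returns an options dict shaped like the "xor" one
# above; 4 inputs and 1 output match the data arrays, but the hidden-unit
# count is a guess (see mlp_factory.py), e.g.:
#
#   {"number_inputs": 4, "number_hidden_units": 5,
#    "number_outputs": 1, "activation_type": activation_type}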
nn = MultiLayerPerceptron(options[testing])
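# Interface assumed from mlp.py, inferred from how it is called below
# (a sketch, not the verified signatures):
#   nn.forward(x)          -> output(s) for one input sample
#   nn.backward(t)         -> per-sample error against target t, storing gradients
#   nn.update_weights(lr)  -> applies the stored gradients scaled by lr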
for epoch in range(max_epochs):
    error = 0
    for index, value in enumerate(inputs[testing]):
        output[index] = nn.forward(value)
        error += nn.backward(target[testing][index])
        # Online (per-sample) weight update
        nn.update_weights(learning_rate[activation_type])
    if epoch % 100 == 0:
        print('Epoch:\t{}\tError:\t{}'.format(epoch, error))
        # Rescale the learning rate every 100 epochs
        learning_rate[activation_type] *= learning_rate_change[activation_type]
# Only print the full output vector for XOR; the sin set has 200 rows
if testing == "xor":
    print('Output: {0}'.format(output))
print('Average difference between target and output: {0}'.format(
    nn.average_miss(target[testing], output)))
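# average_miss presumably computes something like the mean absolute
# difference (a guess; the real version is in mlp.py):
#   np.mean(np.abs(target - output))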