Example #1
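get_layer_for_setting is not defined in this snippet; a minimal sketch of what it presumably does (attach the activation layer selected by setting), where the setting values and the ActivationPool name are assumptions, not the original implementation:

def get_layer_for_setting(nn, setting):
    # Hypothetical mapping from the experiment setting to an activation layer;
    # 'activation_pool' and ActivationPool are assumed names, not confirmed.
    if setting == 'relu':
        nn.add_layer(Activation('relu'))
    elif setting == 'activation_pool':
        nn.add_layer(ActivationPool())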
# Assumes NeuralNetwork, Dense, and Activation come from this project's own
# network module, and that X_train, y_train and the hyperparameters
# (num_layers, num_hidden_nodes, setting, learning_rate) are defined earlier.
from time import time

# Compile network
print("Compiling model...", end='')
nn = NeuralNetwork(X_train.shape[1])
for x in range(num_layers):
    nn.add_layer(Dense(num_hidden_nodes))
    # Attach the activation layer chosen by the experiment setting (see sketch above)
    get_layer_for_setting(nn, setting)
nn.add_layer(Dense(10))
nn.add_layer(Activation('softmax'))
nn.compile(loss_fn='categorical_crossentropy', init_fn='lecun', pred_fn='argmax',
           learning_rate=learning_rate, use_normal=True)
print('finished!')

ap_loss = []  # per-iteration loss history (collection below is commented out)
for i in range(1, num_training_iterations+1):
    start = time()
    nn.train(X_train, y_train)
    time_elapsed = time() - start
    # One CSV row per iteration: iteration, training accuracy, wall-clock time.
    # f is assumed to be a results file opened earlier in the script.
    s = "{},{},{}".format(i, nn.get_accuracy(X_train, y_train), time_elapsed)
    print(s)
    f.write(s + '\n')
    #ap_loss.append(nn.get_loss(X_train, y_train))
    #print(ap_loss[-1])

# Plotting (disabled): compares this activation-pool run against a separate
# ReLU run whose per-iteration losses would be collected in relu_loss.
#import matplotlib.pyplot as plt
#ap_, = plt.plot(ap_loss, 'g.', label="activationpool")
#relu_, = plt.plot(relu_loss, 'b^', label="relu")
#plt.title("Activation Pool vs. ReLU loss over time")
#plt.xlabel("Iterations")
#plt.ylabel("Loss fn (categorical crossentropy)")
#plt.legend(handles=[ap_, relu_])
#plt.show()
Example #2
# Assumes nn was built as in Example #1 and that X, y hold the dataset.
import numpy as np

nn.compile(loss_fn='categorical_crossentropy', pred_fn='argmax', learning_rate=learning_rate)

# Sweep two weights of layer 2 over a grid and record the loss at each point.
x_plot_before, y_plot_before, z_plot_before = [], [], []
weights = nn.layers[2].W.get_value()
print("Initial loss: {}".format(nn.get_loss(X, y)))
for I in np.arange(0, 1, 0.05):
    for J in np.arange(0, 1, 0.05):
        weights[0][0] = I
        weights[0][1] = J
        nn.layers[2].W.set_value(weights)
        loss = nn.get_loss(X, y)
        x_plot_before.append(I)
        y_plot_before.append(J)
        z_plot_before.append(loss)

# Train the network (it starts from the last grid point set above).
for x in range(num_training_iterations):
    nn.train(X, y)

# Re-sample the same weight grid after training.
x_plot_after, y_plot_after, z_plot_after = [], [], []
weights = nn.layers[2].W.get_value()

for I in np.arange(0, 1, 0.05):
    for J in np.arange(0, 1, 0.05):
        weights[0][0] = I
        weights[0][1] = J
        nn.layers[2].W.set_value(weights)
        loss = nn.get_loss(X, y)
        x_plot_after.append(I)
        y_plot_after.append(J)
        z_plot_after.append(loss)

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
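The original snippet ends at the imports; a minimal sketch of how the sampled before/after loss surfaces could be rendered with them (the side-by-side layout and styling are assumptions):

# Assumed plotting code: two 3D scatter plots of loss over the swept weights.
fig = plt.figure(figsize=(10, 5))

ax1 = fig.add_subplot(1, 2, 1, projection='3d')
ax1.scatter(x_plot_before, y_plot_before, z_plot_before, c='b', marker='.')
ax1.set_title("Loss surface before training")
ax1.set_xlabel("W[0][0]")
ax1.set_ylabel("W[0][1]")
ax1.set_zlabel("Loss")

ax2 = fig.add_subplot(1, 2, 2, projection='3d')
ax2.scatter(x_plot_after, y_plot_after, z_plot_after, c='r', marker='.')
ax2.set_title("Loss surface after training")
ax2.set_xlabel("W[0][0]")
ax2.set_ylabel("W[0][1]")
ax2.set_zlabel("Loss")

plt.show()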