import numpy as np
from sklearn.metrics import confusion_matrix, accuracy_score

# Project-local modules; the exact import paths are assumed here.
from network import NeuralNetwork
from layers import FCLayer, ActivationLayer
from functions import (swish, swish_prime, softmax, softmax_prime,
                       cross_entropy, cross_entropy_prime)

# training data (OR gate): each sample is a 1x2 row vector
x_train = np.array([[[0, 0]], [[0, 1]], [[1, 0]], [[1, 1]]])
one_hot_encoded_y_train = np.array([[[1, 0]], [[0, 1]], [[0, 1]], [[0, 1]]])
y_train = [0, 1, 1, 1]

# raise numpy floating-point errors instead of silently emitting warnings
np.seterr(all='raise')

input_number = x_train.shape[2]
# 2 classes; was hard-coded to 6, which mismatches the one-hot targets
output_number = one_hot_encoded_y_train.shape[2]
size_of_hidden_layer = 10

neural_network = NeuralNetwork(cross_entropy, cross_entropy_prime)
neural_network.add_layer(
    FCLayer(input_number, size_of_hidden_layer, diminishing_factor=10))
neural_network.add_layer(ActivationLayer(swish, swish_prime))
neural_network.add_layer(FCLayer(size_of_hidden_layer, output_number))
neural_network.add_layer(ActivationLayer(softmax, softmax_prime))

neural_network.fit(x_train, one_hot_encoded_y_train, epoch_number=10,
                   initial_learning_rate=0.5, decay=0.01)

out = neural_network.predict(x_train)
# collapse the per-sample softmax outputs to predicted class labels
# (a bare argmax(out) was a NameError in the original)
predictions = np.argmax(np.array(out), axis=2).flatten()

print("confusion matrix:", confusion_matrix(y_train, predictions), sep="\n")
print("accuracy: ", accuracy_score(y_train, predictions))
print("end")
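# The script above pulls swish/softmax/cross-entropy and their derivatives
# from a project module that is not shown. A minimal sketch of plausible
# definitions follows; these are assumptions consistent with the standard
# formulas (and with a (y_true, y_pred) loss signature), not the project's
# actual code.
import numpy as np

def swish(x):
    # swish(x) = x * sigmoid(x)
    return x / (1.0 + np.exp(-x))

def swish_prime(x):
    s = 1.0 / (1.0 + np.exp(-x))
    return s + x * s * (1.0 - s)

def softmax(x):
    e = np.exp(x - np.max(x))  # shift for numerical stability
    return e / np.sum(e)

def softmax_prime(x):
    # full softmax Jacobian; often folded into the loss gradient in practice
    s = softmax(x).reshape(-1, 1)
    return np.diagflat(s) - s @ s.T

def cross_entropy(y_true, y_pred):
    return -np.sum(y_true * np.log(y_pred + 1e-12))

def cross_entropy_prime(y_true, y_pred):
    return -y_true / (y_pred + 1e-12)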
import dataset as ds
import plot as plt
from neural_networks import NeuralNetwork
from layers import InputLayer, OutputLayer, DenseLayer

data = ds.MLCupDataset()

# 10 inputs -> two sigmoid hidden layers (50, 30 units) -> 2 outputs
model = NeuralNetwork()
model.add(InputLayer(10))
model.add(DenseLayer(50, fanin=10, activation="sigmoid"))
model.add(DenseLayer(30, fanin=50, activation="sigmoid"))
model.add(OutputLayer(2, fanin=30))

# configuration 322, line 324
# positional arguments, inferred from the grid-search script below: a size
# used to scale the learning rate, epochs, learning rate, an unused fourth
# argument (None in both scripts), regularization, momentum, loss name
model.compile(1143, 600, 0.03, None, 0.000008, 0.3, "mean_squared_error")

loss = model.fit(data.train_data_patterns, data.train_data_targets)
print(loss[-1])
plt.plot_loss(loss)
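# plot.plot_loss above is a project-local helper that is not shown; a minimal
# matplotlib sketch of what it plausibly does (an assumption, not the
# project's code):
import matplotlib.pyplot as mpl

def plot_loss(loss):
    # one loss value per epoch, as returned by model.fit
    mpl.plot(range(1, len(loss) + 1), loss)
    mpl.xlabel("epoch")
    mpl.ylabel("mean squared error")
    mpl.show()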
import dataset as ds
from neural_networks import NeuralNetwork
from layers import InputLayer, OutputLayer, DenseLayer

# Grid search over hyperparameters with k-fold cross-validation.
# The grids, k, size, and output path below are placeholders: the original
# script defines them elsewhere and they are not shown here.
filepath = "grid_search_results.csv"
epochs = [300, 600]
learning_rates = [0.01, 0.03, 0.1]
regularizations = [0.0, 0.000008]
momentums = [0.0, 0.3, 0.6]
k = 5
size = 1143

data = ds.MLCupDataset()

with open(filepath, "w") as fp:
    config = 0
    for epoch in epochs:
        for lr in learning_rates:
            for reg in regularizations:
                for alpha in momentums:
                    mean_loss = 0
                    mean_validation = 0
                    for i in range(k):
                        # rebuild the model from scratch for every fold
                        model = NeuralNetwork()
                        model.add(InputLayer(10))
                        model.add(DenseLayer(50, fanin=10))
                        model.add(DenseLayer(30, fanin=50))
                        model.add(OutputLayer(2, fanin=30))
                        model.compile(size, epoch, lr / size, None, reg,
                                      alpha, "mean_squared_error")
                        (train, val) = data.kfolds(index=i, k=k)
                        mean_loss += model.fit(train[0], train[1])[-1]
                        mean_validation += model.evaluate(val[0], val[1])
                    fp.write("{}, {}, {}, {}, {}, {}, {}\n".format(
                        config, epoch, lr, reg, alpha,
                        mean_loss / k, mean_validation / k))
                    config += 1
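# data.kfolds(index=i, k=k) belongs to the project's dataset module and is
# not shown. A plausible numpy sketch, assuming it returns
# ((train_patterns, train_targets), (val_patterns, val_targets)) with the
# i-th of k contiguous folds held out:
import numpy as np

def kfolds(patterns, targets, index, k):
    folds_x = np.array_split(patterns, k)
    folds_y = np.array_split(targets, k)
    val = (folds_x[index], folds_y[index])
    train_x = np.concatenate([f for j, f in enumerate(folds_x) if j != index])
    train_y = np.concatenate([f for j, f in enumerate(folds_y) if j != index])
    return (train_x, train_y), val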
import numpy as np

# These come from a from-scratch deep-learning library; the import paths
# are assumed here.
from deep_learning import NeuralNetwork
from deep_learning.layers import RNN, Activation
from deep_learning.loss_functions import CrossEntropy
from deep_learning.optimizers import Adam
from utils import train_test_split, accuracy_score, gen_mult_ser

if __name__ == '__main__':
    # 3000 one-hot encoded number series: 10 timesteps over 61 symbols
    X, y = gen_mult_ser(3000)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

    # `optimizer` was undefined in the original; Adam with default settings
    # is assumed here
    optimizer = Adam()
    clf = NeuralNetwork(optimizer=optimizer, loss=CrossEntropy)
    clf.add(RNN(10, activation="tanh", bptt_trunc=5, input_shape=(10, 61)))
    clf.add(Activation('softmax'))

    # show one training example as plain digits
    tmp_X = np.argmax(X_train[0], axis=1)
    tmp_y = np.argmax(y_train[0], axis=1)
    print("Number Series Problem:")
    print("X = [" + " ".join(tmp_X.astype("str")) + "]")
    print("y = [" + " ".join(tmp_y.astype("str")) + "]")
    print()

    train_err, _ = clf.fit(X_train, y_train, n_epochs=500, batch_size=512)

    # decode one-hot predictions and targets back to digit sequences
    y_pred = np.argmax(clf.predict(X_test), axis=2)
    y_test = np.argmax(y_test, axis=2)

    accuracy = np.mean(accuracy_score(y_test, y_pred))
    print("Accuracy:", accuracy)
    print()
    print("Results:")
    for i in range(5):
        tmp_X = np.argmax(X_test[i], axis=1)
        tmp_y1 = y_test[i]
        tmp_y2 = y_pred[i]
        print("X = [" + " ".join(tmp_X.astype("str")) + "]")
        print("y_true = [" + " ".join(tmp_y1.astype("str")) + "]")
        print("y_pred = [" + " ".join(tmp_y2.astype("str")) + "]")
        print()
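# gen_mult_ser is not shown above. Given the (10, 61) input shape, a
# plausible sketch: each sample is the multiplication table of a small
# random number, one-hot encoded over 61 symbols (values 0-60), with the
# target being the same series shifted one step ahead. This is an
# assumption, not the original generator.
import numpy as np

def gen_mult_ser(nums, timesteps=10, n_symbols=61):
    X = np.zeros((nums, timesteps, n_symbols))
    y = np.zeros((nums, timesteps, n_symbols))
    for i in range(nums):
        start = np.random.randint(2, 7)
        series = start * np.arange(1, timesteps + 1)    # e.g. 3 6 9 ... 30
        X[i, np.arange(timesteps), series] = 1          # one-hot encode
        y[i, np.arange(timesteps - 1), series[1:]] = 1  # next-step targets
        y[i, -1, 0] = 1                                 # pad the last target
    return X, y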