# Grid search over epochs, learning rate, regularization and momentum,
# scored with k-fold cross-validation. This is a fragment: `os`,
# DEFAULT_DIR, DEFAULT_NAME, the search ranges (epochs, learning_rates,
# regularizations, momentums), `size` (batch size), `k` and `data` are
# assumed to be defined earlier in the script.
filepath = os.path.dirname(os.getcwd()) + DEFAULT_DIR + DEFAULT_NAME
with open(filepath, "w") as fp:
    config = 0
    for epoch in epochs:
        for lr in learning_rates:
            for reg in regularizations:
                for alpha in momentums:
                    mean_loss = 0
                    mean_validation = 0
                    for i in range(k):
                        # Fresh model per fold so folds do not share weights
                        model = NeuralNetwork()
                        model.add(InputLayer(10))
                        model.add(DenseLayer(50, fanin=10))
                        model.add(DenseLayer(30, fanin=50))
                        model.add(OutputLayer(2, fanin=30))
                        model.compile(size, epoch, lr / size, None, reg,
                                      alpha, "mean_squared_error")
                        (train, val) = data.kfolds(index=i, k=k)
                        mean_loss += model.fit(train[0], train[1])[-1]
                        mean_validation += model.evaluate(val[0], val[1])
                    fp.write("{}, {}, {}, {}, {}, {}, {}\n".format(
                        config, epoch, lr, reg, alpha,
                        mean_loss / k, mean_validation / k))
                    config += 1
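Each line of the results file holds, in order: configuration id, epochs, learning rate, regularization, momentum, mean training loss, mean validation loss. A minimal sketch of how such a file could be scanned for the best configuration; the literal "results.csv" path is a hypothetical stand-in for the DEFAULT_DIR/DEFAULT_NAME location used above:

import csv

# Hypothetical sketch: pick the row with the lowest mean validation loss
# (field 7) from the grid-search output written above.
best = None
with open("results.csv") as fp:
    for row in csv.reader(fp):
        values = [v.strip() for v in row]
        if best is None or float(values[6]) < float(best[6]):
            best = values

print("best configuration id:", best[0])
print("mean validation loss:", best[6])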
import numpy as np

# The remaining names (NeuralNetwork, RNN, Activation, CrossEntropy,
# to_categorical, train_test_split, accuracy_score, and the `optimizer`
# instance) are assumed to be provided by the surrounding project.

def gen_mult_ser(nums):
    """Generate `nums` sequences of 10 consecutive integers,
    one-hot encoded over 20 columns."""
    X = np.zeros([nums, 10, 20], dtype=float)
    y = np.zeros([nums, 10, 20], dtype=float)
    for i in range(nums):
        start = np.random.randint(0, 10)
        num_seq = np.arange(start, start + 10)
        X[i] = to_categorical(num_seq, n_col=20)
        # The target is the input shifted one step ahead in time
        y[i] = np.roll(X[i], -1, axis=0)
    y[:, -1, 1] = 1  # Mark endpoint as 1
    return X, y

if __name__ == '__main__':
    X, y = gen_mult_ser(3000)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)

    clf = NeuralNetwork(optimizer=optimizer, loss=CrossEntropy)
    # input_shape matches the one-hot width used above (20 columns)
    clf.add(RNN(10, activation="tanh", bptt_trunc=5, input_shape=(10, 20)))
    clf.add(Activation('softmax'))

    tmp_X = np.argmax(X_train[0], axis=1)
    tmp_y = np.argmax(y_train[0], axis=1)

    print("Number Series Problem:")
    print("X = [" + " ".join(tmp_X.astype("str")) + "]")
    print("y = [" + " ".join(tmp_y.astype("str")) + "]")
    print()

    train_err, _ = clf.fit(X_train, y_train, n_epochs=500, batch_size=512)

    y_pred = np.argmax(clf.predict(X_test), axis=2)
    y_test = np.argmax(y_test, axis=2)
    accuracy = np.mean(accuracy_score(y_test, y_pred))
    print("Accuracy: {}".format(accuracy))
    print()
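A quick toy check of the shift-by-one target construction used in gen_mult_ser, on a 4-step, 5-class sequence (values made up for the illustration):

import numpy as np

# One one-hot row per time step for the class sequence [2, 3, 4, 0]
X = np.eye(5)[[2, 3, 4, 0]]
y = np.roll(X, -1, axis=0)   # row t of y is row t+1 of X; the last row wraps

print(np.argmax(X, axis=1))  # [2 3 4 0]
print(np.argmax(y, axis=1))  # [3 4 0 2]
# The wrapped last row is meaningless, which is why the script above
# overwrites the final time step with a fixed endpoint marker.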
import dataset as ds
from neural_networks import NeuralNetwork
from layers import InputLayer, OutputLayer, DenseLayer
import plot as plt

data = ds.MLCupDataset()

model = NeuralNetwork()
model.add(InputLayer(10))
model.add(DenseLayer(50, fanin=10, activation="sigmoid"))
model.add(DenseLayer(30, fanin=50, activation="sigmoid"))
model.add(OutputLayer(2, fanin=30))

# configuration 322, line 324
# Arguments as in the grid-search script:
# (batch size, epochs, learning rate, ?, regularization, momentum, loss)
model.compile(1143, 600, 0.03, None, 0.000008, 0.3, "mean_squared_error")

loss = model.fit(data.train_data_patterns, data.train_data_targets)
print(loss[-1])

plt.plot_loss(loss)
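The plot module imported above is project-local and not shown here. A minimal sketch of what its plot_loss helper could look like, assuming it simply draws the per-epoch loss curve with matplotlib:

import matplotlib.pyplot as plt

def plot_loss(loss):
    # Draw the per-epoch training loss curve
    plt.figure()
    plt.plot(loss)
    plt.xlabel("epoch")
    plt.ylabel("mean squared error")
    plt.show()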
import dataset as ds
from neural_networks import NeuralNetwork
from layers import InputLayer, OutputLayer, DenseLayer
import matplotlib.pyplot as plt

data = ds.MonksDataset()

my_model = NeuralNetwork()
my_model.add(InputLayer(17))
my_model.add(DenseLayer(10, fanin=17, activation="sigmoid"))
my_model.add(OutputLayer(1, fanin=10, activation="sigmoid"))
my_model.compile(122, 600, 0.075, None, 0.0001, 0, "mean_squared_error")

(loss, test_loss, accuracy, test_accuracy) = my_model.fit_monks(
    data.train_data_patterns, data.train_data_targets,
    data.test_data_patterns, data.test_data_targets)

print("Loss: {}".format(loss[-1]))
print("Test Loss: {}".format(test_loss[-1]))
print("Accuracy: {}".format(accuracy[-1]))
print("Test accuracy: {}".format(test_accuracy[-1]))

# Figure 1: training / test loss; Figure 2: training / test accuracy
plot1 = plt.figure(1)
plt.plot(loss)
plt.plot(test_loss, "--")

plot2 = plt.figure(2)
plt.plot(accuracy)
plt.plot(test_accuracy, "--")
plt.show()