def scenario3_1_3bis():
    """Train a multi-layer NN on a generated dataset and plot the result.

    Uses a hidden layer of 5 units, full-dataset batches of 100 over 100
    epochs, then shows the final decision boundary plus an animation of
    its evolution.
    """
    n_samples = 100
    X, T = generateDataSet(n_samples, 1)

    net = NN2.MultipleLayer(batch_size=100, nb_eboch=100, lr=0.001,
                            hidden_layer_size=5)

    # fit() expects targets as a column vector, not a flat array.
    targets = np.reshape(T, (len(T), 1))
    w_hist, err_hist = net.fit(X, targets)

    graph.plotNNInformations("Multiple Layer NN", X, T, w_hist[-1], err_hist)
    graph.plotDecisionBoundaryAnim("Multiple Layer NN Anim", X, T, w_hist)
def scenario3_2_2():
    """8-3-8 encoder experiment.

    Builds an 8x8 matrix of -1 with +1 on the diagonal (one active unit
    per pattern), trains an autoencoder (input == target) with a 3-unit
    hidden layer, then prints each reconstruction against the expected
    pattern and plots the learning curve.
    """
    n_patterns = 8
    hidden_units = 3

    # One-hot-style patterns encoded as +1 on the diagonal, -1 elsewhere.
    patterns = np.ones((n_patterns, n_patterns)) * -1
    patterns[np.diag_indices(n_patterns)] = 1

    net = NN2.MultipleLayer(hidden_layer_size=hidden_units, batch_size=-1,
                            nb_eboch=10000, lr=0.01)

    # Autoencoder: the network learns to reproduce its own input.
    _, err_hist = net.fit(patterns, patterns)

    for i, reconstruction in enumerate(net.predict(patterns)):
        print("Encoder predicts : ")
        print(reconstruction)
        print("Good answer was : ")
        print(patterns[i])
        # NOTE(review): exact equality on float predictions — presumably
        # predict() outputs thresholded ±1 values; confirm against NN2.
        if np.array_equal(patterns[i], reconstruction):
            print("GOOD!!!")
        else:
            print("FAIL!!!")
        print("--------------------------")

    graph.plotError("Encoder Learning Curve", err_hist)
def scenario3_2_1():
    """Train a multi-layer NN on an XOR-style four-cluster problem.

    Samples four unit-variance Gaussian clusters at the corners
    (+-V, +-V); the two clusters on the main diagonal get label +1 and
    the off-diagonal pair gets -1, so the classes are not linearly
    separable. Trains a 2-hidden-unit network and plots the decision
    boundary and its animation.
    """
    per_cluster = 100
    offset = 5
    identity_cov = [[1, 0], [0, 1]]

    samples = list(np.random.multivariate_normal(
        [offset, offset], identity_cov, per_cluster))        # Blue
    samples += list(np.random.multivariate_normal(
        [-offset, -offset], identity_cov, per_cluster))      # Blue
    samples += list(np.random.multivariate_normal(
        [offset, -offset], identity_cov, per_cluster))       # Red
    samples += list(np.random.multivariate_normal(
        [-offset, offset], identity_cov, per_cluster))       # Red

    # Diagonal clusters -> +1, anti-diagonal clusters -> -1.
    labels = [1] * 2 * per_cluster + [-1] * 2 * per_cluster

    # Shuffle samples and labels with the same permutation.
    perm = np.random.permutation(len(samples))
    X = (np.array(samples)[perm]).T
    T = np.array(labels)[perm]

    net = NN2.MultipleLayer(batch_size=-1, nb_eboch=100, lr=0.001,
                            hidden_layer_size=2)

    # fit() expects targets as a column vector.
    targets = np.reshape(T, (len(T), 1))
    w_hist, err_hist = net.fit(X, targets)

    graph.plotNNInformations("Multiple Layer NN", X, T, w_hist[-1], err_hist)
    graph.plotDecisionBoundaryAnim("Multiple Layer NN Anim", X, T, w_hist)