def create_series(in_array, window_size, period, minV, maxV, layer_nodes=None, sigmoid='tanh', epochs=50000):
    """Train a small feed-forward network on sliding windows of ``in_array``
    and forecast the next ``period`` values.

    Parameters
    ----------
    in_array : sequence of numbers
        The historical series to learn from.
    window_size : int
        Number of consecutive samples fed to the network per training example.
    period : int
        How many future values to predict.
    minV, maxV : number
        Bounds used to scale values into the network's working range.
    layer_nodes : list of int, optional
        Hidden-layer sizes; defaults to ``[2, 3]``.
    sigmoid : str
        Activation name passed through to ``NeuralNetwork``.
    epochs : int
        Training iterations passed to ``fit``.

    Returns
    -------
    list
        ``period`` forecast values, rescaled back to the original range.
    """
    # Avoid the shared-mutable-default pitfall; [2, 3] stays the effective default.
    if layer_nodes is None:
        layer_nodes = [2, 3]
    global_max = maxV
    global_min = minV
    # Build sliding-window training pairs, scaled into the network's range:
    # each window of `window_size` samples predicts the sample that follows it.
    X_train = []
    y_train = []
    for i in range(len(in_array) - window_size):
        X_train.append([_scale_to_binary(in_array[i + j], global_min, global_max)
                        for j in range(window_size)])
        y_train.append(_scale_to_binary(in_array[i + window_size], global_min, global_max))
    X_train = np.array(X_train)
    y_train = np.array(y_train)
    # Topology: input layer sized to the window, followed by the hidden layers.
    layers = [window_size] + list(layer_nodes)
    n = NeuralNetwork(layers, sigmoid)
    n.fit(X_train, y_train, epochs)
    # Seed the rolling prediction window with the last `window_size` observations.
    # Scale into a NEW list so the caller's data is never modified in place
    # (the original scaled a bare slice, which is a view for numpy arrays).
    X_test = deque(_scale_to_binary(v, global_min, global_max)
                   for v in in_array[-window_size:])
    preds = []
    for _ in range(period):
        val = n.predict(X_test)
        preds.append(rescale_from_binary(val[0], global_min, global_max))
        # Slide the window: drop the oldest sample, append the newest prediction.
        X_test.rotate(-1)
        X_test[window_size - 1] = val[0]
    return preds
def create_series(in_array, window_size, period, minV, maxV, layer_nodes=[2, 3], sigmoid='tanh', epochs=50000):
    """Fit a feed-forward net to windowed slices of ``in_array``, then roll the
    trained model forward to produce ``period`` predictions.

    Values are scaled into [minV, maxV]-relative range before training and
    rescaled on the way out.  Returns a list of ``period`` forecasts.
    """
    # NOTE: the mutable default is kept for interface fidelity; it is only
    # read here, never mutated.
    lo, hi = minV, maxV
    n_samples = len(in_array) - window_size
    # Every window of `window_size` points is an input row; the point right
    # after the window is its target.
    X_train = np.array([[_scale_to_binary(in_array[k + j], lo, hi)
                         for j in range(window_size)]
                        for k in range(n_samples)])
    y_train = np.array([_scale_to_binary(in_array[k + window_size], lo, hi)
                        for k in range(n_samples)])
    # Input layer matches the window width; hidden layers follow.
    net = NeuralNetwork([window_size] + list(layer_nodes), sigmoid)
    net.fit(X_train, y_train, epochs)
    # Take the trailing window and scale it in place, exactly as the
    # original implementation did.
    window = in_array[-window_size:]
    for idx in range(len(window)):
        window[idx] = _scale_to_binary(window[idx], lo, hi)
    window = deque(window)
    forecast = []
    for _ in range(period):
        out = net.predict(window)
        forecast.append(rescale_from_binary(out[0], lo, hi))
        # Shift left and overwrite the last slot with the fresh prediction.
        window.rotate(-1)
        window[window_size - 1] = out[0]
    return forecast
X = digits.data
Y = digits.target
# One-hot encode the integer labels into 10 class columns.
Y_classes = np.zeros((X.shape[0], 10))
Y_classes[np.arange(Y.shape[0]), Y] = 1
Y = Y_classes
X_train, X_test, y_train, y_test = train_test_split(X, Y)
nn = NeuralNetwork(X_train, y_train, X_train.shape[1], 0.01, 0.1, 1000, 100, 100, y_train.shape[1])
# Training path (run once, then reuse the stored weights):
# nn.train_neural_network()
# Save theta values.
# nn.save_theta()
# This is executed once we have trained the neural network.
# Inference path: restore the saved weights and classify one random test digit.
sample = random.randrange(0, X_test.shape[0])
nn.load_theta('theta0.csv', 'theta1.csv', 'theta2.csv')
prediction = np.argmax(nn.predict(X_test[sample, :].reshape((-1, 1))))
label = np.argmax(y_test[sample])
# Show the 8x8 digit image with its predicted and true class.
plt.gray()
plt.matshow(X_test[sample, :].reshape((8, 8)))
plt.xlabel("Prediction: " + str(prediction) + " Label: " + str(label))
plt.show()
X_train, y_train = generate_halfmoon_dataset(noise=0.1)
X_test, y_test = generate_halfmoon_dataset(noise=0.1)
nn = NeuralNetwork([2, 4, 2, 1], 0.03)
# Train only when no cached weight file exists; otherwise reload the saved network.
if not os.path.isfile("nn_halfmoon_noise_0.1_tanh.npy"):
    train = [X_train, y_train]
    nn.train_network(train, n_epochs=0, threshold=0.001)
    np.save("nn_halfmoon_noise_0.1_tanh", nn.get_network())
else:
    W = np.load("nn_halfmoon_noise_0.1_tanh.npy")
    print("loaded weight matrix W = %s\n" % (W))
    nn.load_network(W)
# Rounded network outputs for the held-out test set.
y_test_test = []
for i in range(len(y_test)):
    y_test_test.append(np.around(np.squeeze(nn.predict(X_test[i]))))
# Rounded network outputs for the training set.
# BUG FIX: the original iterated range(len(y_test)) while indexing X_train,
# which only worked because both datasets happened to be the same size.
y_train_test = []
for j in range(len(y_train)):
    y_train_test.append(np.around(np.squeeze(nn.predict(X_train[j]))))
plt.subplot(221)
plt.title("Train Data")
# BUG FIX: the original scattered X_test points colored by the training
# labels; the "Train Data" panel should show the training points.
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, s=40)
plt.subplot(222)
plt.title("Testing Training Data")
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train_test, s=40)
plt.subplot(223)
plt.title("Test Data")