def main():
    l1 = NeuronLayer((28, 28), True, False)
    l2 = NeuronLayer((10, 10))
    l3 = NeuronLayer((10,), False, True)

    network = NeuralNetwork()
    network.add_layer(l1)
    network.add_layer(l2)
    network.add_layer(l3)
    network.connect_layers()

    pr = cProfile.Profile()
    pr.enable()

    training_images = os.path.abspath(os.path.join(MAIN_MODULE_PATH, "..", "data", "train-images.idx3-ubyte"))
    training_labels = os.path.abspath(os.path.join(MAIN_MODULE_PATH, "..", "data", "train-labels.idx1-ubyte"))
    network.load_data(training_images, training_labels)

    test_images = os.path.join(MAIN_MODULE_PATH, "..", "data", "t10k-images.idx3-ubyte")
    test_labels = os.path.join(MAIN_MODULE_PATH, "..", "data", "t10k-labels.idx1-ubyte")
    network.load_test_data(test_images, test_labels)

    network.SGD(0.1, 0.1, 30, 10)

    pr.disable()
    pr.print_stats(sort="cumtime")
def run_neural_nets(dataset, url_weight="sp", encoder_length=24, encoder_size=15,
                    decoder_length=8, decoder_size=9, is_test=False, restore=False,
                    model="NN", pre_train=False, forecast_factor=0):
    tf.reset_default_graph()
    print("training %s with decoder_length = %i" % (model, decoder_length))
    if model == "NN":
        model = NeuralNetwork(encoder_length=encoder_length, encoder_vector_size=encoder_size,
                              decoder_length=decoder_length, decoder_vector_size=decoder_size)
    elif model == "SAE":
        model = StackAutoEncoder(encoder_length=encoder_length, encoder_vector_size=encoder_size,
                                 decoder_length=decoder_length, pre_train=pre_train,
                                 forecast_factor=forecast_factor)
    else:
        model = Adain(encoder_length=encoder_length, encoder_vector_size=encoder_size,
                      decoder_length=decoder_length, forecast_factor=forecast_factor)
    print('==> initializing models')
    with tf.device('/%s' % p.device):
        model.init_model()
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
    tconfig = get_gpu_options()
    with tf.Session(config=tconfig) as session:
        if not restore:
            session.run(init)
        else:
            print("==> Reload pre-trained weights")
            saver.restore(session, "weights/%s_%ih.weights" % (url_weight, decoder_length))
        print("==> Loading dataset")
        train, valid = utils.process_data_grid(len(dataset), p.batch_size, encoder_length,
                                               decoder_length, is_test)
        model.set_data(dataset, train, valid, None, session)
        if not is_test:
            best_val_epoch = 0
            best_val_loss = float('inf')
            print('==> starting training')
            for epoch in xrange(p.total_iteration):
                print('Epoch {}'.format(epoch))
                start = time.time()
                train_loss, _ = model.run_epoch(session, train, epoch, None,
                                                train_op=model.train_op, train=True)
                print('Training loss: {}'.format(train_loss))
                valid_loss, _ = model.run_epoch(session, valid, epoch, None)
                print('Validation loss: {}'.format(valid_loss))
                if valid_loss < best_val_loss:
                    best_val_loss = valid_loss
                    best_val_epoch = epoch
                    print('Saving weights')
                    saver.save(session, 'weights/%s_%ih.weights' % (url_weight, decoder_length))
                if (epoch - best_val_epoch) > p.early_stopping:
                    break
                print('Total time: {}'.format(time.time() - start))
        else:
            # saver.restore(session, url_weight)
            print('==> running model')
            _, preds = model.run_epoch(session, model.train, shuffle=False, stride=2)
            return preds
    return None
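# Hedged usage sketch (added for illustration, not part of the original source).
# It assumes utils.load_file returns the grid-shaped dataset that
# run_neural_nets() above expects; the file path and settings are placeholders.
def _example_restore_and_predict():
    dataset = utils.load_file("data/sample_grid_dataset")  # placeholder path
    # Restore previously saved weights and run the model in test mode.
    preds = run_neural_nets(dataset, url_weight="sp", decoder_length=8,
                            is_test=True, restore=True, model="NN")
    return preds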
def create_series(in_array, window_size, period, minV, maxV,
                  layer_nodes=[2, 3], sigmoid='tanh', epochs=50000):
    global_max = maxV
    global_min = minV

    # Build sliding-window training examples, scaled into the network's range.
    X_train = []
    y_train = []
    for i in range(len(in_array) - window_size):
        X = []
        for j in range(window_size):
            X.append(_scale_to_binary(in_array[i + j], global_min, global_max))
        X_train.append(X)
        y_train.append(_scale_to_binary(in_array[i + window_size], global_min, global_max))
    X_train = np.array(X_train)
    y_train = np.array(y_train)

    # Network layout: window_size inputs followed by the requested hidden layers.
    layers = [window_size]
    for i in range(len(layer_nodes)):
        layers.append(layer_nodes[i])
    n = NeuralNetwork(layers, sigmoid)
    n.fit(X_train, y_train, epochs)

    # Seed the forecast with the last window of observed values.
    X_test = in_array[-window_size:]
    for i in range(len(X_test)):
        X_test[i] = _scale_to_binary(X_test[i], global_min, global_max)

    preds = []
    X_test = deque(X_test)
    for i in range(period):
        val = n.predict(X_test)
        preds.append(rescale_from_binary(val[0], global_min, global_max))
        # Slide the window: rotate out the oldest value and append the new prediction.
        X_test.rotate(-1)
        X_test[window_size - 1] = val[0]
    return preds
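# Hedged usage sketch (illustrative, not from the original source): assuming
# _scale_to_binary, rescale_from_binary, and NeuralNetwork behave as used in
# create_series() above, a toy forecast could be produced like this. The
# series values and hyperparameters are made up for the example.
def _example_create_series():
    toy_series = [1.0, 1.2, 1.1, 1.4, 1.3, 1.6, 1.5, 1.8]
    forecast = create_series(list(toy_series),   # copy so the helper does not mutate the input
                             window_size=3,      # three past values per training example
                             period=2,           # predict two steps ahead
                             minV=min(toy_series),
                             maxV=max(toy_series),
                             layer_nodes=[4, 3],
                             epochs=1000)
    return forecast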
def main():
    node_pairs_list = read_file("digit-examples-all.txt")

    train_split = 1
    print("Split: {0}0% train, {1}0% test".format(str(train_split), str(10 - train_split)))
    train_set_size = (5620 // 10) * train_split
    training_set = node_pairs_list[0:train_set_size + 1]
    test_set = node_pairs_list[train_set_size + 1:]
    #training_set = node_pairs_list[0 : 50]
    #test_set = node_pairs_list[50 : 100]

    # Shared weight matrix: 64 inputs x 10 outputs, initialised uniformly in [-1, 1].
    weights = []
    for i in range(64):
        weights.append([random.uniform(-1, 1) for x in range(10)])

    # Train on each example in turn, carrying the updated weights forward.
    for pair in training_set:
        neural_net = NeuralNetwork(pair[0], pair[1], weights)
        neural_net.train_NN()
        weights = neural_net.weights_list

    euclidean_distance = 0
    for pair in test_set:
        neural_net = NeuralNetwork(pair[0], pair[1], weights)
        euclidean_distance += neural_net.test_NN()
        weights = neural_net.weights_list

    if euclidean_distance == 0:
        avg_euclidean_distance = 0
    else:
        avg_euclidean_distance = euclidean_distance / len(test_set)
    print("Average Euclidean Distance: {}".format(avg_euclidean_distance))
def initialize(self, args=None):
    if args is None:
        #random.seed(1)
        self.nn = NeuralNetwork(5, 5, 1)
    else:
        self.nn = args['nn']
    self.crashed = False

    # movementInfo = showWelcomeAnimation()
    # select random player sprites
    randPlayer = random.randint(0, len(settings.PLAYERS_LIST) - 1)
    #print(type(settings.IMAGES['player'][0][0]))
    self.movementInfo = {
        'playery': int((settings.SCREENHEIGHT - settings.IMAGES['player'][0][0].get_height()) / 2),
        'basex': -10,
        'playerIndexGen': cycle([0, 1, 2, 1]),
    }
    self.basex = self.movementInfo['basex']
    self.score = self.playerIndex = self.loopIter = 0
    self.playerIndexGen = self.movementInfo['playerIndexGen']
    self.playerx, self.playery = int(settings.SCREENWIDTH * 0.2), self.movementInfo['playery']

    # player velocity, max velocity, downward acceleration, acceleration on flap
    self.playerVelY = -9       # player's velocity along Y, default same as playerFlapped
    self.playerMaxVelY = 10    # max vel along Y, max descend speed
    self.playerMinVelY = -8    # min vel along Y, max ascend speed
    self.playerAccY = 1        # player's downward acceleration
    self.playerRot = 45        # player's rotation
    self.playerVelRot = 3      # angular speed
    self.playerRotThr = 20     # rotation threshold
    self.playerFlapAcc = -9    # player's speed on flapping
    self.playerFlapped = False # True when player flaps
def cross_validate(network_shape, epochs_num, learn_rate, _groups_x, _groups_y):
    k = _groups_x.shape[0]
    results = np.zeros(k)
    for i in range(k):
        train_x = None
        train_y = None
        valid_x = np.copy(_groups_x[i])  # the validation set for the i'th iteration.
        valid_y = np.copy(_groups_y[i])
        net = NeuralNetwork(network_shape, epochs_num, learn_rate)
        for j in range(k):
            if j != i:  # arrange the train set for the i'th iteration.
                if train_x is None:
                    train_x = np.copy(_groups_x[j])
                    train_y = np.copy(_groups_y[j])
                else:
                    train_x = np.concatenate((train_x, _groups_x[j]), axis=0)
                    train_y = np.concatenate((train_y, _groups_y[j]), axis=0)
        # Normalise the training set, then apply the same transform to the validation set.
        old_mins, denoms = norm.minmax_params(train_x)
        train_x = norm.minmax(train_x, 0, 1)
        valid_x = norm.minmax(valid_x, 0, 1, old_mins, denoms)
        net.train(train_x, train_y)
        results[i] = net.accuracy(valid_x, valid_y)
    print(results)
    return np.average(results)
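# Hedged fold-construction sketch (illustrative, not from the original source):
# cross_validate() above expects _groups_x and _groups_y as arrays of k equally
# sized folds, so one way to build them is with numpy, assuming the sample count
# is divisible by k. data_x, data_y, the network shape, and the hyperparameters
# below are assumptions.
def _example_cross_validate(data_x, data_y, k=5):
    idx = np.random.permutation(len(data_x))        # shuffle before splitting
    groups_x = np.array(np.split(data_x[idx], k))   # k folds along the first axis
    groups_y = np.array(np.split(data_y[idx], k))
    return cross_validate([data_x.shape[1], 32, 10], epochs_num=20,
                          learn_rate=0.01, _groups_x=groups_x, _groups_y=groups_y)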
    # (continuation of a helper defined above this excerpt: element-wise difference of two game states)
    #result = [a - b for a, b in zip(A, B)]
    result = np.subtract(A, B)
    return result


#This is the next choice
if __name__ == '__main__':
    test = test()
    print("getting dataset")
    test.getDataset(1)
    print(np.asarray(test.y).reshape((9, -1)))
    print(np.shape(test.X))
    print(np.shape(test.y))

    nn = NeuralNetwork([9, 18, 18, 9])
    nn.train(X=np.asarray(test.X).reshape((9, -1)),
             y=np.asarray(test.y).reshape((9, -1)),
             batch_size=9,
             epochs=2,
             learning_rate=0.4,
             print_every=10,
             validation_split=0.2,
             tqdm_=False,
             plot_every=20000)
    #X is the current gamestate and y is the next move to make
    #X = np.random.random((1,9))
    #print(X)
    #network = Network()
import random

import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split

from NeuralNet import NeuralNetwork

digits = load_digits()
X = digits.data
Y = digits.target

# One-hot encode the ten digit classes.
Y_classes = np.zeros((X.shape[0], 10))
for i in range(Y.shape[0]):
    Y_classes[i, Y[i]] = 1
Y = Y_classes

X_train, X_test, y_train, y_test = train_test_split(X, Y)

nn = NeuralNetwork(X_train, y_train, X_train.shape[1], 0.01, 0.1, 1000, 100, 100, y_train.shape[1])
# nn.train_neural_network()
# Save theta values.
# nn.save_theta()

# This is executed once we have trained the neural network.
index = random.randrange(0, X_test.shape[0])
nn.load_theta('theta0.csv', 'theta1.csv', 'theta2.csv')
prediction = np.argmax(nn.predict(X_test[index, :].reshape((-1, 1))))
label = np.argmax(y_test[index])
plt.gray()
plt.matshow(X_test[index, :].reshape((8, 8)))
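# Hedged follow-up sketch (illustrative, not from the original source): assuming
# nn.predict takes a single column vector, as in the call above, and returns
# per-class scores, the loaded thetas can be scored over the whole test split
# instead of one random index.
def _example_test_accuracy():
    correct = 0
    for i in range(X_test.shape[0]):
        pred = np.argmax(nn.predict(X_test[i, :].reshape((-1, 1))))
        if pred == np.argmax(y_test[i]):
            correct += 1
    return correct / X_test.shape[0]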
def __init__(self, args=None):
    if args is None:
        #random.seed(1)
        self.nn = NeuralNetwork(5, 4, 1)
    else:
        self.nn = args['nn']
def run_neural_nets(url_feature="", attention_url="", url_weight="sp", encoder_length=24,
                    encoder_size=15, decoder_length=8, decoder_size=9, is_test=False,
                    restore=False, model="NN", pre_train=False):
    if model == "NN":
        model = NeuralNetwork(encoder_length=encoder_length, encoder_vector_size=encoder_size,
                              decoder_length=decoder_length, decoder_vector_size=decoder_size)
    elif model == "SAE":
        model = StackAutoEncoder(encoder_length=encoder_length, encoder_vector_size=encoder_size,
                                 decoder_length=decoder_length, pre_train=pre_train)
    else:
        model = Adain(encoder_length=encoder_length, encoder_vector_size=encoder_size,
                      decoder_length=decoder_length)
    print('==> initializing models')
    with tf.device('/%s' % p.device):
        model.init_model()
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
    utils.assert_url(url_feature)
    tconfig = get_gpu_options()
    sum_dir = 'summaries'
    if not utils.check_file(sum_dir):
        os.makedirs(sum_dir)
    train_writer = None
    with tf.Session(config=tconfig) as session:
        if not restore:
            session.run(init)
        else:
            print("==> Reload pre-trained weights")
            saver.restore(session, url_weight)
            # Reduce the checkpoint path to its base name without the ".weights" suffix.
            url_weight = url_weight.split("/")[-1]
            if url_weight.endswith(".weights"):
                url_weight = url_weight[:-len(".weights")]
        if not is_test:
            suf = time.strftime("%Y.%m.%d_%H.%M")
            train_writer = tf.summary.FileWriter(sum_dir + "/" + url_weight + "_train",
                                                 session.graph, filename_suffix=suf)
            valid_writer = tf.summary.FileWriter(sum_dir + "/" + url_weight + "_valid",
                                                 session.graph, filename_suffix=suf)
        print("==> Loading dataset")
        dataset = utils.load_file(url_feature)
        if dataset:
            dataset = np.asarray(dataset, dtype=np.float32)
        # Use only the second half of the data.
        lt = len(dataset)
        st = int(lt / 2)
        lt = lt - st
        dataset = dataset[st:, :, :]
        train, valid = utils.process_data_grid(lt, p.batch_size, encoder_length,
                                               decoder_length, is_test)
        if attention_url:
            attention_data = utils.load_file(attention_url)
        else:
            attention_data = None
        model.set_data(dataset, train, valid, attention_data, session)
        if not is_test:
            best_val_epoch = 0
            best_val_loss = float('inf')
            # best_overall_val_loss = float('inf')
            print('==> starting training')
            for epoch in xrange(p.total_iteration):
                print('Epoch {}'.format(epoch))
                start = time.time()
                train_loss, _ = model.run_epoch(session, train, epoch, train_writer,
                                                train_op=model.train_op, train=True)
                print('Training loss: {}'.format(train_loss))
                valid_loss, _ = model.run_epoch(session, valid, epoch, valid_writer)
                print('Validation loss: {}'.format(valid_loss))
                if valid_loss < best_val_loss:
                    best_val_loss = valid_loss
                    best_val_epoch = epoch
                    print('Saving weights')
                    saver.save(session, 'weights/%s.weights' % url_weight)
                if (epoch - best_val_epoch) > p.early_stopping:
                    break
                print('Total time: {}'.format(time.time() - start))
        else:
            # saver.restore(session, url_weight)
            print('==> running model')
            _, preds = model.run_epoch(session, model.train, shuffle=False)
            pt = re.compile("weights/([A-Za-z0-9_.]*).weights")
            name = pt.match(url_weight)
            if name:
                name_s = name.group(1)
            else:
                name_s = url_weight
            utils.save_file("test_sp/%s" % name_s, preds)
import os.path

import numpy as np
from sklearn import datasets
from matplotlib import pyplot as plt

from NeuralNet import NeuralNetwork


def generate_halfmoon_dataset(n_samples=200, shuffle=True, noise=0):
    np.random.seed(0)
    X, y = datasets.make_moons(n_samples, shuffle=shuffle, noise=noise)
    return X, y


X_train, y_train = generate_halfmoon_dataset(noise=0.1)
X_test, y_test = generate_halfmoon_dataset(noise=0.1)

nn = NeuralNetwork([2, 4, 2, 1], 0.03)
if not os.path.isfile("nn_halfmoon_noise_0.1_tanh.npy"):
    train = [X_train, y_train]
    nn.train_network(train, n_epochs=0, threshold=0.001)
    np.save("nn_halfmoon_noise_0.1_tanh", nn.get_network())
else:
    W = np.load("nn_halfmoon_noise_0.1_tanh.npy")
    print("loaded weight matrix W = %s\n" % (W))
    nn.load_network(W)

y_test_test = []
for i in range(len(y_test)):
    y_test_test.append(np.around(np.squeeze(nn.predict(X_test[i]))))

y_train_test = []
for j in range(len(y_train)):
    # Assumed completion: the original fragment breaks off after this loop header,
    # so the body mirrors the test-set loop above but over the training samples.
    y_train_test.append(np.around(np.squeeze(nn.predict(X_train[j]))))
from NeuralNet import NeuralNetwork
from NeuronLayer import NeuronLayer

import cProfile
import os

if __name__ == "__main__":
    l1 = NeuronLayer((28, 28), True, False)
    l2 = NeuronLayer((100,))
    l3 = NeuronLayer((10,), False, True)

    network = NeuralNetwork()
    network.add_layer(l1)
    network.add_layer(l2)
    network.add_layer(l3)
    network.connect_layers()

    pr = cProfile.Profile()
    pr.enable()

    network.load_data(os.path.abspath("data/train-images.idx3-ubyte"),
                      os.path.abspath("data/train-labels.idx1-ubyte"))
    network.load_test_data(os.path.abspath("data/t10k-images.idx3-ubyte"),
                           os.path.abspath("data/t10k-labels.idx1-ubyte"))

    network.SGD(0.1, 0.1, 30, 10)

    pr.disable()
    pr.print_stats(sort="cumtime")
    # (continuation of plot_history(history), whose definition begins above this excerpt)
    plt.grid(1)
    plt.xlabel('epochs')
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(range(history['epochs'])[:n], history['train_acc'][:n], label='train_acc')
    plt.plot(range(history['epochs'])[:n], history['test_acc'][:n], label='test_acc')
    plt.title('train & test accuracy')
    plt.grid(1)
    plt.xlabel('epochs')
    plt.legend()


#LINEAR PROBLEM
data = datasets.make_blobs(n_samples=1000, centers=2, random_state=2)
X = data[0].T
y = np.expand_dims(data[1], 1).T

neural_net = NeuralNetwork([2, 4, 4, 1], seed=0)
history = neural_net.train(X=X,
                           y=y,
                           batch_size=16,
                           epochs=100,
                           learning_rate=0.4,
                           validation_split=0.2)
plot_history(history)