def load_net(sFileName):
    """Load a NeuralNet from a CSV file.

    The file must start with '&' and then a single integer giving the
    number of input units into the first hidden layer; '&' must always
    be followed by an integer and then perceptron lines.  A perceptron
    line is: weights..., w0, index.
    """
    lstLayers = []
    # 'with' guarantees the file is closed (the original leaked the handle).
    with open(sFileName, "r") as fIn:
        reader = csv.reader(fIn)
        next(reader)  # skip the leading '&' marker row
        lstPcpt = []
        line = next(reader)
        cInputs = int(line[-1])      # input count for the first layer
        cInputsFirst = cInputs
        for line in reader:
            # BUG FIX: the original compared with 'is', which tests object
            # identity and only "works" because CPython interns small
            # strings/ints; '==' is the correct value comparison.
            if line[-1] == '&':
                # '&' row terminates the current layer.
                lstLayers.append(nn.NeuralNetLayer(cInputs, lstPcpt))
                lstPcpt = []
            elif len(line) == 1:
                # A lone integer gives the input count of the next layer.
                cInputs = int(line[-1])
            else:
                # BUG FIX: a list comprehension instead of map() — map()'s
                # return value is not subscriptable on Python 3, so the
                # slices below would fail there.
                pcptLine = [float(x) for x in line]
                lstPcpt.append(
                    nn.Perceptron(pcptLine[:-2], pcptLine[-2],
                                  int(pcptLine[-1])))
        # Flush the final (output) layer, which has no trailing '&'.
        lstLayers.append(nn.NeuralNetLayer(cInputs, lstPcpt))
    return nn.NeuralNet(cInputsFirst, lstLayers)
def build_net(listCLayer):
    """Build a randomly initialized net.

    listCLayer gives the unit count per layer: listCLayer[0] is the
    input width, each following entry the number of perceptrons in the
    next layer.  Weights are drawn uniformly from [-1, 1], biases are 0.
    """
    layers = []
    # Pair each layer's fan-in with the number of units it feeds.
    for fan_in, unit_count in zip(listCLayer, listCLayer[1:]):
        pcpts = [nn.Perceptron(randlist(-1.0, 1.0, fan_in), 0.0, ix)
                 for ix in xrange(unit_count)]
        layers.append(nn.NeuralNetLayer(fan_in, pcpts))
    return nn.NeuralNet(listCLayer[0], layers)
def _load_model_from_stream(self, f):
    """Deserialize this composite model from an already-open stream *f*.

    The stream is consumed in order — first the feed-forward net, then
    the RNN — so these two loads must not be reordered.
    """
    self.net = nn.NeuralNet()
    self.net.load_model_from_stream(f)
    self.rnn = load_rnn_from_stream(f)
    # Recompute cached parameter-vector size after both parts are loaded.
    self._update_param_size()
    # The composite's input dim comes from the feed-forward front end,
    # its output dim from the RNN back end.
    self.in_dim = self.net.in_dim
    self.out_dim = self.rnn.out_dim
def __init__(self):
    """Initialize the agent at the start position with a fresh brain."""
    # Vertical kinematics: position, velocity, and per-tick gravity.
    self.y_pos, self.y_vel, self.y_acc = 200, 0, 0.6
    self.jump_strength = 9
    # Fitness accumulates over the agent's lifetime; starts at zero.
    self.fitness = 0
    # Six inputs: y position, distance to next obstacle, end of next
    # obstacle, y velocity, gap start position, gap end position.
    # Two hidden layers (4 then 2 units), two outputs.
    self.brain = nn.NeuralNet(6, [4, 2], 2)
def copy_net(net):
    """Return a separate, independent copy of *net*.

    Weight lists are shallow-copied so mutating the copy's weights never
    touches the original; scalar fields are immutable and shared safely.
    """
    cloned_layers = []
    for layer in net.listLayer:
        cloned_pcpts = [
            nn.Perceptron(copy.copy(p.listDblW), p.dblW0, p.ix)
            for p in layer.listPcpt
        ]
        cloned_layers.append(nn.NeuralNetLayer(layer.cInputs, cloned_pcpts))
    return nn.NeuralNet(net.cInputs, cloned_layers)
def test_nn(X_train, y_train, X_test, y_test): model = nn.NeuralNet() model.add(nn.DenseLayer(512)) model.add(nn.SigmoidLayer()) model.add(nn.DropoutLayer(0.3)) model.add(nn.DenseLayer(512)) model.add(nn.SigmoidLayer()) model.add(nn.DropoutLayer(0.3)) model.add(nn.DenseLayer(10)) model.add(nn.SoftmaxLayer()) my_history = model.fit(X_train, y_train, num_epochs=20,\ learning_rate=0.01, batch_size=128,\ X_test=X_test, y_test=y_test) predictions = model.predict(X_test) predictions = np.argmax(predictions, axis=0) labels = np.argmax(y_test, axis=1) print "accuracy of my model: {}".format(sum(predictions == labels)*1.0/len(predictions))
move = pick_move(state, inputs, net, disp) state.make_move(move) target = np.zeros((9)) target[move.index] = 1 if state.turn == 1: Xlist.append((inputs, target)) else: Olist.append((inputs, target)) if disp: print(state) if state.win_state() == 0.5: return Xlist + Olist return Xlist if state.turn == 2 else Olist net = nn.NeuralNet((18, 12, 9)) train_its = 0 for _ in range(100000): win_pattern = play_game(net) if win_pattern is not None: net.train(win_pattern, its=10) train_its += 1 if train_its % 500 == 0: print(train_its) play_game(net, disp=True)
ITERATIONS_PER_FRAME = 25 # how many neural net training steps to do per frame ARCH = [2, 10, 20, 20, 3] # neural net architecture configuration PARAM_DELTA = 1e-2 # learning rate for adam optimizer img = pg.image.load(IMAGE_FILE).get_image_data() img_w, img_h = img.width, img.height pixels = img.get_data("RGB", img_w * 3) X = [[i, j] for i in range(img_w) for j in range(img_h)] pxl_arr = pixel_bytes_to_array(pixels, img_w, img_h) trained_pxl_arr = np.zeros_like(pxl_arr) trained_pixels = array_to_pixel_bytes(trained_pxl_arr) trained_img = pg.image.ImageData(img_w, img_h, "RGB", trained_pixels) net = nn.NeuralNet(ARCH, PARAM_DELTA) window = DemoWindow(W, H, trained_img, FULLSCREEN) def update(dt): global trained_img, img_w, img_h, net, pxl_arr for i in range(ITERATIONS_PER_FRAME): loss = net.train_iteration(X, pxl_arr) print("loss = %lf" % loss) trained_pxl_arr = net.eval(X) trained_pixels = array_to_pixel_bytes(trained_pxl_arr) trained_img.set_data("RGB", img_w * 3, trained_pixels) pg.clock.schedule_interval(update, 1. / FPS) pg.app.run()
import nn
import math
import random
import numpy as np

# Fit a 1-4-2-1 net to samples of sin(x) for x drawn uniformly from [0, 1).
layer_shape = (1, 4, 2, 1)
net = nn.NeuralNet(layer_shape)
samples = [np.random.random(1) for _ in range(100)]
targets = [np.array([math.sin(s)]) for s in samples]
patterns = list(zip(samples, targets))
# Train on each half of the data in turn, marking progress on stdout.
print('1')
net.train(patterns[:50])
print('2')
net.train(patterns[50:])
move = sorted(zip(legal_moves, move_values), key=lambda z: z[1])[-1][0] if state.turn == 1: xmoves.append(move) elif state.turn == 2: omoves.append(move) state.make_move(move) if state.win_state() == 0.5: return winner = 3 - state.turn if winner == 1: wmoves = xmoves lmoves = omoves elif winner == 2: wmoves = omoves lmoves = xmoves while len(lmoves) and len(wmoves): state.unmake_move(wmoves.pop()) state.unmake_move(lmoves.pop()) #### value_net = nn.NeuralNet((18, 10, 1)) play_through(value_net)
def run_test(arguments):
    """Fly the Quadcopter through a fixed waypoint course using the policy net.

    *arguments.test*, when a string, is a TensorFlow checkpoint path to
    restore the policy from.  Runs a 2500-step episode, switching to the
    next target every 250 steps, and visualizes the trajectory live.
    """
    # Reset Tensorflow graph
    tf.reset_default_graph()
    # Instantiate publisher and subscriber for Gazebo
    Interface.init()
    traj = Trajectory(Interface)
    visualizer = Visualizer(0.046 * 50)
    refresh_rate = 1 / 20.  # visualizer redraw period (20 Hz)
    # Initialize policy network
    policy_net = nn.NeuralNet(shape=Config.POLICY_SHAPE)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        if isinstance(arguments.test, str):
            # Restore trained weights from the given checkpoint.
            saver.restore(sess, arguments.test)
        for _ in range(1):
            # Generate random start position for the Quadcopter
            traj.set_pose(Trajectory.random_pose())
            refresh_time = 0
            # Waypoint course: up, around a square, back down.
            TARGETS = [[0, 0, 0], [0, 0, 5], [5, 5, 5], [5, -5, 5],
                       [-5, -5, 5], [-5, 5, 5], [5, 5, 5], [0, 0, 5],
                       [0, 0, 0]]
            c = 0
            positions = []
            for i in range(2500):
                # Advance to the next waypoint every 250 steps.
                if i % 250 == 0:
                    TARGET = TARGETS[c]
                    c += 1
                    print("TARGET: ", TARGET)
                # Get state information from drone subscriber
                state = traj.get_state()
                orientation = state[0:9]
                position = state[9:12]
                angular = state[12:15]
                linear = state[15:18]
                positions.append(position)
                # Calculate rotation matrix from quaternion
                orientation = Quaternion(
                    matrix=np.reshape(orientation, (3, 3)))
                state = {
                    'position': position,
                    'rotation_matrix': orientation.rotation_matrix
                }
                # Throttle visualizer redraws to refresh_rate.
                if refresh_time - refresh_rate < time():
                    visualizer.update(state, 0)
                    visualizer.draw(positions)
                    visualizer.draw_target(TARGETS)
                    refresh_time = time()
                orientation = np.ndarray.flatten(orientation.rotation_matrix)
                # Calculate relative distance to target position
                #position = np.subtract(position, TARGET)
                # NOTE(review): hard-coded offset applied before the target
                # subtraction — presumably a calibration bias; confirm.
                position = np.subtract(position,
                                       [-0.29748929, -0.66516153, -0.43737027])
                position = np.subtract(position, TARGET)
                # Concatenate all to generate an input state vector for the networks
                state = np.concatenate(
                    (orientation, position, angular, linear))
                # Predict action with policy network
                action = Utils.forward(sess, policy_net, [state])[0]
                action = Config.ACTION_SCALE * action
                # Feed action vector to the drone
                traj.step(action)
    input('Press Enter to exit')
def pretrain_layer(self, layer_idx, *args, **kwargs):
    """
    Pretrain a specific layer, treat the layers before it as already trained.

    Builds a one-layer autoencoder around encoder layer *layer_idx* and
    its mirror decoder layer, trains it with SGD on the (frozen) output
    of the preceding layers, then copies the learned parameters back
    into the full autoencoder.  Extra *args/**kwargs go to train_sgd.
    """
    enc = self.ae.encoder
    dec = self.ae.decoder
    if layer_idx == 0:
        # First layer trains directly on the raw input.
        x = self.x
    else:
        # Rebuild the already-trained prefix of the encoder so we can
        # forward-propagate the data up to this layer's input.
        base_enc = nn.NeuralNet(enc.in_dim, enc.layers[layer_idx].in_dim)
        for i in range(layer_idx):
            base_enc.add_layer(enc.layers[i].out_dim,
                               nonlin_type=enc.layers[i].nonlin.get_name(),
                               use_batch_normalization=enc.layers[i].
                               use_batch_normalization)
            # Copy the trained weights (and BN stats, if any) verbatim.
            base_enc.layers[i].params.set_param_from_vec(
                enc.layers[i].params.get_param_vec())
            if base_enc.layers[i].use_batch_normalization:
                base_enc.layers[i].bn_layer.params.set_param_from_vec(
                    enc.layers[i].bn_layer.params.get_param_vec())
        # Deterministic forward pass: no noise, test-mode BN.
        x = base_enc.forward_prop(self.x, add_noise=False,
                                  compute_loss=False, is_test=True)
    enc_layer = enc.layers[layer_idx]
    # Decoder layers mirror encoder layers in reverse order.
    dec_layer = dec.layers[len(dec.layers) - 1 - layer_idx]
    # One-layer encoder replica with the same regularization settings.
    single_layer_enc = nn.NeuralNet(enc_layer.in_dim, enc_layer.out_dim)
    single_layer_enc.add_layer(
        0, nonlin_type=enc_layer.nonlin.get_name(),
        dropout=enc_layer.params.dropout,
        sparsity=enc_layer.sparsity,
        sparsity_weight=enc_layer.sparsity_weight,
        use_batch_normalization=enc_layer.use_batch_normalization)
    # Matching one-layer decoder replica.
    single_layer_dec = nn.NeuralNet(dec_layer.in_dim, dec_layer.out_dim)
    single_layer_dec.add_layer(
        0, nonlin_type=dec_layer.nonlin.get_name(),
        dropout=dec_layer.params.dropout,
        sparsity=dec_layer.sparsity,
        sparsity_weight=dec_layer.sparsity_weight,
        use_batch_normalization=dec_layer.use_batch_normalization)
    if dec_layer.loss is not None:
        # Reuse the decoder layer's own loss when it has one.
        single_layer_dec.set_loss(dec_layer.loss.get_name(),
                                  loss_weight=dec_layer.loss.weight)
    else:
        # Otherwise pick a default matched to the output nonlinearity:
        # cross-entropy for sigmoid outputs, squared error elsewhere.
        if dec_layer.nonlin.get_name() == ly.NONLIN_NAME_SIGMOID:
            single_layer_dec.set_loss(ls.LOSS_NAME_BINARY_CROSSENTROPY,
                                      loss_weight=1)
        else:
            single_layer_dec.set_loss(ls.LOSS_NAME_SQUARED, loss_weight=1)
    single_layer_ae = nn.AutoEncoder(single_layer_enc, single_layer_dec)
    print ''
    print '****************************************'
    print 'Pretraining layer %d' % layer_idx
    print '****************************************'
    print single_layer_ae
    print ''
    print 'Data: %dx%d' % x.shape
    print ''
    # Reconstruction objective: train the mini-autoencoder on x -> x.
    ae_learner = Learner(single_layer_ae)
    ae_learner.load_data(x, x)
    ae_learner.train_sgd(*args, **kwargs)
    # note that after training the parameters are all noise-less parameters,
    # this should be handled properly
    enc_layer.params.set_noiseless_param_from_vec(
        single_layer_ae.encoder.layers[0].params.get_param_vec())
    if enc_layer.use_batch_normalization:
        enc_layer.bn_layer.params.set_param_from_vec(
            single_layer_ae.encoder.layers[0].bn_layer.params.
            get_param_vec())
    dec_layer.params.set_noiseless_param_from_vec(
        single_layer_ae.decoder.layers[0].params.get_param_vec())
    if dec_layer.use_batch_normalization:
        dec_layer.bn_layer.params.set_param_from_vec(
            single_layer_ae.decoder.layers[0].bn_layer.params.
            get_param_vec())
""" Class to crawl through web pages """ import pdb import re import warnings import sqlite3 import urllib import urllib.request from urllib.parse import urljoin from bs4 import BeautifulSoup import nn MYNET = nn.NeuralNet('nn.db') warnings.simplefilter(action='ignore', category=UserWarning) warnings.simplefilter(action='ignore', category=FutureWarning) # Create a list of words to ignore IGNORE_WORDS = { 'the': 1, 'of': 1, 'to': 1, 'and': 1, 'a': 1, 'in': 1, 'is': 1, 'it': 1 } DATA_PATH = "/home/ec2-user/environment/python_for_finance/research/ml_analysis/dev_work/dev_data/"