def __init__(self, step_size, discount, batch_size, log=None):
    self._step_size = step_size
    self._discount = discount
    self._batch_size = batch_size

    self._nn = neural_mini.NeuralNetwork2([2, 128, 3])

    self._pos_offset = 0.35
    self._pos_scale = 2 / 1.7    # position range [-1.2, 0.5] (span 1.7) maps to roughly [-1, 1]
    self._vel_scale = 2 / 0.14   # velocity range [-0.07, 0.07] maps to [-1, 1]

    if log is not None:
        log.add_param('type', 'neural network')
        log.add_param('nb_inputs', 2)
        log.add_param('hid_1_size', 128)
        log.add_param('hid_1_act', 'sigmoid')
        log.add_param('out_size', 3)
        log.add_param('out_act', 'linear')
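The three scaling constants encode the MountainCar state bounds: positions span [-1.2, 0.5] (length 1.7, midpoint -0.35) and velocities span [-0.07, 0.07] (length 0.14). A minimal sketch of the resulting normalization, written as a standalone helper (`normalize_state` is a hypothetical name, not part of the original class):

```python
import numpy as np

def normalize_state(pos, vel, pos_offset=0.35, pos_scale=2 / 1.7, vel_scale=2 / 0.14):
    # Shift the position so its midpoint lands on 0, then scale both
    # dimensions so the full range maps to roughly [-1, 1].
    return np.array([[(pos + pos_offset) * pos_scale,
                      vel * vel_scale]])

print(normalize_state(-1.2, -0.07))  # -> approx [[-1. -1.]]
print(normalize_state(0.5, 0.07))    # -> approx [[ 1.  1.]]
```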
def main():
    np.random.seed(0)

    nn = neural_mini.NeuralNetwork2([2, 128, 1])

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    for i in range(10001):
        nn.train_SGD(data_vec, 12, eta=learning_rate)

        if i % 100 == 0:
            print(i)
            ax.clear()
            plot_nn(ax, nn)
            plt.pause(0.001)

    ax.clear()
    plot_nn(ax, nn)
    plt.show()
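`data_vec`, `learning_rate` and `plot_nn` are defined elsewhere in the script. For illustration only, a sketch of what a surface-plotting helper in the spirit of `plot_nn` could look like, assuming the network exposes the batched `forward` used in the other snippets; the name `plot_nn_sketch` and the grid range are assumptions:

```python
import numpy as np

def plot_nn_sketch(ax, nn, lo=-1.0, hi=1.0, steps=32):
    # Evaluate the 2-input, 1-output network on a regular grid
    # and draw the resulting surface on the given 3D axis.
    xs = np.linspace(lo, hi, steps)
    ys = np.linspace(lo, hi, steps)
    xx, yy = np.meshgrid(xs, ys)
    grid = np.column_stack([xx.ravel(), yy.ravel()])   # shape (steps*steps, 2)
    zz = nn.forward(grid).reshape(xx.shape)            # one output per grid point
    ax.plot_surface(xx, yy, zz)
```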
def test_speed(self):
    inputs = 2
    hidden = 256
    outputs = 3
    dims = (inputs, hidden, outputs)

    if IMPLEMENTATION == 'neural':
        self.nn = neural.NeuralNetwork(dims, init='norm')
    elif IMPLEMENTATION == 'mini':
        self.nn = neural_mini.NeuralNetwork2(dims)
    elif IMPLEMENTATION == 'tensor':
        neural_tf.reset_default_graph()
        self.nn = neural_tf.NeuralNetworkTF(dims)
    elif IMPLEMENTATION == 'keras':
        self.nn = neural_keras.NeuralKeras(dims)
    elif IMPLEMENTATION == 'reference':
        self.nn = nndl.Network(dims)  # reference neural network
        self.data_vec = [(it[0].T, it[1]) for it in self.data_vec]
    else:
        raise ValueError('Unknown implementation: ' + IMPLEMENTATION)

    # data = np.random.uniform(-1.0, 1.0, size=(100000, 10))
    # labels = np.random.randint(0, 2, size=(100000, 10)).astype(float)
    # self.nn.train_SGD(data, labels, batch_size=100, eta=0.001)

    dtype = np.float32
    data = np.random.uniform(-1.0, 1.0, size=(1000000, 2)).astype(dtype)
    labels = np.random.randint(0, 2, size=(1000000, 3)).astype(dtype)
    print(data.dtype)
    print(labels.dtype)

    # Time 1000 forward passes on 1000-sample chunks
    ts = time.time()
    for i in range(1000):
        chunk_start = i * 1000
        chunk_end = i * 1000 + 1000
        self.nn.forward(data[chunk_start:chunk_end])
    span = time.time() - ts
    print('Predict time:', span)

    # Time 1000 training steps on the same chunks
    ts = time.time()
    for i in range(1000):
        chunk_start = i * 1000
        chunk_end = i * 1000 + 1000
        self.nn.train_batch(data[chunk_start:chunk_end],
                            labels[chunk_start:chunk_end], eta=0.001)
    span = time.time() - ts
    print('Training time:', span)
def setUp(self):
    random.seed(0)
    np.random.seed(0)

    #
    #   Define weights for testing
    #
    weights_0 = np.array([[0.1, 0.4, 0.7],
                          [0.2, 0.5, 0.8]], dtype=np.float32)
    biases_0 = np.array([[0.3, 0.6, 0.9]], dtype=np.float32)
    weights_1 = np.array([[1.0],
                          [1.1],
                          [1.2]], dtype=np.float32)
    biases_1 = np.array([[1.3]], dtype=np.float32)

    #
    #   Define test data (XOR pattern)
    #
    self.data_vec = [
        (np.array([[0.1, 0.1]]), np.array([[0]])),
        (np.array([[0.1, 0.9]]), np.array([[1]])),
        (np.array([[0.9, 0.1]]), np.array([[1]])),
        (np.array([[0.9, 0.9]]), np.array([[0]]))
    ]
    self.inputs = np.array([[0.1, 0.1], [0.1, 0.9], [0.9, 0.1], [0.9, 0.9]])
    self.targets = np.array([[0.0], [1.0], [1.0], [0.0]])

    if IMPLEMENTATION == 'neural':
        self.nn = neural.NeuralNetwork((2, 3, 1), init='norm')
    elif IMPLEMENTATION == 'mini':
        self.nn = neural_mini.NeuralNetwork2((2, 3, 1))
    elif IMPLEMENTATION == 'tensor':
        neural_tf.reset_default_graph()
        self.nn = neural_tf.NeuralNetworkTF((2, 3, 1))
    elif IMPLEMENTATION == 'keras':
        self.nn = neural_keras.NeuralKeras((2, 3, 1))
    elif IMPLEMENTATION == 'reference':
        self.nn = nndl.Network((2, 3, 1))  # reference neural network
        # the reference implementation expects transposed weights and inputs
        weights_0 = weights_0.T
        biases_0 = biases_0.T
        weights_1 = weights_1.T
        biases_1 = biases_1.T
        self.data_vec = [(it[0].T, it[1]) for it in self.data_vec]
    else:
        raise ValueError('Unknown implementation: ' + IMPLEMENTATION)

    # Make sure shapes match before assigning weights into the neural network
    nn_weights_0 = self.nn.get_weights(0)
    nn_biases_0 = self.nn.get_biases(0)
    nn_weights_1 = self.nn.get_weights(1)
    nn_biases_1 = self.nn.get_biases(1)
    self.assertEqual(nn_weights_0.shape, weights_0.shape)
    self.assertEqual(nn_biases_0.shape, biases_0.shape)
    self.assertEqual(nn_weights_1.shape, weights_1.shape)
    self.assertEqual(nn_biases_1.shape, biases_1.shape)

    self.nn.set_weights(0, weights_0)
    self.nn.set_biases(0, biases_0)
    self.nn.set_weights(1, weights_1)
    self.nn.set_biases(1, biases_1)

    # Confirm the weights were actually stored
    nn_weights_0 = self.nn.get_weights(0)
    nn_biases_0 = self.nn.get_biases(0)
    nn_weights_1 = self.nn.get_weights(1)
    nn_biases_1 = self.nn.get_biases(1)
    self.assertEqual(nn_weights_0.tolist(), weights_0.tolist())
    self.assertEqual(nn_biases_0.tolist(), biases_0.tolist())
    self.assertEqual(nn_weights_1.tolist(), weights_1.tolist())
    self.assertEqual(nn_biases_1.tolist(), biases_1.tolist())
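Because the weights are hand-picked, the expected outputs can be verified by hand. A minimal sketch of the corresponding forward pass, assuming sigmoid activations in both layers (the actual activations depend on the implementation under test):

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# Same hand-picked parameters as in setUp
w0 = np.array([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8]])
b0 = np.array([[0.3, 0.6, 0.9]])
w1 = np.array([[1.0], [1.1], [1.2]])
b1 = np.array([[1.3]])

x = np.array([[0.1, 0.1]])
hidden = sigmoid(x @ w0 + b0)       # shape (1, 3), pre-activations [0.33, 0.69, 1.05]
output = sigmoid(hidden @ w1 + b1)  # shape (1, 1)
print(output)                       # ~0.97 under these activation assumptions
```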
# print(' time_predict', time_predict)

# start_time = time.time()
# for i in range(500):
#     _, val = sess.run([optimizer, cost],
#                       feed_dict={inputs: arr[i], targets: tar[i]})
# time_train = time.time() - start_time
# print(' time_train', time_train)

##############################
print('NUMPY')

nn = neural_mini.NeuralNetwork2([2, 128, 3])

start_time = time.time()
for i in range(500):
    nn.forward(arr[i])
    nn.forward(arr2[i])
time_predict = time.time() - start_time
print(' time_predict', time_predict)

start_time = time.time()
for i in range(500):
    nn.train_batch(arr[i], tar[i], eta=learning_rate)
    nn.train_batch(arr2[i], tar2[i], eta=learning_rate)
time_train = time.time() - start_time
print(' time_train', time_train)