import pickle
import unittest

from brainforge import BackpropNetwork
from brainforge.layers import DenseLayer
from brainforge.util import etalon
# NOTE: the import path of the persistance module is assumed here; adjust it to the package layout.
from brainforge import persistance


class TestPersistance(unittest.TestCase):

    def setUp(self):
        X, Y = etalon
        self.net = BackpropNetwork(input_shape=(4,), layers=[
            DenseLayer(30, activation="sigmoid"),
            DenseLayer(3, activation="softmax")
        ], cost="xent", optimizer="sgd")
        self.net.fit(X, Y, batch_size=len(X)//2, epochs=3, validation=etalon)
        self.cost1, self.acc1 = self.net.evaluate(*etalon)

    def test_dense_with_pickle(self):
        sleepy = pickle.dumps(self.net)
        netcopy = pickle.loads(sleepy)
        self._check_persistence_ok(netcopy)

    def test_dense_with_capsule(self):
        persistance.Capsule.encapsulate(self.net, dumppath="./PersistanceTest.bro")
        netcopy = persistance.load("./PersistanceTest.bro")
        self._check_persistence_ok(netcopy)

    def _check_persistence_ok(self, netcopy):
        # A restored network must reproduce the original's metrics without being the same object.
        cost2, acc2 = netcopy.evaluate(*etalon)
        self.assertAlmostEqual(self.cost1, cost2)
        self.assertAlmostEqual(self.acc1, acc2)
        self.assertFalse(self.net is netcopy)
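# A minimal entry point for running the persistence tests above directly as a script;
# this is a hedged sketch that assumes the standard unittest runner rather than the
# project's own test harness.
if __name__ == "__main__":
    unittest.main()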
def run_brainforge():
    net = BackpropNetwork(input_shape=inshape, layerstack=[
        LSTM(60, activation="tanh"),
        DenseLayer(60, activation="tanh"),
        DenseLayer(outshape, activation="softmax")
    ], cost="xent", optimizer=RMSprop(eta=0.01))
    net.fit_generator(data.batchgen(20), lessons_per_epoch=data.N)
def QannRecurrent():
    brain = BackpropNetwork(env.observation_space.shape, layers=[
        ClockworkLayer(120, activation="tanh"),
        DenseLayer(60, activation="relu"),
        DenseLayer(nactions, activation="linear")
    ], cost="mse", optimizer=RMSprop(eta=0.0001))
    return brain
def get_agent():
    brain = BackpropNetwork(
        input_shape=env.observation_space.shape,
        layerstack=[DenseLayer(nactions, activation="softmax")],
        cost="xent", optimizer=SGD(eta=0.0001))
    return brain
def test_fnn_softmax_1layer_full():
    mnist = MNIST("../..", return_type="numpy")
    train_images, train_labels = mnist.load_training()
    test_images, test_labels = mnist.load_testing()
    train_labels = create_one_hot_labels(train_labels)
    np.set_printoptions(threshold=np.inf)

    inshape, outshape = train_images.shape[1:], train_labels.shape[1:]
    print("%s -> %s" % (inshape, outshape))

    layerstack = [DenseLayer(outshape, activation="softmax")]
    network = BackpropNetwork(input_shape=inshape, layerstack=layerstack,
                              cost="xent", optimizer="sgd", eta=1e-3)
    network.fit(train_images, train_labels, epochs=1, batch_size=100,
                shuffle=False, verbose=1)

    # gcsuite = GradientCheck(network, epsilon=1e-5)
    # gcsuite.run(test_images[5000:5500], test_labels[5000:5500])

    probs = network.predict(test_images)
    # Fraction of correctly classified test samples (i.e. accuracy).
    precision = np.sum(np.argmax(probs, axis=1) == test_labels) / float(len(probs))
    print("precision={}, len={}".format(precision, len(probs)))
def test_fnn_2layers():
    mnist = MNIST("../..", return_type="numpy")
    train_images, train_labels = mnist.load_training()
    test_images, test_labels = mnist.load_testing()
    train_labels = create_one_hot_labels(train_labels)
    test_labels = create_one_hot_labels(test_labels)

    inshape, outshape = train_images.shape[1:], train_labels.shape[1:]
    print("%s -> %s" % (inshape, outshape))

    layerstack = [
        DenseLayer(30, activation="relu"),
        DenseLayer(outshape, activation="softmax")
    ]
    network = BackpropNetwork(input_shape=inshape, layerstack=layerstack,
                              cost="xent", optimizer="sgd")
    network.fit(train_images, train_labels, epochs=1, batch_size=100, verbose=0)

    gcsuite = GradientCheck(network, epsilon=1e-5)
    gcsuite.run(test_images, test_labels)
def build_normal_net(inshape, outshape):
    net = BackpropNetwork(input_shape=inshape, layerstack=[
        DenseLayer(60, activation="tanh"),
        DenseLayer(outshape, activation="softmax")
    ], cost="xent", optimizer="adam")
    return net
def QannDense():
    brain = BackpropNetwork(input_shape=env.observation_space.shape, layerstack=[
        DenseLayer(24, activation="tanh"),
        DenseLayer(nactions, activation="linear")
    ], cost="mse", optimizer=RMSprop(eta=0.0001))
    return brain
def _default_synth(self):
    synth = BackpropNetwork(input_shape=self.inshape, layerstack=[
        DenseLayer(self.inshape[0], activation="tanh"),
        DenseLayer(self.inshape[0], activation="linear"),
    ], cost="mse", optimizer="sgd")
    return synth
def simulation(game, render=False):
    inshape, outshape = game.neurons_required
    ann = BackpropNetwork(input_shape=np.prod(inshape), layerstack=[
        Flatten(),
        DenseLayer(300, activation="tanh"),
        DenseLayer(outshape, activation="softmax")
    ], cost="xent", optimizer="rmsprop")
    agent = DQN(ann, outshape)

    if render:
        plt.ion()
        obj = plt.imshow(game.reset(), vmin=-1, vmax=1, cmap="hot")

    episode = 1
    while 1:
        print()
        print(f"Episode {episode}")
        canvas = game.reset()
        if render:
            obj.set_data(canvas)
        step = 0
        done = 0
        reward = None
        while not done:
            action = agent.sample(canvas, reward)
            canvas, reward, done = game.step(action)
            if render:
                obj.set_data(canvas)
            step += 1
            # print(f"\rStep: {step}", end="")
            if render:
                plt.pause(0.1)
        print(f" Accumulating! Steps taken: {step}, {'died' if reward < 0 else 'alive'}")
        agent.accumulate(canvas, reward)
        if episode % 10 == 0:
            print("Updating!")
        episode += 1
from csxdata import roots, CData

from brainforge import BackpropNetwork
from brainforge.layers import DenseLayer
from brainforge.optimization import SGD

mnist = CData(roots["misc"] + "mnist.pkl.gz", cross_val=10000, fold=False)
inshape, outshape = mnist.neurons_required

network = BackpropNetwork(input_shape=inshape, layerstack=[
    DenseLayer(30, activation="sigmoid"),
    DenseLayer(outshape, activation="softmax")
], cost="xent", optimizer=SGD(eta=3.))

network.fit(*mnist.table("learning"), validation=mnist.table("testing"))
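# Hedged follow-up sketch: report held-out performance once training has finished.
# This assumes BackpropNetwork.evaluate returns a (cost, accuracy) pair, as used in the
# persistence test above; check the exact return signature against the library.
cost, acc = network.evaluate(*mnist.table("testing"))
print("MNIST test cost: {:.4f}, accuracy: {:.2%}".format(cost, acc))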
from brainforge.util import etalon
from brainforge import LayerStack, BackpropNetwork
from brainforge.layers import DenseLayer, DropOut

ls = LayerStack((4,), layers=[
    DenseLayer(120, activation="tanh"),
    # DropOut(0.5),
    DenseLayer(3, activation="softmax")
])
net = BackpropNetwork(ls, cost="xent", optimizer="momentum")

costs = net.fit(*etalon, epochs=300, validation=etalon, verbose=1)
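# Optional sanity check after training (a hedged sketch): predict on the etalon inputs
# and compare argmax classes against the one-hot targets. predict() usage mirrors the
# MNIST test function above; the hit-rate computation here is illustrative only.
X, Y = etalon
preds = net.predict(X)
hits = (preds.argmax(axis=1) == Y.argmax(axis=1)).mean()
print("Training-set hit rate: {:.2%}".format(hits))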
from brainforge import BackpropNetwork
from brainforge.layers import DenseLayer
from brainforge.gradientcheck import GradientCheck
from brainforge.util import etalon

X, Y = etalon
inshape, outshape = X.shape[1:], Y.shape[1:]

network = BackpropNetwork(input_shape=inshape, layerstack=[
    DenseLayer(10, activation="sigmoid"),
    DenseLayer(outshape, activation="softmax")
], cost="xent", optimizer="sgd")

network.fit(X[5:], Y[5:], epochs=1, batch_size=len(X) - 5, verbose=0)
gcsuite = GradientCheck(network, epsilon=1e-3)
gcsuite.run(X[:5], Y[:5])
from csxdata import Sequence, roots

from brainforge import BackpropNetwork
from brainforge.layers import LSTM, DenseLayer
from brainforge.optimization import RMSprop

data = Sequence(roots["txt"] + "petofi.txt", n_gram=1, timestep=5)
inshape, outshape = data.neurons_required

net = BackpropNetwork(input_shape=inshape, layerstack=[
    LSTM(60, activation="tanh"),
    DenseLayer(60, activation="tanh"),
    DenseLayer(outshape, activation="softmax")
], cost="xent", optimizer=RMSprop(eta=0.01))

net.fit(*data.table("learning"), validation=data.table("testing"))
from csxdata import CData, roots

from brainforge import BackpropNetwork
from brainforge.layers import ConvLayer, PoolLayer, Flatten, DenseLayer, Activation
from brainforge.optimization import RMSprop

data = CData(roots["misc"] + "mnist.pkl.gz", cross_val=10000, fold=True)
ins, ous = data.neurons_required

net = BackpropNetwork(input_shape=ins, layerstack=[
    ConvLayer(3, 8, 8, compiled=False),
    PoolLayer(3, compiled=False),
    Activation("tanh"),
    Flatten(),
    DenseLayer(60, activation="tanh"),
    DenseLayer(ous, activation="softmax")
], cost="xent", optimizer=RMSprop(eta=0.01))

net.fit_generator(data.batchgen(bsize=20, infinite=True), lessons_per_epoch=60000,
                  epochs=30, validation=data.table("testing"))
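# Hedged sketch: persist the trained convolutional network the same way the persistence
# test class above does. The persistance import path and the output path are assumptions,
# not part of the original example.
from brainforge import persistance
persistance.Capsule.encapsulate(net, dumppath="./mnist_cnn.bro")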