Example #1
from brainforge import BackpropNetwork
from brainforge.layers import ClockworkLayer, DenseLayer
from brainforge.optimization import RMSprop

def QannRecurrent():
    # env (a Gym-style environment) and nactions are assumed to be defined
    # elsewhere in the module; the ClockworkLayer import path is an assumption.
    brain = BackpropNetwork(env.observation_space.shape, layers=[
        ClockworkLayer(120, activation="tanh"),
        DenseLayer(60, activation="relu"),
        DenseLayer(nactions, activation="linear")
    ], cost="mse", optimizer=RMSprop(eta=0.0001))
    return brain
Example #2
File: main.py Project: csxeba/ReSkiv
def forge_ann(inshape, outshape):
    """
    Use Brainforge to create an ANN.
    Brainforge is my ANN lib. See: https://github.com/csxeba/brainforge
    It has much more functionality than this project's "learning" submodule.
    I include a [fairly :)] stable version in case you want to experiment
    with more advanced architectures.
    (And because I couldn't get Keras to work...)
    There are some decent recurrent layer implementations like LSTM, GRU,
    ClockworkRNN. The ConvLayer is not very stable and it's also quite slow.
    
    A note on recurrent architectures:
    If you use one of the recurrent layers, you'll need to preprocess the
    input data a bit more (see the sketch after this function). This is in
    addition to the pixel subsampling.
    """
    from brainforge import Network
    from brainforge.layers import DenseLayer
    brain = Network(inshape,
                    layers=(DenseLayer(neurons=200, activation="tanh"),
                            DenseLayer(outshape, activation="softmax")))

    # Attention! The implicit optimizer of the network won't be used,
    # the agent will have its own optimizer, which is used instead!
    # So don't set finalize()'s parameters, they won't be utilized.
    brain.finalize("xent")
    return brain
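The docstring's note about recurrent architectures gives no code; here is a minimal sketch of the extra preprocessing it hints at, assuming each observation is a flat (subsampled) pixel vector and that the recurrent layers consume (timestep, features) windows, as the timestep=5 setting in Example #13 suggests:

import numpy as np

def to_sequences(frames, timestep=5):
    # Stack consecutive flat frames into overlapping windows of shape
    # (timestep, features); output shape is (n - timestep + 1, timestep, features).
    frames = np.asarray(frames)
    return np.stack([frames[i:i + timestep]
                     for i in range(len(frames) - timestep + 1)])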
Example #3
from brainforge import Network
from brainforge.layers import RLayer, DenseLayer

def get_net(inshape, outputs):
    # RLayer is brainforge's plain recurrent layer; the import path is an assumption
    model = Network(inshape, [
        RLayer(180, activation="tanh"),
        DenseLayer(60, activation="tanh"),
        DenseLayer(outputs, activation="sigmoid")
    ])
    model.finalize("xent", "adam")
    return model
Example #4
def setUp(self):
    # etalon is brainforge's bundled demo dataset (from brainforge.util import etalon)
    X, Y = etalon
    self.net = BackpropNetwork(input_shape=(4,), layers=[
        DenseLayer(30, activation="sigmoid"),
        DenseLayer(3, activation="softmax")
    ], cost="xent", optimizer="sgd")
    self.net.fit(X, Y, batch_size=len(X)//2, epochs=3, validation=etalon)
    self.cost1, self.acc1 = self.net.evaluate(*etalon)
Example #5
from brainforge import BackpropNetwork
from brainforge.layers import DenseLayer
from brainforge.optimization import RMSprop

def QannDense():
    # env (a Gym-style environment) and nactions are assumed to be defined elsewhere
    brain = BackpropNetwork(input_shape=env.observation_space.shape,
                            layerstack=[
                                DenseLayer(24, activation="tanh"),
                                DenseLayer(nactions, activation="linear")
                            ],
                            cost="mse",
                            optimizer=RMSprop(eta=0.0001))
    return brain
Example #6
from brainforge import BackpropNetwork
from brainforge.layers import DenseLayer

def build_normal_net(inshape, outshape):
    net = BackpropNetwork(input_shape=inshape,
                          layerstack=[
                              DenseLayer(60, activation="tanh"),
                              DenseLayer(outshape, activation="softmax")
                          ],
                          cost="xent",
                          optimizer="adam")
    return net
Example #7
def _default_synth(self):
    synth = BackpropNetwork(input_shape=self.inshape,
                            layerstack=[
                                DenseLayer(self.inshape[0],
                                           activation="tanh"),
                                DenseLayer(self.inshape[0],
                                           activation="linear"),
                            ],
                            cost="mse",
                            optimizer="sgd")
    return synth
Example #8
from brainforge import BackpropNetwork
from brainforge.layers import LSTM, DenseLayer
from brainforge.optimization import RMSprop

def run_brainforge():
    # inshape, outshape and data are assumed to be module-level globals
    # (Example #13 builds the same network from a csxdata Sequence)
    net = BackpropNetwork(input_shape=inshape,
                          layerstack=[
                              LSTM(60, activation="tanh"),
                              DenseLayer(60, activation="tanh"),
                              DenseLayer(outshape, activation="softmax")
                          ],
                          cost="xent",
                          optimizer=RMSprop(eta=0.01))

    net.fit_generator(data.batchgen(20), lessons_per_epoch=data.N)
Example #9
from brainforge import Network
from brainforge.layers import DenseLayer

def build_encoder(hid):
    # Despite the name, this builds a full autoencoder: the hidden sizes in
    # `hid` are mirrored around the bottleneck and the linear output layer
    # reconstructs the input. `data` is assumed to be a 2-D sample matrix
    # defined in the enclosing scope.
    dims = data.shape[1]
    enc = Network(input_shape=dims, layers=(
        DenseLayer(hid[0], activation="tanh"),
    ))
    if len(hid) > 1:
        for neurons in hid[1:]:
            enc.add(DenseLayer(neurons, activation="tanh"))
        for neurons in hid[-2:0:-1]:
            enc.add(DenseLayer(neurons, activation="tanh"))
    enc.add(DenseLayer(dims, activation="linear"))
    enc.finalize(cost="mse", optimizer="rmsprop")
    return enc
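Since the decoder mirrors the encoder, the training target is the input itself. A minimal usage sketch, assuming `data` is the same 2-D matrix the builder reads and that fit accepts the arguments seen in Example #4 (the batch size and epoch count here are illustrative):

ae = build_encoder(hid=(60, 30, 10))  # stacks 60-30-10-30 hidden layers plus a linear output of width dims
ae.fit(data, data, batch_size=32, epochs=10)  # reconstruction: target equals input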
Example #10
from brainforge import BackpropNetwork
from brainforge.layers import DenseLayer
from brainforge.optimization import SGD

def get_agent():
    # env (a Gym-style environment) and nactions are assumed to be defined elsewhere
    brain = BackpropNetwork(
        input_shape=env.observation_space.shape,
        layerstack=[DenseLayer(nactions, activation="softmax")],
        cost="xent",
        optimizer=SGD(eta=0.0001))
    return brain
Example #11
    def setUp(self):
        self.data = CData(mnist_tolearningtable(roots["misc"] + "mnist.pkl.gz",
                                                fold=False),
                          headers=None)
        self.data.transformation = "std"
        self.X, self.Y = self.data.table("testing", m=5, shuff=False)

        self.net = BackpropNetwork(self.data.neurons_required[0],
                                   name="NumGradTestNetwork")
        self.net.add(DenseLayer(30, activation="sigmoid"))
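        # No output layer or finalize() call yet: the numerical-gradient tests
        # add their own output layer and cost (see Examples #15 and #16).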
Example #12
import numpy as np
import matplotlib.pyplot as plt

from brainforge import BackpropNetwork
from brainforge.layers import Flatten, DenseLayer
# the DQN import path is an assumption; the agent may instead come from the
# host project's own learning submodule
from brainforge.reinforcement import DQN

def simulation(game, render=False):
    inshape, outshape = game.neurons_required
    ann = BackpropNetwork(input_shape=np.prod(inshape), layerstack=[
        Flatten(),
        DenseLayer(300, activation="tanh"),
        DenseLayer(outshape, activation="softmax")
    ], cost="xent", optimizer="rmsprop")
    agent = DQN(ann, outshape)

    if render:
        plt.ion()
        obj = plt.imshow(game.reset(), vmin=-1, vmax=1, cmap="hot")

    episode = 1

    while True:
        print()
        print(f"Episode {episode}")
        canvas = game.reset()
        if render:
            obj.set_data(canvas)
        step = 0
        done = 0
        reward = None
        while not done:
            action = agent.sample(canvas, reward)
            canvas, reward, done = game.step(action)
            if render:
                obj.set_data(canvas)
            step += 1
            # print(f"\rStep: {step}", end="")
            if render:
                plt.pause(0.1)
        print(f" Accumulating! Steps taken: {step}, {'died' if reward < 0 else 'alive'}")
        agent.accumulate(canvas, reward)
        if episode % 10 == 0:
            print("Updating!")
        episode += 1
Example #13
from csxdata import Sequence, roots

from brainforge import BackpropNetwork
from brainforge.layers import LSTM, DenseLayer
from brainforge.optimization import RMSprop

data = Sequence(roots["txt"] + "petofi.txt", n_gram=1, timestep=5)
inshape, outshape = data.neurons_required
net = BackpropNetwork(input_shape=inshape,
                      layerstack=[
                          LSTM(60, activation="tanh"),
                          DenseLayer(60, activation="tanh"),
                          DenseLayer(outshape, activation="softmax")
                      ],
                      cost="xent",
                      optimizer=RMSprop(eta=0.01))

net.fit(*data.table("learning"), validation=data.table("testing"))
Example #14
from brainforge.util import etalon
from brainforge import LayerStack, BackpropNetwork
from brainforge.layers import DenseLayer, DropOut

ls = LayerStack(
    (4, ),
    layers=[
        DenseLayer(120, activation="tanh"),
        # DropOut(0.5),
        DenseLayer(3, activation="softmax")
    ])

net = BackpropNetwork(ls, cost="xent", optimizer="momentum")
costs = net.fit(*etalon, epochs=300, validation=etalon, verbose=1)
Example #15
def test_xent_with_softmax_output(self):
    self.net.add(
        DenseLayer(self.data.neurons_required[1], activation="softmax"))
    self.net.finalize(cost="xent", optimizer="sgd")
    self._run_numerical_gradient_test()
Example #16
def test_mse_with_sigmoid_output(self):
    self.net.add(
        DenseLayer(self.data.neurons_required[1], activation="sigmoid"))
    self.net.finalize(cost="mse", optimizer="sgd")
    self._run_numerical_gradient_test()
Example #17
from brainforge import LayerStack
from brainforge.layers import DenseLayer

def forge_layerstack():
    return LayerStack(input_shape=(1,), layers=[
        DenseLayer(30, activation="tanh"),
        DenseLayer(30, activation="tanh"),
        DenseLayer(1, activation="linear")
    ])
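A LayerStack by itself is only the architecture; following the pattern of Example #14, it can be wrapped in a BackpropNetwork to train it (the cost and optimizer choices below are illustrative):

from brainforge import BackpropNetwork

net = BackpropNetwork(forge_layerstack(), cost="mse", optimizer="sgd")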
Example #18
from csxdata import roots, CData

from brainforge import BackpropNetwork
from brainforge.layers import DenseLayer
from brainforge.optimization import SGD

mnist = CData(roots["misc"] + "mnist.pkl.gz", cross_val=10000, fold=False)
inshape, outshape = mnist.neurons_required

network = BackpropNetwork(input_shape=inshape,
                          layerstack=[
                              DenseLayer(30, activation="sigmoid"),
                              DenseLayer(outshape, activation="softmax")
                          ],
                          cost="xent",
                          optimizer=SGD(eta=3.))

network.fit(*mnist.table("learning"), validation=mnist.table("testing"))
Example #19
from csxdata import CData, roots

from brainforge import BackpropNetwork
from brainforge.layers import ConvLayer, PoolLayer, Flatten, DenseLayer, Activation
from brainforge.optimization import RMSprop

data = CData(roots["misc"] + "mnist.pkl.gz", cross_val=10000, fold=True)
ins, ous = data.neurons_required
net = BackpropNetwork(input_shape=ins, layerstack=[
    ConvLayer(3, 8, 8, compiled=False),
    PoolLayer(3, compiled=False), Activation("tanh"),
    Flatten(), DenseLayer(60, activation="tanh"),
    DenseLayer(ous, activation="softmax")
], cost="xent", optimizer=RMSprop(eta=0.01))

net.fit_generator(data.batchgen(bsize=20, infinite=True), lessons_per_epoch=60000, epochs=30,
                  validation=data.table("testing"))