def buildNet(indim, hidden, outdim=2, temperature=1., recurrent=True):
    """Build a tanh-hidden-layer classifier whose softmax output is
    temperature-scaled through a fixed linear connection."""
    from pybrain import (FullConnection, BiasUnit, TanhLayer, SoftmaxLayer,
                         RecurrentNetwork, LinearLayer, LinearConnection,
                         FeedForwardNetwork, SigmoidLayer)
    if recurrent:
        net = RecurrentNetwork()
    else:
        net = FeedForwardNetwork()
    net.addInputModule(LinearLayer(indim, name='i'))
    net.addModule(TanhLayer(hidden, name='h'))
    net.addModule(BiasUnit('bias'))
    net.addModule(SigmoidLayer(outdim, name='unscaled'))
    net.addOutputModule(SoftmaxLayer(outdim, name='o'))
    net.addConnection(FullConnection(net['i'], net['h']))
    net.addConnection(FullConnection(net['bias'], net['h']))
    net.addConnection(FullConnection(net['bias'], net['unscaled']))
    net.addConnection(FullConnection(net['h'], net['unscaled']))
    lconn = LinearConnection(net['unscaled'], net['o'])
    lconn._setParameters([1. / temperature] * outdim)
    # these weights are fixed, so hide them from the trainable parameters
    lconn.paramdim = 0
    net.addConnection(lconn)
    if recurrent:
        net.addRecurrentConnection(FullConnection(net['h'], net['h']))
    net.sortModules()
    print net
    print 'number of parameters', net.paramdim
    return net
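# A minimal usage sketch for buildNet; the input dimension (4), hidden size
# (6) and temperature (2.) are illustrative assumptions, not values from the
# original.
from scipy import rand
net = buildNet(4, 6, outdim=2, temperature=2., recurrent=True)
net.reset()  # clear the recurrent activation history before a new sequence
for _ in range(3):
    # softmax probabilities, softened by the fixed 1/temperature scaling
    print net.activate(rand(4))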
def set_nn(inp, hid1, out):
    from pybrain import LinearLayer, TanhLayer, FullConnection, FeedForwardNetwork
    # Make a new FFN object:
    n = FeedForwardNetwork()
    # Construct the input, hidden and output layers:
    inLayer = LinearLayer(inp)
    hiddenLayer1 = TanhLayer(hid1)
    # hiddenLayer2 = TanhLayer(hid2)
    outLayer = LinearLayer(out)
    # Add the layers to the network:
    n.addInputModule(inLayer)
    n.addModule(hiddenLayer1)
    # n.addModule(hiddenLayer2)
    n.addOutputModule(outLayer)
    # Determine how the neurons should be connected:
    in_to_hidden = FullConnection(inLayer, hiddenLayer1)
    # hid_to_hid = FullConnection(hiddenLayer1, hiddenLayer2)
    hidden_to_out = FullConnection(hiddenLayer1, outLayer)
    # Add the connections to the network:
    n.addConnection(in_to_hidden)
    # n.addConnection(hid_to_hid)
    n.addConnection(hidden_to_out)
    # Final step that makes the MLP usable:
    n.sortModules()
    return n
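# Hedged usage sketch for set_nn: fit the returned net to XOR treated as
# regression. The dataset, learning rate and epoch count are illustrative
# assumptions.
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

n = set_nn(2, 3, 1)
ds = SupervisedDataSet(2, 1)
for a, b, target in [(0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 0)]:
    ds.addSample((a, b), (target,))
trainer = BackpropTrainer(n, ds, learningrate=0.01)
for _ in range(1000):
    trainer.train()  # one epoch per call, returns the average error
print n.activate((1, 0))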
def testBank():
    D = readData()
    print len(D), 'samples', D.indim, 'features'
    from pybrain import LinearLayer, FullConnection, FeedForwardNetwork, BiasUnit, SigmoidLayer
    net = FeedForwardNetwork()
    net.addInputModule(LinearLayer(D.indim, name='in'))
    net.addModule(BiasUnit(name='bias'))
    net.addOutputModule(SigmoidLayer(1, name='out'))
    net.addConnection(FullConnection(net['in'], net['out']))
    net.addConnection(FullConnection(net['bias'], net['out']))
    net.sortModules()
    # scale down the initial weights (in place)
    p = net.params
    p *= 0.01
    provider = ModuleWrapper(D, net, shuffling=False)
    algo = SGD(provider, net.params.copy(),
               #callback=printy,
               learning_rate=5.5e-5)
    #algo = vSGDfd(provider, net.params.copy(),
    #              #callback=printy
    #              )
    # five passes over the data, logging progress before and after each
    printy(algo, force=True)
    for _ in range(5):
        algo.run(len(D))
        printy(algo, force=True)
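# printy is an external logging helper that is not shown here; a minimal
# stand-in, assuming the optimizer exposes the same provider/bestParameters
# attributes used in testPlot1 below:
def printy(algo, force=False):
    from scipy import mean
    # report the mean loss over the dataset at the current best parameters
    print mean(algo.provider.currentLosses(algo.bestParameters))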
def prepare():
    """ Shape the dataset, and build the linear classifier """
    from pybrain import LinearLayer, FullConnection, FeedForwardNetwork
    from pybrain.datasets import SupervisedDataSet
    D = SupervisedDataSet(3, 1)
    for c, f, i in data:
        D.addSample([1, f, i], [c])
    net = FeedForwardNetwork()
    net.addInputModule(LinearLayer(D.indim, name='in'))
    net.addOutputModule(LinearLayer(1, name='out'))
    net.addConnection(FullConnection(net['in'], net['out']))
    net.sortModules()
    return D, net
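# Hedged usage sketch for prepare(): fit the linear classifier with plain
# backprop. The learning rate and epoch count are illustrative assumptions.
from pybrain.supervised.trainers import BackpropTrainer

D, net = prepare()
trainer = BackpropTrainer(net, D, learningrate=0.01)
for _ in range(50):
    err = trainer.train()  # one pass over D, returns the average error
print 'final training error', err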
def testPlot1():
    dim = 15
    from scipy import rand, dot
    from pybrain.datasets import SupervisedDataSet
    from pybrain import LinearLayer, FullConnection, FeedForwardNetwork
    from pybrain.utilities import dense_orth
    net = FeedForwardNetwork()
    net.addInputModule(LinearLayer(dim, name='in'))
    net.addOutputModule(LinearLayer(1, name='out'))
    net.addConnection(FullConnection(net['in'], net['out']))
    net.sortModules()
    ds = SupervisedDataSet(dim, 1)
    ds2 = SupervisedDataSet(dim, 1)
    R = dense_orth(dim)
    for _ in range(1000):
        tmp = rand(dim) > 0.5
        tmp2 = dot(tmp, R)
        ds.addSample(tmp, [tmp[-1]])
        ds2.addSample(tmp2, [tmp[-1]])
    f = ModuleWrapper(ds, net)
    f2 = ModuleWrapper(ds2, net)
    # tracking progress by callback
    ltrace = []
    def storer(a):
        ltrace.append(a.provider.currentLosses(a.bestParameters))
    x = net.params
    x *= 0.001
    algo = SGD(f, net.params.copy(), callback=storer, learning_rate=0.2)
    algo.run(1000)
    pylab.plot(ltrace, 'r-')
    del ltrace[:]
    algo = SGD(f2, net.params.copy(), callback=storer, learning_rate=0.2)
    algo.run(1000)
    pylab.plot(ltrace, 'g-')
    pylab.semilogy()
    pylab.show()
def train(self):
    """Train the network on the training sample."""
    from pybrain import LinearLayer, SigmoidLayer, FullConnection, FeedForwardNetwork
    from pybrain.datasets import ClassificationDataSet
    from pybrain.supervised.trainers import BackpropTrainer
    self.net = FeedForwardNetwork()
    inLayer = LinearLayer(self.input_neurons)
    hiddenLayer = SigmoidLayer(self.hiden_neurons)
    outLayer = LinearLayer(self.OUTPUT_NEURONS)
    self.net.addInputModule(inLayer)
    self.net.addModule(hiddenLayer)
    self.net.addOutputModule(outLayer)
    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    self.net.addConnection(in_to_hidden)
    self.net.addConnection(hidden_to_out)
    self.net.sortModules()
    ds = ClassificationDataSet(self.input_neurons, self.OUTPUT_NEURONS, nb_classes=3)
    for i, coord in enumerate(self.X):
        ds.addSample(coord, (self.y[i],))
    trainer = BackpropTrainer(self.net, dataset=ds, momentum=0.1,
                              verbose=True, weightdecay=0.01)
    if self.maxErr:
        # train one epoch at a time until the error drops below the threshold
        for i in range(self.maxEpochs):
            if trainer.train() < self.maxErr:
                print "Desired error reached"
                break
    else:
        trainer.trainUntilConvergence(maxEpochs=self.maxEpochs)
    print "Successfully finished"
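# A hedged companion sketch (an assumption, not part of the original class):
# once train() has run, predictions come from activating the stored network;
# with the linear output layer above, rounding the output gives a class label.
def predict(self, coord):
    return self.net.activate(coord)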
from pybrain import LinearLayer, SigmoidLayer
from pybrain import FullConnection
from pybrain import FeedForwardNetwork
import numpy as np
import pandas as pd
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
import matplotlib.pylab as plt

dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m')
data = pd.read_csv('AirPassengers.csv', parse_dates=['Month'],
                   index_col='Month', date_parser=dateparse)
ts = data['#Passengers']

# Make a new FFN object:
n = FeedForwardNetwork()

# Construct the input, hidden and output layers:
inLayer = LinearLayer(3)
hiddenLayer = SigmoidLayer(4)
outLayer = LinearLayer(1)

# Add the layers to the network:
n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addOutputModule(outLayer)

# Determine how the neurons should be connected:
in_to_hidden = FullConnection(inLayer, hiddenLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)
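# The snippet stops before wiring the connections; a hedged continuation,
# assuming each window of 3 consecutive passenger counts predicts the next:
n.addConnection(in_to_hidden)
n.addConnection(hidden_to_out)
n.sortModules()

ds = SupervisedDataSet(3, 1)
values = ts.values.astype(float)
for i in range(len(values) - 3):
    ds.addSample(values[i:i + 3], (values[i + 3],))
trainer = BackpropTrainer(n, ds, learningrate=0.001)
for _ in range(100):
    trainer.train()  # one backprop epoch over the lag windows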