Code example #1
File: brain.py (project: SlightlyCyborg/pybot)
# Imports implied by the snippet (not shown in the original file)
import pickle

from pybrain.structure import RecurrentNetwork, LinearLayer, SigmoidLayer, FullConnection
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer


class MoveBrain:
    def __init__(self):
        self.n = RecurrentNetwork()
        inLayer = LinearLayer(8)
        hiddenLayer = SigmoidLayer(4)
        self.numInputs = 8
        outLayer = LinearLayer(4)
        self.n.addInputModule(inLayer)
        self.n.addModule(hiddenLayer)
        self.n.addOutputModule(outLayer)

        in_to_hidden = FullConnection(inLayer, hiddenLayer)
        hidden_to_out = FullConnection(hiddenLayer, outLayer)

        self.n.addConnection(in_to_hidden)
        self.n.addConnection(hidden_to_out)

        self.n.sortModules()
        self.ds = SupervisedDataSet(8, 4) 
        self.trainer = BackpropTrainer(self.n, self.ds)

    def run(self, inputs):
        # The original omitted 'self' and called inputs.size(); len() works
        # for both lists and NumPy arrays here.
        if len(inputs) == self.numInputs:
            return self.n.activate(inputs)
        else:
            print "number of inputs does not match"

    def addRule(self,rule):
        self.ds.append(rule)

    def saveNetwork(self):
        fileObject = open('networks/avoidandfindv1', 'wb')  # binary mode, as pickle expects
        pickle.dump(self.n, fileObject)

        fileObject.close()
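
Since the network is stored with pickle, it can be restored the same way. A minimal loading sketch, assuming the path written by saveNetwork() above:

import pickle

# Restore the network saved by MoveBrain.saveNetwork()
with open('networks/avoidandfindv1', 'rb') as fileObject:
    net = pickle.load(fileObject)
print net.activate([0] * 8)  # eight inputs, matching MoveBrain.numInputs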
Code example #2
# Imports implied by the snippet (not shown in the original file)
import pickle

from pybrain.structure import RecurrentNetwork, LinearLayer, SigmoidLayer, BiasUnit, FullConnection
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer


class BrainController:

    indim = 2
    outdim = 2

    def __init__(self, trained_net=None):
        if trained_net is None:
            self.net = RecurrentNetwork()
            self.init_network(self.net)
        else:
            self.net = trained_net

    def init_network(self, net):
        net.addInputModule(LinearLayer(2, 'in'))
        net.addModule(SigmoidLayer(3, 'hidden'))
        net.addOutputModule(LinearLayer(2, 'out'))
        net.addModule(BiasUnit(name='bias'))
        net.addConnection(FullConnection(net['in'], net['hidden']))
        net.addConnection(FullConnection(net['hidden'], net['out']))
        net.sortModules()

    def train(self, data):
        ds = SupervisedDataSet(2, 2)
        for sample_input, target in data:
            ds.addSample(sample_input, target)

        trainer = BackpropTrainer(self.net,
                                  ds,
                                  learningrate=0.01,
                                  momentum=0.99,
                                  verbose=True)

        max_error = 1e-5
        error = 1
        while abs(error) >= max_error:
            error = trainer.train()

        #self.validate_net()
        f = open('neuro.net', 'wb')  # binary mode, as pickle expects
        pickle.dump(self.net, f)
        f.close()

    def validate_net(self):
        print self.net.activate([0, 0])
        print self.net.activate([0, 1])
        print self.net.activate([0, 2])
        print self.net.activate([1, 0])
        print self.net.activate([1, 1])
        print self.net.activate([1, 2])
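
A minimal usage sketch for the class above; the training pairs are invented placeholders (indim and outdim are both 2), not data from the original project:

controller = BrainController()
data = [([0, 0], [0, 0]), ([0, 1], [1, 0]), ([1, 0], [1, 0]), ([1, 1], [0, 1])]
controller.train(data)
controller.validate_net()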
Code example #3
File: networks.py (project: Boblogic07/pybrain)
""" Recurrent networks are working in the same way, except that the recurrent connections
need to be explicitly declared upon construction.

We can modify our existing network 'net2' and add a recurrent connection on the hidden layer: """

n2.addRecurrentConnection(FullConnection(n2['h'], n2['h'], name='rec'))

""" After every structural modification, if we want ot use the network, we call 'sortModules()' again"""

n2.sortModules()
print n2

""" As the network is now recurrent, successive activations produce different outputs: """

print n2.activate([1, 2]),
print n2.activate([1, 2]),
print n2.activate([1, 2])

""" The 'reset()' method re-initializes the network, and with it sets the recurrent
activations to zero, so now we get the same results: """

n2.reset()
print n2.activate([1, 2]),
print n2.activate([1, 2]),
print n2.activate([1, 2])

""" This is already a good coverage of the basics, but if you're an advanced user
you might want to find out about the possibilities of nesting networks within
others, using weight-sharing, and more exotic types of networks, connections
and modules... but that goes beyond the scope of this tutorial.
Code example #4
# Imports implied by the snippet; DS is assumed to be a PyBrain dataset
# (e.g. a SequentialDataSet) built earlier in the original file.
import numpy as np
import matplotlib.pyplot as plt
from pybrain.structure import RecurrentNetwork, LinearLayer, TanhLayer, FullConnection
from pybrain.supervised import RPropMinusTrainer

train_set, test_set = DS.splitWithProportion(0.7)

# build our recurrent network with 10 hidden neurodes, one recurrent
# connection, using tanh activation functions
net = RecurrentNetwork()
hidden_neurodes = 10
net.addInputModule(LinearLayer(len(train_set["input"][0]), name="in"))
net.addModule(TanhLayer(hidden_neurodes, name="hidden1"))
net.addOutputModule(LinearLayer(len(train_set["target"][0]), name="out"))
net.addConnection(FullConnection(net["in"], net["hidden1"], name="c1"))
net.addConnection(FullConnection(net["hidden1"], net["out"], name="c2"))
net.addRecurrentConnection(FullConnection(net["out"], net["hidden1"], name="cout"))
net.sortModules()
net.randomize()

# train for 30 epochs (overkill) using the rprop- training algorithm
trainer = RPropMinusTrainer(net, dataset=train_set, verbose=True)
trainer.trainOnDataset(train_set, 30)

# test on training set
predictions_train = np.array([net.activate(train_set["input"][i])[0] for i in xrange(len(train_set))])
plt.plot(train_set["target"], c="k")
plt.plot(predictions_train, c="r")
plt.show()

# and on test set
predictions_test = np.array([net.activate(test_set["input"][i])[0] for i in xrange(len(test_set))])
plt.plot(test_set["target"], c="k")
plt.plot(predictions_test, c="r")
plt.show()
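
One caveat, not addressed in the original snippet: a RecurrentNetwork keeps state across activate() calls, so the test-set predictions above start from whatever state the training-set loop left behind. Resetting between the two evaluation loops (a small addition, not in the original) gives the test set a clean start:

net.reset()  # clear recurrent activations before evaluating the test set
predictions_test = np.array([net.activate(test_set["input"][i])[0] for i in xrange(len(test_set))])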
Code example #5
File: languagelearner.py (project: sl/babble)
# Imports implied by the snippet (not shown in the original file)
import random

from pybrain.structure import RecurrentNetwork, LinearLayer, LSTMLayer, FullConnection
from pybrain.datasets import SequentialDataSet
from pybrain.supervised.trainers import BackpropTrainer


class LanguageLearner:

	__OUTPUT = "Sample at {0} epochs (prompt=\"{1}\", length={2}): {3}"

	def __init__(self, trainingText, hiddenLayers, hiddenNodes):
		self.__initialized = False
		with open(trainingText) as f:
			self.raw = f.read()
		self.characters = list(self.raw)
		self.rawData = list(map(ord, self.characters))
		print("Creating alphabet mapping...")
		self.mapping = []
		for charCode in self.rawData:
			if charCode not in self.mapping:
				self.mapping.append(charCode)
		print("Mapping of " + str(len(self.mapping)) + " created.")
		print(str(self.mapping))
		print("Converting data to mapping...")
		self.data = []
		for charCode in self.rawData:
			self.data.append(self.mapping.index(charCode))
		print("Done.")
		self.dataIn = self.data[:-1:]
		self.dataOut = self.data[1::]
		self.inputs = 1
		self.hiddenLayers = hiddenLayers
		self.hiddenNodes = hiddenNodes
		self.outputs = 1

	def initialize(self, verbose):
		print("Initializing language learner...")
		self.verbose = verbose

		# Create network and modules
		self.net = RecurrentNetwork()
		inp = LinearLayer(self.inputs, name="in")
		hiddenModules = []
		for i in range(0, self.hiddenLayers):
			hiddenModules.append(LSTMLayer(self.hiddenNodes, name=("hidden-" + str(i + 1))))
		outp = LinearLayer(self.outputs, name="out")

		# Add modules to the network with recurrence
		self.net.addOutputModule(outp)
		self.net.addInputModule(inp)
		
		for module in hiddenModules:
			self.net.addModule(module)

		# Create connections

		self.net.addConnection(FullConnection(self.net["in"], self.net["hidden-1"]))
		for i in range(0, len(hiddenModules) - 1):
			self.net.addConnection(FullConnection(self.net["hidden-" + str(i + 1)], self.net["hidden-" + str(i + 2)]))
			self.net.addRecurrentConnection(FullConnection(self.net["hidden-" + str(i + 1)], self.net["hidden-" + str(i + 1)]))
		self.net.addRecurrentConnection(FullConnection(self.net["hidden-" + str(len(hiddenModules))],
			self.net["hidden-" + str(len(hiddenModules))]))
		self.net.addConnection(FullConnection(self.net["hidden-" + str(len(hiddenModules))], self.net["out"]))
		self.net.sortModules()

		self.trainingSet = SequentialDataSet(self.inputs, self.outputs)
		# Note: starting a new sequence for every sample gives the recurrent
		# net no temporal context across characters; one long sequence may
		# be what was intended.
		for x, y in zip(self.dataIn, self.dataOut):
			self.trainingSet.newSequence()
			self.trainingSet.appendLinked([x], [y])

		self.net.randomize()

		print("Neural network initialzed with structure:")
		print(self.net)

		self.trainer = BackpropTrainer(self.net, self.trainingSet, verbose=verbose)
		self.__initialized = True
		print("Successfully initialized network.")

	def train(self, epochs, frequency, prompt, length):
		if not self.__initialized:
			raise Exception("Attempted to train uninitialized LanguageLearner")
		print ("Beginning training for " + str(epochs) + " epochs...")
		if frequency >= 0:
			print(LanguageLearner.__OUTPUT.format(0, prompt, length, self.sample(prompt, length)))
		for i in range(1, epochs):
			print("Error at " + str(i) + " epochs: " + str(self.trainer.train()))
			if i % frequency == 0:
				print(LanguageLearner.__OUTPUT.format(i, prompt, length, self.sample(prompt, length)))
		print("Completed training.")

	def sample(self, prompt, length):
		self.net.reset()
		if prompt is None:
			prompt = chr(random.choice(self.mapping))
		output = prompt
		# The network was trained on alphabet indices, not raw character
		# codes, so translate the prompt into its mapping index first.
		charCode = self.mapping.index(ord(prompt))
		for i in range(0, length):
			sampledResult = self.net.activate([charCode])
			charCode = int(round(sampledResult[0]))
			if charCode < 0 or charCode >= len(self.mapping):
				return output + "#TERMINATED_SAMPLE(reason: learner guessed invalid character)"
			output += chr(self.mapping[charCode])
		return output
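
A hedged usage sketch for the class above; the corpus path and layer sizes are invented for illustration:

learner = LanguageLearner("corpus.txt", hiddenLayers=2, hiddenNodes=64)
learner.initialize(verbose=False)
learner.train(epochs=100, frequency=10, prompt="a", length=40)
print(learner.sample("a", 40))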
Code example #6
File: tst.py (project: ElDonClaudio/NNET)
# The snippet starts mid-file; the following setup is reconstructed from how
# the names are used below (the input width follows from activate([1, 2, 3]);
# the hidden and output widths are guesses).
from pybrain.structure import RecurrentNetwork, FeedForwardNetwork
from pybrain.structure import LinearLayer, SigmoidLayer, FullConnection

inputVector, hiddenVector, outputVector = 3, 3, 1

n = RecurrentNetwork()
inLayer = LinearLayer(inputVector, name='inputLayer')
hiddenLayerA = SigmoidLayer(hiddenVector, name='hiddenLayerA')
hiddenLayerB = SigmoidLayer(hiddenVector, name='hiddenLayerB')
outputLayer = LinearLayer(outputVector, name='outputLayer')

n.addInputModule(inLayer)
n.addModule(hiddenLayerA)
n.addModule(hiddenLayerB)
n.addOutputModule(outputLayer)

n.addConnection(FullConnection(n['inputLayer'], n['hiddenLayerA'], name='c1'))
n.addConnection(FullConnection(n['hiddenLayerA'], n['hiddenLayerB'], name='c2'))
n.addConnection(FullConnection(n['hiddenLayerB'], n['outputLayer'], name='c3'))

n.addRecurrentConnection(FullConnection(n['hiddenLayerA'], n['hiddenLayerB'], name='rec3'))

n.sortModules()
print 'Network One (Recurrent)' + str(n.activate([1,2,3]))
print 'Network One (Recurrent)' + str(n.activate([1,2,3]))

####
#FEED FORWARD NETWORK
####

n2 = FeedForwardNetwork()

inLayer2 = LinearLayer(inputVector, name='inputLayer')
hiddenLayerA2 = SigmoidLayer(hiddenVector, name='hiddenLayerA')
hiddenLayerB2 = SigmoidLayer(hiddenVector, name='hiddenLayerB')
outputLayer2 = LinearLayer(outputVector, name='outputLayer')

# The original re-added n's modules here; the freshly built *2 copies are
# clearly what was meant, since a module cannot belong to two networks.
n2.addInputModule(inLayer2)
n2.addModule(hiddenLayerA2)
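
The original file is cut off here. A minimal completion of the feed-forward half, mirroring the recurrent network above (reconstructed, not from the original project):

n2.addModule(hiddenLayerB2)
n2.addOutputModule(outputLayer2)

n2.addConnection(FullConnection(n2['inputLayer'], n2['hiddenLayerA']))
n2.addConnection(FullConnection(n2['hiddenLayerA'], n2['hiddenLayerB']))
n2.addConnection(FullConnection(n2['hiddenLayerB'], n2['outputLayer']))

n2.sortModules()

# A feed-forward network is stateless, so repeated activations agree:
print 'Network Two (Feed Forward)' + str(n2.activate([1, 2, 3]))
print 'Network Two (Feed Forward)' + str(n2.activate([1, 2, 3]))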
Code example #7
File: Network.py (project: Oregand/4THYEARPROJECT)
# The snippet starts mid-file; n (a feed-forward net), r (a recurrent net)
# and the two named connections are reconstructed from how they are used.
from pybrain.structure import FeedForwardNetwork, RecurrentNetwork
from pybrain.structure import LinearLayer, SigmoidLayer, FullConnection

n = FeedForwardNetwork()
n.addInputModule(LinearLayer(2, name='in'))
n.addModule(SigmoidLayer(3, name='hidden'))
n.addOutputModule(LinearLayer(1, name='out'))
in_to_hidden = FullConnection(n['in'], n['hidden'])
hidden_to_out = FullConnection(n['hidden'], n['out'])
r = RecurrentNetwork()

n.addConnection(in_to_hidden)
n.addConnection(hidden_to_out)

n.sortModules()


r.addInputModule(LinearLayer(2, name='in'))
r.addModule(SigmoidLayer(3, name='hidden'))
r.addOutputModule(LinearLayer(1, name='out'))
# Note: the original wired r's connections to n's modules; a connection has
# to reference modules of the network it is added to.
r.addConnection(FullConnection(r['in'], r['hidden'], name='c1'))
r.addConnection(FullConnection(r['hidden'], r['out'], name='c2'))

r.addRecurrentConnection(FullConnection(r['hidden'], r['hidden'], name='c3'))

r.sortModules()

#Show trainable weights
print "These are the trainable weights"
print in_to_hidden.params
print hidden_to_out.params

#Test Prints
print n.activate([1, 2])
print n

print ""

print r.activate((2, 2))
print r
Code example #8
""" ...more fancy than a stack of fully connected layers: """

n3 = buildNetwork(2, 3, 1, bias=False)
""" Recurrent networks are working in the same way, except that the recurrent connections
need to be explicitly declared upon construction. 

We can modify our existing network 'net2' and add a recurrent connection on the hidden layer: """

n2.addRecurrentConnection(FullConnection(n2['h'], n2['h'], name='rec'))
""" After every structural modification, if we want ot use the network, we call 'sortModules()' again"""

n2.sortModules()
print n2
""" As the network is now recurrent, successive activations produce different outputs: """

print n2.activate([1, 2]),
print n2.activate([1, 2]),
print n2.activate([1, 2])
""" The 'reset()' method re-initializes the network, and with it sets the recurrent 
activations to zero, so now we get the same results: """

n2.reset()
print n2.activate([1, 2]),
print n2.activate([1, 2]),
print n2.activate([1, 2])
""" This is already a good coverage of the basics, but if you're an advanced user
and would like to know more about the possibilities of nesting networks within
others, using weight-sharing, and more exotic types of networks, connections 
and modules, then please read on.

 
Code example #9
  if tstresult <= 0.5:
       print('Bingo !!!!!!!!!!!!!!!!!!!!!!')
       break

  # export network
  NetworkWriter.writeToFile(net, 'signal_weight.xml')

# run test
actual_price = np.array([n[3] for n in testing_input])
predict_short = []
predict_long = []
result_long = []
result_short = []

for x, y in zip(testing_input, testing_output):
  z = net.activate(x)
  predict_short.append(z[0])
  predict_long.append(z[1])
  # The original compared z against testing_output[0]/[1] (fields of the
  # first sample) and swapped the long/short indices; y is the current target.
  result_short.append(abs(y[0] - z[0]))
  result_long.append(abs(y[1] - z[1]))

predict_short = np.asarray(predict_short)
predict_long = np.asarray(predict_long)
short_up_idxs = predict_short > 0
short_down_idxs = predict_short < 0
long_up_idxs = predict_long > 0
long_down_idxs = predict_long < 0

# print test
def normalize_result(data):
  max_v = np.max(data)
Code example #10
#!/usr/bin/env python

from pybrain.structure import RecurrentNetwork
from pybrain.structure import LinearLayer, SigmoidLayer
from pybrain.structure import FullConnection


n = RecurrentNetwork()

n.addInputModule(LinearLayer(2, name='in'))
n.addModule(SigmoidLayer(3, name='hidden'))
n.addOutputModule(LinearLayer(1, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='con1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='con2'))

# The RecurrentNetwork class has one additional method, .addRecurrentConnection(),
# which looks back in time one timestep. We can add one from the hidden layer
# to itself:
n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='con3'))

n.sortModules()


print n.activate((2, 2))
print n.activate((2, 2))
print n.activate((2, 2))


n.reset()
print n.activate((2, 2))
print n.activate((2, 2))
print n.activate((2, 2))
Code example #11
File: NN.py (project: cagatay/Evolution-9)
# Imports implied by the snippet; 'rttl' is a project-specific module.
import math

from pybrain.structure import RecurrentNetwork, LinearLayer, SigmoidLayer, FullConnection
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer


class neural_network(object):
    def __init__(self, name, dataset, trained, store):
        self.name = name
        self.store = store
        self.trained = trained
        self.dataset = dataset

        self.net = RecurrentNetwork()
        self.net.addInputModule(LinearLayer(2, name='in'))
        self.net.addModule(SigmoidLayer(3, name='hidden'))
        self.net.addOutputModule(LinearLayer(2, name='out'))
        # The original connected 'in' straight to 'out', leaving the hidden
        # layer with no input; 'in' -> 'hidden' matches the usual pattern.
        self.net.addConnection(FullConnection(self.net['in'], self.net['hidden'], name='c1'))
        self.net.addConnection(FullConnection(self.net['hidden'], self.net['out'], name='c2'))
        self.net.addRecurrentConnection(FullConnection(self.net['hidden'], self.net['hidden'], name='c3'))
        self.net.sortModules()
        '''
        self.net = buildNetwork(2, 3, 2)
        '''
        if not self.trained:
            self.train()

        return

    def save(self):
        self.store.save_neural_network(self.name, self.dataset, self.trained)
        return

    @classmethod
    def get_saved(cls, name, store):
        result = store.get_neural_network(name)

        return cls(name, result[0], result[1], store) if result else None

    @classmethod
    def get_list(cls, store):
        result = store.get_neural_network_list()
        print result
        return [x for x in result]

    @classmethod
    def new(cls, name, store, ds_file_uri):
        dataset = rttl.dataset_from_file(ds_file_uri)

        store.new_neural_network(name, dataset)
        return

    def evaluate(self, genome):
        err = 0.0
        for i in range(len(genome) - 1):
            print '---------- input ------------'
            print genome[i]
            output = self.net.activate(genome[i])
            print '--------- output ------------'
            print output
            target = genome[i + 1]
            err += (math.fabs(output[0] - target[0]) + math.fabs(output[1] - target[1]))

        # Fitness is the inverse error; guard against a perfect score.
        return 1.0 / err if err else float('inf')

    def train(self):
        ds_store = []
        for song in self.dataset:
            ds_in = song[:len(song) - 1]
            ds_out = song[1:]

            ds = SupervisedDataSet(2, 2)

            for i in range(len(song) - 1):
                #if ds_in[i] not in ds_store:
                ds.addSample(ds_in[i], ds_out[i])
                ds_store.append(ds_in[i])

            if len(ds):
                trainer = BackpropTrainer(self.net, ds, verbose=True)
                trainer.trainUntilConvergence() 
        self.save()
Code example #12
if __name__ == "__main__":

    from pybrain.structure import RecurrentNetwork
    from pybrain.structure import LinearLayer
    from pybrain.structure import SigmoidLayer
    from pybrain.structure import FullConnection

    net = RecurrentNetwork()

    net.addInputModule(LinearLayer(2, "in"))
    net.addModule(SigmoidLayer(3, "hidden"))
    net.addOutputModule(LinearLayer(1, "out"))

    net.addConnection(FullConnection(net["in"], net["hidden"], "c1"))
    net.addConnection(FullConnection(net["hidden"], net["out"], "c2"))
    net.addRecurrentConnection(FullConnection(net["hidden"], net["hidden"], "c3-recurrent"))

    net.sortModules()

    print net

    for i in xrange(5):
        print net.activate([2, 2])

    print "reset"
    net.reset()

    for i in xrange(5):
        print net.activate([2, 2])
Code example #13
from pybrain.structure import RecurrentNetwork, LinearLayer, SigmoidLayer
from pybrain.structure import FullConnection

n           = RecurrentNetwork()
inLayer     = LinearLayer(2, name="Input")
hiddenLayer = SigmoidLayer(3, name="Hidden")
outLayer    = LinearLayer(1, name="Output")

n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addOutputModule(outLayer)

n.addConnection(FullConnection(inLayer, hiddenLayer, name="C_IH"))
n.addConnection(FullConnection(hiddenLayer, outLayer, name="C_HO"))
n.addRecurrentConnection(FullConnection(n['Hidden'], n['Hidden'], name='C_HH'))

n.sortModules()
n.reset()
print n.activate((2,2))

Code example #14
File: neural_network.py (project: ammeyjohn/rubbish)
# The snippet starts mid-file: net, ds, delta_error, df and columns are
# defined earlier in the original; the pandas/matplotlib imports are implied.
import pandas as pd
import matplotlib.pyplot as plt
from pybrain.supervised.trainers import BackpropTrainer

ds.endOfData()

# Create bp trainer
trainer = BackpropTrainer(net, ds)

# Trains the datasets
print 'Training ...'
epoch = 1000
error = 1.0
while error > delta_error and epoch >= 0:
    error = trainer.train()
    epoch -= 1
    print 'Epoch = %d, Error = %f' % (epoch, error)

# To store the epoch error
err_array = []

for row in df.itertuples(index=False):
    # activate() returns an array; take the single output unit so the
    # '%f' formatting below gets a scalar.
    result = net.activate(row[0:columns-2])[0]
    expect = row[columns-2]
    error = abs(expect - result)
    err_array.append(error)
    print 'Result = %f, Expect = %f, Error = %f' % (result, expect, error)

err_df = pd.DataFrame(err_array)
err_df.plot()
plt.show()

print 'Sum Error = %f' % err_df.sum(axis=0)
Code example #15
                    # This snippet sits inside nested loops (over epochs, hidw,
                    # depth, moment and learnRate) that are not shown; net, ds,
                    # dst, the X/Y arrays and testdat come from that context.
                    for i in range(Xtr.shape[0]):
                        ds.addSample(Xtr[i,:],Ytr[i])

                    for i in range(Xte.shape[0]):
                        dst.addSample(Xte[i,:],Yte[i])

                    #net = buildNetwork(ds.indim,ds.indim,ds.indim,ds.indim,ds.outdim,recurrent=False)
                    trainer = BackpropTrainer(net,learningrate=learnRate,momentum=moment,verbose=False)
                    #trainer.trainOnDataset(ds,30)
                    trainer.trainUntilConvergence(ds,10)

                    #trainer.testOnData(verbose=True)

                    mse = 0.0
                    for i in range(Xte.shape[0]):
                        mse += pow(net.activate(Xte[i])[0]-Yte[i],2)
                    mse /= Xte.shape[0]
                    mseTrain = 0.0
                    for i in range(Xtr.shape[0]):
                        mseTrain += pow(net.activate(Xtr[i])[0]-Ytr[i],2)
                    mseTrain /= Xtr.shape[0]
                    print 'mse(test):{},mse(train):{},epoch:{},width:{},depth:{},momentum:{},learnrate:{}'.format(mse,mseTrain,epochs,hidw,depth,moment,learnRate)
                    testdat.write('{},{},{},{},{},{},{}\n'.format(mse,mseTrain,epochs,hidw,depth,learnRate,moment))

testdat.close()
#modelfile = open('model.dat','w')
#pickle.dump(net,modelfile)
#modelfile.close()

#fh = open('predictions.csv','w')    # open file for upload
#fh.write('ID,Prediction\n')         # output header line
Code example #16
from pybrain.structure import RecurrentNetwork
from pybrain.structure import LinearLayer, SigmoidLayer
from pybrain.structure import FullConnection

n = RecurrentNetwork()

n.addInputModule(LinearLayer(2, name='in'))
n.addModule(SigmoidLayer(3, name='hidden'))
n.addOutputModule(LinearLayer(1, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))

n.sortModules()
n.reset()
print(n.activate((2, 2)))
Code example #17
File: musicnetwork.py (project: ml-lab/Bach_AI)
# The snippet starts mid-file inside a loop over the hidden layers; the loop
# header below is reconstructed, and net, ds, layerCount and random are
# defined earlier in the original file.
for x in range(1, layerCount - 1):
    net.addConnection(FullConnection(net[('hidden' + str(x))], net['hidden' + str(x + 1)], name=('c' + str(x + 1))))
net.addConnection(FullConnection(net['hidden' + str(layerCount - 1)], net['out'], name='cOut'))
net.sortModules()
from pybrain.supervised import RPropMinusTrainer
trainer = RPropMinusTrainer(net, dataset=ds)

epochcount = 0
while True:
    startingnote = random.choice(range(1, 17))
    startingnote2 = random.choice(range(1, 17))
    startingduration = random.choice(range(1,17))
    startingduration2 = random.choice(range(1, 17))
    song = [[startingnote, startingduration, 1, 1, 0, startingnote2, startingduration2, 1, 1, 0]]
    length = 50
    while len(song) < length:
        song.append(net.activate(song[-1]).tolist())
    newsong = []
    for x in song:
        newx = []
        newy = []
        for i in x:
            if len(newx) < 5:
                newx.append(int(i))
            else:
                newy.append(int(i))
        newsong.append(newx)
        newsong.append(newy)

    print newsong
    print "The above song is after " + str(epochcount) + " epochs."
    trainer.trainEpochs(epochs=1)
    epochcount += 1  # the original never incremented this counter
Code example #18
File: xor.py (project: radut/aja)
# Imports and the network itself are implied by the snippet; buildNetwork's
# layer sizes here are an assumption consistent with the XOR data below.
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

network = buildNetwork(2, 3, 1, bias=True)

ds = SupervisedDataSet(2, 1)


ds.addSample([1,1],[0])
ds.addSample([0,0],[0])
ds.addSample([0,1],[1])
ds.addSample([1,0],[1])

#Train the network
trainer = BackpropTrainer(network, ds, momentum=0.99)

print network

print "\nInitial weights: ", network.params

max_error = 1e-7
error, count = 1, 1000
#Train
while abs(error) >= max_error and count > 0:
    error = trainer.train()
    count = count - 1

print "Final weights: ", network.params
print "Error: ", error

#Test data
print '\n1 XOR 1:',network.activate([1,1])[0]
print '1 XOR 0:',network.activate([1,0])[0]


Code example #19
File: network.py (project: sukki89/Word-Prediction)
from pybrain.structure import RecurrentNetwork, LinearLayer, SigmoidLayer
from pybrain.structure import FullConnection

#get no of words from corpus and store in numInputNodes and numOutputNodes
numInputNodes = 10
numHiddenNodes = 5
numOutputNodes = 10

#Creating a recurrent network with 10 input nodes, 10 output nodes and 5 hidden nodes
network = RecurrentNetwork()
network.addInputModule(LinearLayer(numInputNodes, name='in'))
network.addModule(SigmoidLayer(numHiddenNodes, name='hidden'))
network.addOutputModule(LinearLayer(numOutputNodes, name='out'))
in_to_hidden = FullConnection(network['in'], network['hidden'], name='connection1')
hidden_to_out = FullConnection(network['hidden'], network['out'], name='connection2')
hidden_to_hidden = FullConnection(network['hidden'], network['hidden'], name='connection3')
network.addConnection(in_to_hidden)
network.addConnection(hidden_to_out)
network.addRecurrentConnection(hidden_to_hidden)
network.sortModules()
print network.activate([1,0,0,0,0,0,0,0,0,0])
'''
print in_to_hidden.params
print "\n\n"
print hidden_to_out.params
print "\n\n"
print hidden_to_hidden.params
print "\n\n"
'''

#Input value initially: 1 of n coding of word + previous state s(t-1)
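
The '1 of n' (one-hot) coding mentioned above can be produced with a small helper; this is an illustrative sketch, not part of the original file:

def one_hot(index, size):
    #1-of-n coding: all zeros except a single 1 at the word's index
    vec = [0] * size
    vec[index] = 1
    return vec

print network.activate(one_hot(0, numInputNodes))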
Code example #20
File: QimbWorkbook.py (project: bilykigor/qimb)
#Train net
from pybrain.supervised.trainers import BackpropTrainer
trainer = BackpropTrainer(net, ds, momentum=0.1, verbose=True, weightdecay=0.01)

#for i in range(10):
#    if i%20==0:
#        print i
#    trainer.trainEpochs(1)
    
trnerr, valerr = trainer.trainUntilConvergence(dataset=ds, maxEpochs=10)
plt.plot(trnerr, 'b', valerr, 'r')

# <codecell>

print net.activate(Xtest.ix[1,:])
print ytest.ix[1,:]

# <codecell>

to_hidden=numpy.dot(in_to_hidden.params.reshape(hiddenLayer.dim,inLayer.dim),Xtest.ix[0,:].as_matrix())

# <codecell>

to_out=hiddenLayer.activate(to_hidden)

# <codecell>

in_to_hidden.params.reshape(hiddenLayer.dim,inLayer.dim)

# <codecell>
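
To finish the hand computation, the hidden activations can be pushed through the output weights the same way. A sketch assuming the notebook also defines hidden_to_out and outLayer (names invented here to mirror in_to_hidden and inLayer):

to_final = numpy.dot(hidden_to_out.params.reshape(outLayer.dim, hiddenLayer.dim), to_out)
print to_final  # compare with net.activate(Xtest.ix[0,:])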
Code example #21
net.addConnection(conn_in_to_hid)
net.addConnection(conn_hid_to_out)
net.addRecurrentConnection(recurrent_connection)

net.sortModules()

# Since our preprocessor_engine does stuff and writes output to output.txt,
# neural_engine takes its input from it
input_file = open('output.txt', 'r')

# each line in output.txt is a preprocessed token.
# Read line by line and remove endline character
input_tokens = input_file.readlines()
input_tokens = [t.strip() for t in input_tokens]
input_file.close()

# for each token, convert it to vector of dimension equal to our vectorizer_engine's dimension
# send that vector to neural network
# output of the network at every time step will be a vector of dimension equal to our vectorizer_engine's dimension
# convert that vector to a word using our vectorizer engine and write it to the file 'summary-text.txt'
summary_file = open('summary-text.txt', 'w+')
for word in input_tokens:
    input_vec = vec_engine.word2vec(word)
    output_vec = net.activate(input_vec)
    output_word = vec_engine.vec2word(output_vec)
    summary_file.write(output_word + " ")

summary_file.flush()
summary_file.close()
Code example #22
File: xor.py (project: Ppfox/FGA-IA2015)
max_error = 1
error, epocas = 5, 1000
epocasPercorridas = 0

# Train
while epocas > 0:
    error = trainer.train()
    epocas = epocas - 1
    epocasPercorridas = epocasPercorridas + 1

    if error == 0:
        break

# print "\n Treinando ate a convergencia. . ."

# trainer.trainUntilConvergence()

# print "\n\nRNA treinada ate a convergencia!"

print "\n\nPesos finais: ", network.params
print "\nErro final: ", error

print "\n\nTotal de epocas percorridas: ", epocasPercorridas

# Test data

print '\n\n1 XOR 1: Expected = 0, Computed = ', network.activate([1, 1])[0]
print '1 XOR 0: Expected = 1, Computed =', network.activate([1, 0])[0]
print '0 XOR 1: Expected = 1, Computed =', network.activate([0, 1])[0]
print '0 XOR 0: Expected = 0, Computed =', network.activate([0, 0])[0]
Code example #23
File: networks.py (project: Angeliqe/pybrain)
""" Recurrent networks are working in the same way, except that the recurrent connections
need to be explicitly declared upon construction.

We can modify our existing network 'net2' and add a recurrent connection on the hidden layer: """

n2.addRecurrentConnection(FullConnection(n2['h'], n2['h'], name='rec'))

""" After every structural modification, if we want ot use the network, we call 'sortModules()' again"""

n2.sortModules()
print(n2)

""" As the network is now recurrent, successive activations produce different outputs: """

print(n2.activate([1, 2]), end=' ')
print(n2.activate([1, 2]), end=' ')
print(n2.activate([1, 2]))

""" The 'reset()' method re-initializes the network, and with it sets the recurrent
activations to zero, so now we get the same results: """

n2.reset()
print(n2.activate([1, 2]), end=' ')
print(n2.activate([1, 2]), end=' ')
print(n2.activate([1, 2]))

""" This is already a good coverage of the basics, but if you're an advanced user
you might want to find out about the possibilities of nesting networks within
others, using weight-sharing, and more exotic types of networks, connections
and modules... but that goes beyond the scope of this tutorial.
Code example #24
            final_delta_delta_mfcc = np.concatenate(
                (final_delta_delta_mfcc, [temp_delta_delta_mfcc]), axis=0)
            final_dataset = np.concatenate((final_dataset, [temp_dataset]),
                                           axis=0)

    dataset_op = 0
    dataset_ip = 0
    ds.clear()
    parameters = np.load('parameters.npy')
    LSTMre._setParameters(parameters)

    output = np.empty((100, 5))
    #print(LSTMre.params)
    print('____________>output')
    for i in range(100):
        output[i] = LSTMre.activate(final_dataset[i])
    final_op = np.mean(output, axis=0)
    print('YOU SAID--------------->')
    state = generate_target.final_output(final_op)

    # The original left this prompt commented out, which leaves is_correct
    # undefined below; a minimal reconstruction of the intended check:
    is_it_correct = input('Is it correct? (y/n) ')
    is_correct = (is_it_correct != 'n')

    if is_correct:
        car_send(state)
    '''do_train = input('Do you want to train? Enter --> x <-- to cancel')
        
    if do_train != 'x':
Code example #25
n.addInputModule(LinearLayer(2, name='entradas'))  # add the input module to the network, with 2 inputs
n.addModule(SigmoidLayer(3, name='ocultas'))  # add the hidden module: one hidden layer with 3 units
#n.addModule(TanhLayer(3, name='ocultas'))  # alternative: a hidden layer with 3 tanh units
n.addOutputModule(LinearLayer(1, name='salidas'))  # add the output module to the network, with 1 output

# information flow between the layers
n.addConnection(FullConnection(n['entradas'], n['ocultas'], name='con1'))  # connect the input layer to the hidden layer
n.addConnection(FullConnection(n['ocultas'], n['salidas'], name='con2'))  # connect the hidden layer to the output layer

n.addRecurrentConnection(FullConnection(n['ocultas'], n['ocultas'], name='con3'))  # extra recurrent self-connection

n.sortModules()  # sort the network

# inspect values (weights)
print n  # show the network structure
print ''
print n.params  # all the weights
print ''
print n.activate([1, 2])  # ask the network for the output for a given input
#n.reset()  # clear the network's recurrent state