Example #1
    def neuralnetworktrain(self):
        dataset = self.getdata()

        # Constructing a multiple-output neural network.
        # Other neural network architectures will also be experimented with,
        # such as several single-output networks.
        net = FeedForwardNetwork()
        inp = LinearLayer(9)
        h1 = SigmoidLayer(20)
        h2 = TanhLayer(10)
        outp = LinearLayer(3)

        # Adding the modules to the architecture
        net.addOutputModule(outp)
        net.addInputModule(inp)
        net.addModule(h1)
        net.addModule(h2)

        # Creating the connections
        net.addConnection(FullConnection(inp, h1))
        net.addConnection(FullConnection(h1, h2))
        net.addConnection(FullConnection(h2, outp))
        net.sortModules()

        # Training the neural network using Backpropagation
        t = BackpropTrainer(net, learningrate=0.01, momentum=0.5, verbose=True)
        t.trainOnDataset(dataset, 5)
        t.testOnData(verbose=False)

        # Saving the trained neural network information to file
        self.writetrainedinfo(net)
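Note: `self.getdata()` is not shown here. A minimal sketch of a compatible dataset builder, assuming nine input features and three targets per row (matching the layer sizes above):

# Hypothetical sketch only: getdata() is not part of this example.
# It must return a SupervisedDataSet with 9 inputs and 3 targets to
# match LinearLayer(9) and LinearLayer(3) above.
from pybrain.datasets import SupervisedDataSet

def getdata(rows):
    dataset = SupervisedDataSet(9, 3)
    for features, targets in rows:  # 9 floats in, 3 floats out
        dataset.addSample(features, targets)
    return dataset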
Example #2
	def computeModel(self, path, user):
		# Create a supervised dataset for training.
		trndata = SupervisedDataSet(24, 1)
		tstdata = SupervisedDataSet(24, 1)
		
		#Fill the dataset.
		for number in range(0,10):
			for variation in range(0,7):
				# Pass all the features as inputs.
				trndata.addSample(self.getSample(user, number, variation),(user.key,))
				
			for variation in range(7,10):
				# Pass all the features as inputs.
				tstdata.addSample(self.getSample(user, number, variation),(user.key,))
				
		# Build the LSTM.
		n = buildNetwork(24, 50, 1, hiddenclass=LSTMLayer, recurrent=True, bias=True)

		# define a training method
		trainer = BackpropTrainer(n, dataset = trndata, momentum=0.99, learningrate=0.00002)

		# carry out the training
		trainer.trainOnDataset(trndata, 2000)
		valueA = trainer.testOnData(tstdata)
		print '\tMSE -> {0:.2f}'.format(valueA)
		self.saveModel(n, '.\NeuralNets\SavedNet_%d' %(user.key))
		
		return n
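Because the network built here is recurrent (an LSTM hidden layer), activations carry state between calls. A small usage sketch, with `net` and `sample` as illustrative names:

# Usage sketch: 'net' is the network returned by computeModel and
# 'sample' is any 24-feature vector (both names are hypothetical).
net.reset()                        # clear recurrent state between independent samples
prediction = net.activate(sample)  # returns a length-1 output array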
Example #3
def train_dataset1(waze_speeds, drive_speeds):
    ds = SupervisedDataSet(1, 1)

    drive_speeds = np.sort(drive_speeds)
    i = 0
    while i < len(waze_speeds):
        indata = drive_speeds[i]
        outdata = drive_speeds[i]
        print drive_speeds[i]
        ds.addSample(indata, outdata)
        i = i + len(waze_speeds) / 5

    n = buildNetwork(ds.indim, 8, 8, ds.outdim, recurrent=True)
    t = BackpropTrainer(n, learningrate=0.001, momentum=0.05, verbose=True)
    t.trainOnDataset(ds, 3000)
    t.testOnData(verbose=True)

    predicted_values = []

    predicted_values.append(n.activate(20))
    predicted_values.append(n.activate(40))

    print predicted_values

    return predicted_values
Example #5
def train():
    logging.basicConfig(filename="logfile.log", level=logging.DEBUG)
    accents = ['CH', 'EN', 'IN', 'IR', 'IT', 'JA', 'KO']
    net = buildNetwork(13, 3, 7, hiddenclass=TanhLayer)
    ds = SupervisedDataSet(13, 7)

    logging.info("Started training network")

    try:
        speakers = os.listdir('samples')
        for speaker in speakers:
            samples = glob('samples/{}/part*'.format(speaker))
            for sample in samples:
                s = json.load(open(sample))
                out = [0, 0, 0, 0, 0, 0, 0]
                out[accents.index(s['accent'])] = 1

                for c in s['ceps']:
                    ds.addSample(c, out)

        trainer = BackpropTrainer(net)
        trainer.trainOnDataset(ds, 10000)
        trainer.testOnData(ds, verbose=True)

        NetworkWriter.writeToFile(net, 'models/model-{}.xml'.format(int(time())))
    except Exception as e:
        msg = "Something bad happened: {}".format(e.message)
        print msg
        logging.error(msg)
Example #6
def simpleNeuralNetworkTrain(fileName, numFeatures, numClasses, possibleOutputs, numHiddenNodes, numTrainingEpochs):

    data = np.genfromtxt(fileName)
    trnIn = data[:, 0:5]
    trnOut = data[:, 6]
    trnOut = [int(val) for val in trnOut]

    normalizeData(trnIn, numFeatures)
    trndata = ClassificationDataSet(numFeatures, possibleOutputs, nb_classes=numClasses)
    for row in range(0, len(trnIn)):
        tempListOut = []
        tempListIn = []
        tempListOut.append(int(trnOut[row]))
        for i in range(0, numFeatures):
            tempListIn.append(trnIn[row][i])
        trndata.addSample(tempListIn, tempListOut)

    trndata._convertToOneOfMany()

    #  When running for the first time
    myNetwork = buildNetwork(numFeatures, numHiddenNodes, numClasses, outclass=SoftmaxLayer, bias=True, recurrent=False)

    # Read from file after the first try.
    #  myNetwork = NetworkReader.readFrom('firstTime.xml')    # Use saved results.
    trainer = BackpropTrainer(myNetwork, dataset=trndata, momentum=0.0, verbose=True, weightdecay=0.0)
    for i in range(numTrainingEpochs):
        trainer.trainOnDataset(dataset=trndata)
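The commented-out NetworkReader line implies the trained network is saved after the first run. A sketch of the matching save step, assuming the same 'firstTime.xml' name:

# Sketch of the save step implied by the NetworkReader comment above.
from pybrain.tools.customxml import NetworkWriter
NetworkWriter.writeToFile(myNetwork, 'firstTime.xml')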
Example #7
def train_net(data_set, n, epochs=1):
    num_inputs = len(data_set[0][0][n])
    ds = SupervisedDataSet(num_inputs, 2)
    for i in range(len(data_set)):
        try:
            ds.appendLinked(data_set[i][0][n],
                            (data_set[i][1], data_set[i][2]))
        except:
            continue
    print str(len(ds)) + ' points successfully acquired'

    net = FeedForwardNetwork()
    net.addInputModule(LinearLayer(num_inputs, name='input'))
    net.addInputModule(BiasUnit(name='bias'))
    net.addOutputModule(LinearLayer(2, name='output'))
    net.addModule(SigmoidLayer(int((num_inputs + 2) / 2.), name='sigmoid'))
    net.addModule(TanhLayer(10, name='tanh'))
    net.addConnection(FullConnection(net['bias'], net['sigmoid']))
    net.addConnection(FullConnection(net['bias'], net['tanh']))
    net.addConnection(FullConnection(net['input'], net['sigmoid']))
    net.addConnection(FullConnection(net['sigmoid'], net['tanh']))
    net.addConnection(FullConnection(net['tanh'], net['output']))
    net.sortModules()

    trainer = BackpropTrainer(net,
                              learningrate=0.01,
                              momentum=0.1,
                              verbose=True)

    trainer.trainOnDataset(ds, epochs)

    return net
Example #8
    def buildBMTrainer(self):

        x, y = self.readexcel()

        per = int(len(x))
        # Normalize the data (normalization is generally required when using Sigmoid)
        sx = MinMaxScaler()
        sy = MinMaxScaler()
        xTrain = x[:per]
        xTrain = sx.fit_transform(xTrain)
        yTrain = y[:per]
        yTrain = sy.fit_transform(yTrain)

        # Initialize the feedforward neural network

        # NOTE: the dataset below is built from the raw x, y; the scaled
        # xTrain/yTrain computed above are not actually used for training.
        DS = SupervisedDataSet(x.shape[1], self.rescol)
        for i in range(len(x)):
            DS.addSample(x[i], y[i])
        self.__fnn = buildNetwork(DS.indim, 28, 28, DS.outdim, recurrent=True)


        # Train with backpropagation until convergence; at most 1000 epochs
        trainer = BackpropTrainer(self.__fnn,  learningrate=0.01, verbose=self.verbose)
        trainer.trainOnDataset(DS,1000)
        trainer.testOnData(verbose=True)
        # trainingErrors = trainer.trainUntilConvergence(maxEpochs=10000, continueEpochs=2000, validationProportion=0.5)
        # for i in range(10):
        #     trainingErrors = trainer.trainUntilConvergence(maxEpochs=15000, validationProportion=0)
        #     print 'Test: ' + str(trainingErrors[0][-2])
        # self.finalError = trainingErrors[0][-2]
        # if (self.verbose):
        #     print 'Final overall error:', self.finalError
        self.__sy = sy
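`sy` is stored on the instance, presumably so later predictions can be mapped back to the original scale. A minimal sketch of that inverse step, assuming the network had been trained on the scaled arrays and `sx` had been stored as well:

    # Hypothetical helper (not in the source): scale a new row with sx,
    # activate the network, then undo the target scaling with sy.
    def predict(self, row):
        scaled = self.__sx.transform([row])[0]   # assumes sx was stored too
        out = self.__fnn.activate(scaled)
        return self.__sy.inverse_transform([out])[0]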
Example #9
def entrenarSonoliento(red):
    # Initialize the dataset
    ds = SupervisedDataSet(4096,1)

    """Se crea el dataset, para ello procesamos cada una de las imagenes obteniendo los rostros,
       luego se le asignan los valores deseados del resultado la red neuronal."""


    for i,c in enumerate(os.listdir(os.path.dirname(path + '/static/img/sleepy/'))):
        a = 0
        while a < 50:
            try:
                a += 1 
                im3 = cv2.imread(path + '/static/img/sleepy/'+c)
                procesado = p.procesarImagen(im3)
                cara = d.deteccionFacial1(procesado)
                ds.addSample(cara.flatten(),10)
            except:
                pass
            

    trainer = BackpropTrainer(red, ds)
    print "Entrenando hasta converger"
    trainer.trainOnDataset(ds,100)
    NetworkWriter.writeToFile(red, 'rna_somnolencia.xml')



# to train the operator network manually
#red_operador = NetworkReader.readFrom('rna_operador.xml')
#entrenarOperador(red_operador)

# to train the drowsiness network manually
#red_somno = NetworkReader.readFrom('rna_somnolencia.xml')
#entrenarSonoliento(red_somno)
Example #10
def anntrain(xdata,ydata):#,epochs):
    #print len(xdata[0])
    ds=SupervisedDataSet(len(xdata[0]),1)
    #ds=ClassificationDataSet(len(xdata[0]),1, nb_classes=2)
    for i, algo in enumerate(xdata):
        ds.addSample(algo,ydata[i])
    # ds._convertToOneOfMany()  # not needed here
    net= FeedForwardNetwork()
    inp=LinearLayer(len(xdata[0]))
    h1=SigmoidLayer(1)
    outp=LinearLayer(1)
    net.addOutputModule(outp) 
    net.addInputModule(inp) 
    net.addModule(h1)
    #net=buildNetwork(len(xdata[0]),1,1,hiddenclass=TanhLayer,outclass=SoftmaxLayer)
    
    net.addConnection(FullConnection(inp, h1))  
    net.addConnection(FullConnection(h1, outp))

    net.sortModules()

    trainer=BackpropTrainer(net,ds)#, verbose=True)#dataset=ds,verbose=True)
    #trainer.trainEpochs(40)
    trainer.trainOnDataset(ds,40) 
    #trainer.trainUntilConvergence(ds, 20, verbose=True, validationProportion=0.15)
    trainer.testOnData()#verbose=True)
    #print 'Final weights:',net.params
    return net
Example #11
def train_neural_network():
    start = time.clock()
    ds = get_ds()

    # split main data to train and test parts
    train, test = ds.splitWithProportion(0.75)

    # build nn with 10 inputs, one hidden layer of 3 neurons, 1 output neuron
    net = buildNetwork(10,3,1, bias=True)

    # use backpropagation algorithm
    trainer = BackpropTrainer(net, train, momentum = 0.1, weightdecay = 0.01)

    # plot error
    trnError, valError = trainer.trainUntilConvergence(dataset = train, maxEpochs = 50)

    plot_error(trnError, valError)

    print "train the model..."
    trainer.trainOnDataset(train, 500)
    print "Total epochs: %s" % trainer.totalepochs

    print "activate..."
    out = net.activateOnDataset(test).argmax(axis = 1)
    percent = 100 - percentError(out, test['target'])
    print "%s" % percent

    end = time.clock()
    print "Time: %s" % str(end-start)
Example #12
def train_callback():
    trainer = BackpropTrainer(net,
                              learningrate=0.01,
                              momentum=0.0,
                              verbose=True)
    print 'MSE before', trainer.testOnData(ds, verbose=True)
    trainer.trainOnDataset(ds, 2000)
    print 'MSE after', trainer.testOnData(ds, verbose=True)
Example #13
def update_nn(net, inputs, outputs, learningrate=0.001, momentum=0.99):
    ds = SupervisedDataSet(3, 3)
    ds.addSample(list(inputs), outputs)

    train = BackpropTrainer(net,
                            ds,
                            learningrate=learningrate,
                            momentum=momentum)
    train.trainOnDataset(ds)
    return net
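A usage sketch for update_nn, assuming a 3-input/3-output network to match the SupervisedDataSet(3, 3) it builds internally:

# Usage sketch with illustrative sizes and values.
from pybrain.tools.shortcuts import buildNetwork

net = buildNetwork(3, 6, 3)   # hypothetical topology
net = update_nn(net, (0.1, 0.2, 0.3), (0.0, 1.0, 0.0))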
Example #14
def nn(tx, ty, rx, ry, iterations):
    network = buildNetwork(14, 5, 5, 1)
    ds = ClassificationDataSet(14,1, class_labels=["<50K", ">=50K"])
    for i in xrange(len(tx)):
        ds.addSample(tx[i], [ty[i]])
    trainer = BackpropTrainer(network, ds)
    trainer.trainOnDataset(ds, iterations)
    NetworkWriter.writeToFile(network, "network.xml")
    results = sum((np.array([round(network.activate(test)) for test in rx]) - ry)**2)/float(len(ry))
    return results
Example #15
def nn(tx, ty, rx, ry, iterations):
    network = buildNetwork(14, 5, 5, 1)
    ds = ClassificationDataSet(14, 1, class_labels=["<50K", ">=50K"])
    for i in xrange(len(tx)):
        ds.addSample(tx[i], [ty[i]])
    trainer = BackpropTrainer(network, ds)
    trainer.trainOnDataset(ds, iterations)
    NetworkWriter.writeToFile(network, "network.xml")
    results = sum((np.array([round(network.activate(test))
                             for test in rx]) - ry)**2) / float(len(ry))
    return results
Example #16
def build_network():

    # get iris data
    iris = datasets.load_iris()
    d, t = iris.data, iris.target

    # build dataset
    ds = _get_classification_dataset()
    for i in range(len(d)):
        ds.addSample(d[i], t[i])

    print "Dataset input: {}".format(ds['input'])
    print "Dataset output: {}".format(ds['target'])
    print "Dataset input length: {}".format(len(ds['input']))
    print "Dataset output length: {}".format(len(ds['target']))
    print "Dataset length: {}".format(len(ds))
    print "Dataset input|output dimensions are {}|{}".format(
        ds.indim, ds.outdim)

    # split dataset
    train_data, test_data = _split_with_proportion(ds, 0.70)

    print "Train Data length: {}".format(len(train_data))
    print "Test Data length: {}".format(len(test_data))

    # encode with one output neuron per class
    train_data._convertToOneOfMany()
    test_data._convertToOneOfMany()

    print "Train Data input|output dimensions are {}|{}".format(
        train_data.indim, train_data.outdim)
    print "Test Data input|output dimensions are {}|{}".format(
        test_data.indim, test_data.outdim)

    # build network
    network = buildNetwork(INPUT, HIDDEN, CLASSES, outclass=SoftmaxLayer)

    # train network
    trainer = BackpropTrainer(network,
                              dataset=train_data,
                              momentum=0.1,
                              verbose=True,
                              weightdecay=0.01)
    trainer.trainOnDataset(train_data, 500)

    print "Total epochs: {}".format(trainer.totalepochs)

    # test network
    output = network.activateOnDataset(test_data).argmax(axis=1)

    print "Percent error: {}".format(percentError(output, test_data['class']))

    # return network
    return network
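`_get_classification_dataset`, `_split_with_proportion`, and the INPUT/HIDDEN/CLASSES constants are defined elsewhere. A plausible sketch consistent with how they are used above (iris has 4 features and 3 classes):

# Hypothetical definitions, assumptions only; the real ones are not shown.
from pybrain.datasets import ClassificationDataSet

INPUT, HIDDEN, CLASSES = 4, 5, 3   # assumed values

def _get_classification_dataset():
    return ClassificationDataSet(INPUT, 1, nb_classes=CLASSES)

def _split_with_proportion(ds, proportion):
    # splitWithProportion returns (first, second); 'first' receives 'proportion'
    return ds.splitWithProportion(proportion)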
Example #17
def estimateNot():
    ds_not = SupervisedDataSet(1, 1)
    ds_not.addSample( (0,) , (1,))
    ds_not.addSample( (1,) , (0,))
    net = buildNetwork(1, 100, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds_not, 3000)
    trainer.testOnData() 
    print '\nthe prediction for NOT value:'
    print 'NOT 0  = ', net.activate((0,))
    print 'NOT 1  = ', net.activate((1,))
Example #18
def estimateNot():
    ds_not = SupervisedDataSet(1, 1)
    ds_not.addSample( (0,) , (1,))
    ds_not.addSample( (1,) , (0,))
    net = buildNetwork(1, 10, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds_not, 3000)
    trainer.testOnData() 
    print '\nthe prediction for NOT value:'
    print 'NOT 0  = ', net.activate((0,))
    print 'NOT 1  = ', net.activate((1,))
Example #19
def RunNet(net, dataset, train_epochs):
	"a function to build a neural net and test on it, for testing purposes right now"
	#print net.activate([2, 1])
	#ds = SupervisedDataSet(15, 1)
	#ds.addSample((1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), (100))
	#ds.addSample((0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0))

	#trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99, verbose = True)
	trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.5, verbose = True)
	
	trainer.trainOnDataset(dataset, train_epochs)
	
	trainer.testOnData(verbose = True)
Example #20
def buildAndTrain(ds):
  
  net = buildNetwork(2, 4, 1, bias=True)

  # try:
  #   f = open('_learned', 'r')
  #   net = pickle.load(f)
  #   f.close()
  # except:
  trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
  trainer.trainOnDataset(ds, 1000)
  trainer.testOnData()
  return net
Example #21
    def xtrain(self):
        dataset = self.getdata()

        # Constructing a neural network with two hidden layers
        net = buildNetwork(9, 15, 5, 1, recurrent=True)

        # Training using Back Propagation
        trainer = BackpropTrainer(net, learningrate=0.01, momentum=0.75,
                                  weightdecay=0.02, verbose=True)
        trainer.trainOnDataset(dataset, 10)
        trainer.testOnData(verbose=False)

        # Saving the trained neural network information to file
        self.writetrainedinfo(net)
Example #22
class BackpropNet:
    def __init__(self, input_size, hidden_layer_size, output_size):
        self._input_size = input_size
        self._output_size = output_size
        self._net = buildNetwork(input_size, hidden_layer_size, output_size)
        self._trainer = BackpropTrainer(self._net, learningrate=0.001)

    def process(self, inp):
        return self._net.activate(inp)

    def train(self, inp, output):
        dataset = SupervisedDataSet(self._input_size, self._output_size)
        dataset.addSample(inp, output)
        self._trainer.trainOnDataset(dataset)
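A usage sketch for BackpropNet; the sizes and sample values are illustrative:

# One online training step followed by a forward pass.
bp = BackpropNet(input_size=2, hidden_layer_size=4, output_size=1)
bp.train((0.0, 1.0), (1.0,))   # builds a one-sample dataset and trains one epoch
print bp.process((0.0, 1.0))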
Example #23
def estimateNor():
    ds_nor = SupervisedDataSet(2, 1)
    ds_nor.addSample( (0,0) , (1,))
    ds_nor.addSample( (0,1) , (0,))
    ds_nor.addSample( (1,0) , (0,))
    ds_nor.addSample( (1,1) , (0,))
    net = buildNetwork(2, 10, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds_nor, 3000)
    trainer.testOnData() 
    print '\nthe prediction for NOR value:'
    print '1 NOR 1 = ', net.activate((1,1))
    print '1 NOR 0 = ', net.activate((1,0))
    print '0 NOR 1 = ', net.activate((0,1))
    print '0 NOR 0 = ', net.activate((0,0))
Example #24
def estimateAnd():
    ds_and = SupervisedDataSet(2, 1)
    ds_and.addSample( (0,0) , (0,))
    ds_and.addSample( (0,1) , (0,))
    ds_and.addSample( (1,0) , (0,))
    ds_and.addSample( (1,1) , (1,))
    net = buildNetwork(2, 4, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds_and, 3000)
    trainer.testOnData() 
    print '\nthe prediction for AND value:'
    print '1 AND 1 = ', net.activate((1,1))
    print '1 AND 0 = ', net.activate((1,0))
    print '0 AND 1 = ', net.activate((0,1))
    print '0 AND 0 = ', net.activate((0,0))
Example #25
def estimateAnd():
    ds_and = SupervisedDataSet(2, 1)
    ds_and.addSample((0, 0), (0, ))
    ds_and.addSample((0, 1), (0, ))
    ds_and.addSample((1, 0), (0, ))
    ds_and.addSample((1, 1), (1, ))
    net = buildNetwork(2, 4, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate=0.01, momentum=0.99)
    trainer.trainOnDataset(ds_and, 3000)
    trainer.testOnData()
    print '\nthe prediction for AND value:'
    print '1 AND 1 = ', net.activate((1, 1))
    print '1 AND 0 = ', net.activate((1, 0))
    print '0 AND 1 = ', net.activate((0, 1))
    print '0 AND 0 = ', net.activate((0, 0))
Example #26
def estimateNor():
    ds_nor = SupervisedDataSet(2, 1)
    ds_nor.addSample( (0,0) , (1,))
    ds_nor.addSample( (0,1) , (0,))
    ds_nor.addSample( (1,0) , (0,))
    ds_nor.addSample( (1,1) , (0,))
    net = buildNetwork(2, 100, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds_nor, 3000)
    trainer.testOnData() 
    print '\nthe prediction for NOR value:'
    print '1 NOR 1 = ', net.activate((1,1))
    print '1 NOR 0 = ', net.activate((1,0))
    print '0 NOR 1 = ', net.activate((0,1))
    print '0 NOR 0 = ', net.activate((0,0))
Example #27
def build_network():

    # get iris data
    iris = datasets.load_iris()
    d,t = iris.data, iris.target

    # build dataset
    ds = _get_classification_dataset()
    for i in range(len(d)):
        ds.addSample(d[i],t[i])

    print "Dataset input: {}".format(ds['input'])
    print "Dataset output: {}".format(ds['target'])
    print "Dataset input length: {}".format(len(ds['input']))
    print "Dataset output length: {}".format(len(ds['target']))
    print "Dataset length: {}".format(len(ds))
    print "Dataset input|output dimensions are {}|{}".format(ds.indim, ds.outdim)

    # split dataset
    train_data,test_data = _split_with_proportion(ds, 0.70)
    
    print "Train Data length: {}".format(len(train_data))
    print "Test Data length: {}".format(len(test_data))

    # encode with one output neuron per class
    train_data._convertToOneOfMany()
    test_data._convertToOneOfMany()

    print "Train Data input|output dimensions are {}|{}".format(train_data.indim, train_data.outdim)
    print "Test Data input|output dimensions are {}|{}".format(test_data.indim, test_data.outdim)

    # build network
    network = buildNetwork(INPUT,HIDDEN,CLASSES,outclass=SoftmaxLayer)

    # train network
    trainer = BackpropTrainer(network,dataset=train_data,momentum=0.1,verbose=True,weightdecay=0.01)
    trainer.trainOnDataset(train_data, 500)

    print "Total epochs: {}".format(trainer.totalepochs)

    # test network
    output = network.activateOnDataset(test_data).argmax(axis=1)
    
    print "Percent error: {}".format(percentError(output, test_data['class']))

    # return network
    return network
Example #28
class PHC_NN(PHC_FA):
    '''PHC with neural function approximation. '''
    delta=0.1
    maxNumberofAverage=30
    weightdecay=0.001
    trainingEpochPerUpdateWight=2
    
    def __init__(self, num_features, num_actions, indexOfAgent=None):    
        PHC_FA.__init__(self, num_features, num_actions, indexOfAgent)
        self.linQ = buildNetwork(num_features + num_actions, (num_features + num_actions), 1, hiddenclass = SigmoidLayer, outclass = LinearLayer)
        self.linPolicy = buildNetwork(num_features, (num_features + num_actions), num_actions, hiddenclass = SigmoidLayer,outclass = SigmoidLayer)
        self.trainer4LinQ=BackpropTrainer(self.linQ,weightdecay=self.weightdecay)
        self.trainer4LinPolicy=BackpropTrainer(self.linPolicy,weightdecay=self.weightdecay)

    def _pi(self, state):
        """Given state, compute probabilities for each action."""
        values = np.array(self.linPolicy.activate(r_[state]))
        z=np.sum(values)
        return (values/z).flatten()
    
    def _qValues(self, state):
        """ Return vector of q-values for all actions, 
        given the state(-features). """
        values = np.array([self.linQ.activate(r_[state, one_to_n(i, self.num_actions)]) for i in range(self.num_actions)])
        return values.flatten()

            
    def _updateWeights(self, state, action, reward, next_state):
        """ state and next_state are vectors, action is an integer. """
        #update Q-value function approximator
        target=reward + self.rewardDiscount * max(self._qValues(next_state))
        inp=r_[asarray(state), one_to_n(action, self.num_actions)]
        self.trainer4LinQ=BackpropTrainer(self.linQ,weightdecay=self.weightdecay)
        ds = SupervisedDataSet(self.num_features+self.num_actions,1)
        ds.addSample(inp, target)
        self.trainer4LinQ.trainOnDataset(ds)
        #Update policy
        bestAction=r_argmax(self._qValues(state))
        target= one_to_n(bestAction, self.num_actions)
        inp=r_[asarray(state)]
        ds = SupervisedDataSet(self.num_features,self.num_actions)
        ds.addSample(inp, target)
        self.trainer4LinPolicy=BackpropTrainer(self.linPolicy,
                                               learningrate=self.delta,
                                               weightdecay=self.weightdecay)
        self.trainer4LinPolicy.setData(ds)
        self.trainer4LinPolicy.trainEpochs(epochs=self.trainingEpochPerUpdateWight)
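This class relies on names defined elsewhere in its project (PHC_FA, self.rewardDiscount, self.num_features, self.num_actions). The imports below are an assumption consistent with how the helpers are used, not code from the source:

# Assumed imports for the class above (not shown in the source):
from numpy import r_, asarray
from pybrain.utilities import one_to_n, r_argmax   # assumed location of r_argmax
from pybrain.datasets import SupervisedDataSet
from pybrain.structure import SigmoidLayer, LinearLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer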
Example #29
def Treinar():
    print 'Starting network training...... please wait'
    ds = SupervisedDataSet(50,1)
    with open('trainning.txt') as f:
        for line in f:
            if line[0] != '#':
                line = line.replace('\n','')
                line = line.split(',')
                exemplo = []
                for x in line: exemplo.append(x)
                ds.addSample(exemplo[1:], exemplo[:1]) # exemplo[:1] is the target (first value); exemplo[1:] are the inputs.
    ## Dataset
    trainer = BackpropTrainer(net, learningrate = 0.04, momentum = 0.07, verbose = False)
    trainer.trainOnDataset(ds,10000) 
    NetworkWriter.writeToFile(net, 'filename.xml')
    print 'Trained and ready'
Example #30
    def xtrain(self):
        dataset = self.getdata()

        # Constructing a neural network with two hidden layers
        net = buildNetwork(9, 15, 5, 1, recurrent=True)

        # Training using Back Propagation
        trainer = BackpropTrainer(net,
                                  learningrate=0.01,
                                  momentum=0.75,
                                  weightdecay=0.02,
                                  verbose=True)
        trainer.trainOnDataset(dataset, 10)
        trainer.testOnData(verbose=False)

        # Saving the trained neural network information to file
        self.writetrainedinfo(net)
Example #31
def main():
	# Declare length of input and output vectors here, to change later
	lenInput = 6
	lenOutput = 4
	lenHidden = 8 # change this?
	labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']

	# Initialize data set used for training
	data = ClassificationDataSet(lenInput, lenOutput, class_labels=labels)

	# Get input and output vectors from file, build dataset
	with open('../heartsMoves.csv', 'rb') as csvfile:
		reader = csv.reader(csvfile, delimiter=",", quotechar="|")
		firstLine = True
		for row in reader:
			if firstLine:
				labels = row
			if not firstLine:
				rowInput = map(float,row[:lenInput])
				rowOutput = map(float, row[lenInput:])
				#pdb.set_trace()
				data.addSample(rowInput, rowOutput)
			firstLine = False

	##########################################
	# DATASET IS BUILT, NOW TRAIN NEURAL NET #
	##########################################

	# build network
	fnn = buildNetwork(lenInput, lenHidden, lenOutput)
	# train it with data
	trainer = BackpropTrainer(fnn, dataset=data, momentum=.1, verbose=False, weightdecay=0.01)
	trainer.trainOnDataset(data)

	#print 'final weights:', fnn.params

	####################################
	# NET IS TRAINED, NOW SAVE TO DISK #
	####################################

	fileObject = open(filename, 'w')  # NOTE: 'filename' is assumed to be defined at module level
	pickle.dump(fnn, fileObject)
	fileObject.close()
Example #32
def genTrain(rnaNmbr, gen, epochs, genPath, dataset):
	"""
	#Treinamento de uma geracao (backpropagation)

	##Parametros

	-rnaNmbr-> numero de redes neurais de cada geracao - Inteiro
	-gen-> numero da geracao para o treinamento - Inteiro
	-epochs-> numero de iteracoes de treino para cada rna - Inteiro
	-genPath->	endereco para a pasta que aloca as geracoes de um sistema genetico - String
	-dataset-> array contendo os dados do dataset - Array de Float
	
	##Retorno

	none

	"""

	k = 0

	inNmbr = len(dataset[0])
	outNmbr = len(dataset[1])

	ds = SupervisedDataSet(inNmbr, outNmbr)

	for i in range(len(dataset)/2):

		ds.addSample(dataset[k], dataset[k+1])
		k = k+2
	
	for l in range(rnaNmbr):

		rna = NetworkReader.readFrom(genPath+'/gen'+str(gen)+'/rna'+str(l+1)+'.xml')

		t = BackpropTrainer(rna , learningrate=0.01, momentum=0.1, verbose=True)	
		t.trainOnDataset(ds, epochs)

		NetworkWriter.writeToFile(rna, genPath+'/gen'+str(gen)+'/rna'+str(l+1)+'.xml')

		print "Geracao: {:}".format(l+1)
Example #33
def vali():
    from pybrain.tools.validation import ModuleValidator
    from pybrain.tools.validation import CrossValidator
    with open('new_data1.txt') as data_file:
        data = json.load(data_file)
    m = [d[0] for d in data]
    case = [min([a for a, s, d in m]), float(max([a for a, s, d in m])-min([a for a, s, d in m]))]
    week = [min([s for a, s, d in m]), float(max([s for a, s, d in m])-min([s for a, s, d in m]))]
    grid = [min([d for a, s, d in m]), float(max([d for a, s, d in m])-min([d for a, s, d in m]))]
    ds = SupervisedDataSet(3, 1)
    import random
    random.shuffle(data)
    print len(data)
    for i in xrange(0, len(data)):
        # print "Adding {}th data sample".format(i),
        x1 = float(data[i][0][0] - case[0])/case[1]
        x2 = float(data[i][0][1] - week[0])/week[1]
        x3 = float(data[i][0][2] - grid[0])/grid[1]
        input = (x1, x2, x3)
        output = data[i][1]
        ds.addSample(input, output)
        # print ":: Done"

    print "Train"
    net = buildNetwork(3, 3, 1, bias=True)
    tstdata, trndata = ds.splitWithProportion( 0.33 )
    trainer = BackpropTrainer(net, trndata)
    mse = []
    modval = ModuleValidator()
    for i in range(100):
        trainer.trainEpochs(1)
        trainer.trainOnDataset(dataset=trndata)
        cv = CrossValidator(trainer, trndata, n_folds=10, valfunc=modval.MSE)
        mse_val = cv.validate()
        print "MSE %f @ %i" % (mse_val, i)
        mse.append(mse_val)

    with open('cross_validation.json', 'w') as outfile:
            json.dump(mse, outfile, indent=4)
Example #34
def problemC():
    ds_and = SupervisedDataSet(3, 1)
    ds_and.addSample( (0,0,0) , (1,))
    ds_and.addSample( (0,0,1) , (1,))
    ds_and.addSample( (0,1,0) , (1,))
    ds_and.addSample( (0,1,1) , (0,))
    ds_and.addSample( (1,0,0) , (1,))
    ds_and.addSample( (1,0,1) , (0,))
    ds_and.addSample( (1,1,0) , (1,))
    ds_and.addSample( (1,1,1) , (0,))
    net = buildNetwork(3, 10, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds_and, 3000)
    trainer.testOnData() 
    print '\n3) NOT ( (A OR B) AND C) '
    print '0 0 0  = ', net.activate((0,0,0))
    print '0 0 1  = ', net.activate((0,0,1))
    print '0 1 0  = ', net.activate((0,1,0))
    print '0 1 1  = ', net.activate((0,1,1))
    print '1 0 0  = ', net.activate((1,0,0))
    print '1 0 1  = ', net.activate((1,0,1))
    print '1 1 0  = ', net.activate((1,1,0))
    print '1 1 1  = ', net.activate((1,1,1))
Example #35
# Read all info on the csv. 
for line in tf.readlines():
    data = [float(x) for x in line.strip().split(',') if x != '']
    indata =  tuple(data[:8])
    outdata = tuple(data[8:])
    ds.addSample(indata,outdata)

'''
Make the actual neural networks
TOPOLOGY: 8 in, 8 hidden-0, 8 hidden-1, 8 out. Hidden layers subject to change
'''
n = buildNetwork(ds.indim,8,8,ds.outdim,recurrent=True)

#change learningrate based on gradient
t = BackpropTrainer(n,learningrate=0.01,momentum=0.5,verbose=True)

# 100 iterations
t.trainOnDataset(ds,100)
t.testOnData(verbose=True)

# Our prediction given 8 inputs, will print 8 estimated outputs
guess = n.activate((1,2,3,4,5,6,7,8)) 
print 'Final weights:',n.params

# Print our Guess 
print '\nGUESS???' + str(guess)

#print n['in'], n['out'], n['h0'], n['h1']
printConnections(n)
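printConnections is not defined in this snippet. A minimal sketch that walks the network's modules and their outgoing connections:

# Hypothetical sketch of the missing printConnections helper.
def printConnections(net):
    for mod in net.modules:
        for conn in net.connections[mod]:
            print '%s -> %s' % (conn.inmod.name, conn.outmod.name)
            print '  weights:', conn.params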
Example #36
from pybrain.datasets import *
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
import pickle

if __name__ == "__main__":
    ds = SupervisedDataSet(2, 1)
    ds.addSample((0, 0), (0, ))
    ds.addSample((0, 1), (1, ))
    ds.addSample((1, 0), (1, ))
    ds.addSample((1, 1), (0, ))

    net = buildNetwork(2, 4, 1, bias=True)

    # try:
    #     f = open('_learned', 'r')
    #     net = pickle.load(f)
    #     f.close()
    # except:
    trainer = BackpropTrainer(net, learningrate=0.01, momentum=0.99)
    trainer.trainOnDataset(ds, 3000)
    trainer.testOnData()
    # f = open('_learned', 'w')
    # pickle.dump(net, f)
    # f.close()

    print net.activate((1, 1))
    print net.activate((1, 0))
    print net.activate((0, 1))
    print net.activate((0, 0))
Example #37
start_time = time.time()

#our data set has 4 input parameters and 3 classes
ds = SupervisedDataSet(4,3)

tf=open('IRIS.csv','r')
for line in tf.readlines():
    data = [float(x) for x in line.strip().split(',') if x != '']
    indata =  tuple(data[:4])
    outdata = tuple(data[4:])
    ds.addSample(indata,outdata)
print ds.indim
print ds.outdim
# pybrain.tools.shortcuts.buildNetwork(*layers, **options)
#Build arbitrarily deep networks.
#layers should be a list or tuple of integers, that indicate how many neurons the layers should have.
#change the hidden layer neurons to maximise the accuracy
n = buildNetwork(ds.indim,3,ds.outdim,recurrent=True)

#bpa
t = BackpropTrainer(n,learningrate=0.01,momentum=0.5,verbose=True)

t.trainOnDataset(ds,5)
t.testOnData(verbose=True)


end_time=time.time()

print "time taken is ",end_time-start_time," seconds"
Example #38
    def nn_run(self, hidden_dim, num_epoch, learningrate_, lrdecay_, weightdecay_, num_classes, X_train, y_train):
        # # NN HYP
        # # hidden_dim = 100
        # bias_ = True
        # # SoftmaxLayer, TanhLayer, SigmoidLayer, LSTMLayer, LinearLayer, GaussianLayer
        # hiddenclass_ = TanhLayer
        # outclass_ = SoftmaxLayer
        # # num_epoch = 4
        # # if len(sys.argv)>0:
        # #     num_epoch = int(sys.argv[1])
        # # learningrate_ = 0.01
        # # lrdecay_ = 1.0
        # momentum_ = 0.1
        # batchlearning_ = False
        # # weightdecay_ = 0.01
        # # NN HYP

        net = buildNetwork(SHAPE[0] * SHAPE[1] * 3, hidden_dim,
                           num_classes, bias=self.bias_, hiddenclass=self.hiddenclass_, outclass=self.outclass_)

        train_ds = SupervisedDataSet(SHAPE[0] * SHAPE[1] * 3, num_classes)
        test_ds = SupervisedDataSet(SHAPE[0] * SHAPE[1] * 3, num_classes)

        if batch_size == 0:
            # for feature, label in zip(X_train, y_train):
            for feature, label in zip(X_train, y_train):
                train_ds.addSample(feature, label)

            # for feature, label in zip(X_test, y_test):
            #     test_ds.addSample(feature, label)

            # checking for model
            if os.path.isfile("models/" + model_name + ".pkl"):
                tmp = "Using previous " + model_name + " model...\n"
                print(tmp)
                f.write(tmp)
                trainer = pickle.load(open("models/" + model_name + ".pkl", "rb"))
            else:
                # tmp = "Training " + model_name + " on set " + str(division_num) + "\n"
                # print(tmp)
                # f.write(tmp)
                trainer = BackpropTrainer(net, train_ds, learningrate=learningrate_, lrdecay=lrdecay_,
                                          momentum=self.momentum_, verbose=True, batchlearning=self.batchlearning_,
                                          weightdecay=weightdecay_)
                # different training calls
                # trainer.train()
                trainer.trainEpochs(epochs=num_epoch)
                # trainer.trainOnDataset(dataset)
                # trainer.trainUntilConvergence(dataset=None, maxEpochs=None,
                #                               verbose=None, continueEpochs=10, validationProportion=0.25)
                # different training calls

                # print("Saving model")
                # pickle.dump(trainer, open("models/"+ model_name+ ".pkl", "wb"))
        elif batch_size > 0:
            trainer = BackpropTrainer(net, learningrate=learningrate_, lrdecay=lrdecay_,
                                      momentum=self.momentum_, verbose=True,
                                      batchlearning=self.batchlearning_,
                                      weightdecay=weightdecay_)
            for epoch in range(num_epoch):
                print("\n epoch {}".format(epoch))
                for i in range(X_train.shape[0] // batch_size):
                    X_ = X_train[i * batch_size:(i + 1) * batch_size][:]
                    y_ = y_train[i * batch_size:(i + 1) * batch_size]

                    tmp = "epoch {}, batch {}".format(epoch, i)
                    print(tmp)
                    f.write(tmp)

                    train_ds = SupervisedDataSet(SHAPE[0] * SHAPE[1] * 3, num_classes)

                    for feature, label in zip(X_, y_):
                        train_ds.addSample(feature, label)

                    # train_ds.batches("batches", batch_size)

                    # for feature, label in zip(X_test, y_test):
                    #     test_ds.addSample(feature, label)

                    # checking for model
                    if os.path.isfile("models/" + model_name + ".pkl"):
                        tmp = "Using previous " + model_name + " model...\n"
                        print(tmp)
                        f.write(tmp)
                        trainer = pickle.load(open("models/" + model_name + ".pkl", "rb"))
                    else:
                        # tmp = "Training " + model_name + " on set " + str(division_num) + "\n"
                        # print(tmp)
                        # f.write(tmp)
                        # trainer = BackpropTrainer(net, learningrate=learningrate_, lrdecay=lrdecay_,
                        #                           momentum=self.momentum_, verbose=True,
                        #                           batchlearning=self.batchlearning_,
                        #                           weightdecay=weightdecay_)
                        # different training calls
                        # trainer.train()
                        trainer.trainOnDataset(train_ds)
                        # trainer.trainOnDataset(dataset)
                        # trainer.trainUntilConvergence(dataset=None, maxEpochs=None,
                        #                               verbose=None, continueEpochs=10, validationProportion=0.25)
                        # different training calls

                        # print("Saving model")
                        # pickle.dump(trainer, open("models/"+ model_name+ ".pkl", "wb"))

                    # tmp = eval(" ", " ", test_precs, model_name,
                    #            X_train, y_train, net, svm, f, tr_=True)
                    # print("eval {}".format(tmp))

        return net
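nn_run references several module-level names that are not part of this snippet (SHAPE, batch_size, model_name, f) plus self.* hyperparameters set elsewhere. Illustrative placeholders, assumptions only:

# Placeholder globals assumed by nn_run (illustrative values only):
SHAPE = (32, 32)                # image height/width; 3 channels are hard-coded above
batch_size = 0                  # 0 trains on the full set; >0 enables mini-batches
model_name = "nn"               # used for the models/<model_name>.pkl checkpoint
f = open("nn_log.txt", "w")     # log file handle written to alongside prints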
Example #39
__author__ = 'davidoregan'


from pybrain.datasets            import *
from pybrain.utilities           import percentError
from pybrain.tools.shortcuts     import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules   import SoftmaxLayer

ds = SupervisedDataSet(6,3)

tf = open('MiscIdeas/../MiscIdeas/carData.csv','r')

for line in tf.readlines():
    data = [float(x) for x in line.strip().split(',') if x != '']
    indata =  tuple(data[:6])
    outdata = tuple(data[6:])
    ds.addSample(indata,outdata)

n = buildNetwork(ds.indim,8,8,ds.outdim,recurrent=True)
t = BackpropTrainer(n,learningrate=0.01,momentum=0.5,verbose=True)
t.trainOnDataset(ds,1000)
t.testOnData(verbose=True)
Example #40
tsts.setField('input',x.reshape(len(x),1))
tsts.setField('target',s.reshape(len(s),1))

#read the train DataSet from file
trndata = SupervisedDataSet.loadFromFile(os.path.join(os.getcwd(),'trndata'))

#create the trainer

t = BackpropTrainer(n, learningrate = 0.01 ,
                    momentum = mom)
#train the neural network from the train DataSet

cterrori=1.0
print "trainer momentum:"+str(mom)
for iter in range(25):
  t.trainOnDataset(trndata, 1000)
  ctrndata = mv.calculateModuleOutput(n,trndata)
  cterr = v.MSE(ctrndata,trndata['target'])
  relerr = abs(cterr-cterrori)
  cterrori = cterr
  print 'iteration:',iter+1,'MSE error:',cterr
  myplot(trndata,ctrndata,iter=iter+1)
  if cterr < 1.e-5 or relerr < 1.e-7:
    break
# write the network to an xml file
myneuralnet = os.path.join(os.getcwd(),'myneuralnet.xml')
if os.path.isfile(myneuralnet):
    NetworkWriter.appendToFile(n,myneuralnet)
else:
    NetworkWriter.writeToFile(n,myneuralnet)
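This snippet begins mid-script: `n`, `mom`, `tsts`, `x`, `s`, `mv`, `v`, `myplot`, and `NetworkWriter` are bound earlier in the original file. Plausible assumed bindings for the validation helpers:

# Assumed earlier definitions (not shown in the source):
from pybrain.tools.validation import ModuleValidator, Validator
from pybrain.tools.customxml import NetworkWriter
mv, v = ModuleValidator, Validator   # calculateModuleOutput / MSE are classmethods
mom = 0.1                            # illustrative momentum value
# n is the network under training; tsts, x, s and myplot() come from earlier code.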
    
Example #41
    def __init__(self, dir, testDir=None, doTest = True, ignoreKlass = [], includeKlass = None):
        logger.info("Start building " + self.__class__.__name__)
        self.__mutex = threading.Semaphore()
        self.__dir = dir
        self.__testDir = testDir or dir
        self.__filenameToUrl = self.__readLogFile()
        freqDists = {}
        ignore = stopwords.words('english')
        featureFd = FreqDist()
        klassSize = {}
        for klassId in self.__klasses(ignoreKlass, includeKlass):
            freqDist = FreqDist()
            size = 0
            for url, txt in self.__documents(klassId).items():
                txt = tokenize(txt)
                size += 1
                for part in txt:
                    if part.isalnum() and part not in ignore:
                        freqDist.inc(part)
                        featureFd.inc(part)
                        #for bigram in nltk.bigrams(txt):
                        #    freqDist.inc(bigram)
                        #    featureFd.inc(bigram)
            freqDists[klassId] = freqDist
            klassSize[klassId] = size

        selectedKlasses = self.__klasses(ignoreKlass, includeKlass)
        logger.info("Klasses:" + str(selectedKlasses))
        documentsWithLabel = [(document, correctKlass) for correctKlass in selectedKlasses for url, document in self.__documents(correctKlass).items()]
        testDocumentsWithLabel = [(document, correctKlass) for correctKlass in selectedKlasses for url, document in self.__testDocuments(correctKlass).items()[:100]]
        random.shuffle(documentsWithLabel)

        self.__featuresGenerator = FeatureGenerator(freqDists, featureFd, klassSize)

        outSize = len(klassSize)
        logger.info(u"Build data set")
        trainset = SupervisedDataSet(300, outSize)
        for doc, label in testDocumentsWithLabel:
            trainset.addSample( self.__featuresGenerator(doc) , [1 if k == label else -1 for k in selectedKlasses])
            #logger.info(trainset)
        logger.info(u"Build network")
        self.__classifier = buildNetwork(300, 500, outSize, bias=True)#, fast=True)
        trainer = BackpropTrainer(self.__classifier, verbose=True)
        logger.info(u"Train...")
        trainer.trainOnDataset(trainset, 20)
        trainer.testOnData(verbose=True)

        logger.info(u"Classifier learned")
        f = open('/tmp/_learned', 'w')
        pickle.dump(self.__classifier, f)
        f.close()
        if doTest:
            testset = trainset
            ref = [selectKlassName(selectedKlasses, out) for feat, out in testset]
            test = [selectKlassName(selectedKlasses, self.__classifier.activate(features)) for features, cat in testset]
            #for correctKlass, klass, featuresWithLabel in zip(ref, test, testset):
            #    if correctKlass != klass:
            #        pd = self.__classifier.prob_classify(dict(featuresWithLabel[0]))
            #        labelProbList = sorted( [(sample, pd.logprob(sample)) for sample in pd.samples()], key=lambda x: x[1], reverse=True)
            #        logger.info( correctKlass + " as " + klass + ": " + str([(correctKlass, "%.2f" % prob) for correctKlass, prob in labelProbList]))
            #        logger.info([(key, value)for key, value in featuresWithLabel[0].items() if value > 0])
            #        logger.info(self.__findDocumentByKlassAndFeatures(correctKlass, featuresWithLabel[0]))
            logger.info("\n" + ConfusionMatrix(ref, test).pp())
Example #42
class Agent(object):
    def __init__(self, use_brain):
        self.price_belief_high = random.uniform(PRICE_LOW, PRICE_HIGH)
        self.price_belief_low = random.uniform(PRICE_LOW, self.price_belief_high)
        self.price = random.uniform(self.price_belief_low, self.price_belief_high)
        self.consumption_value_low = random.randint(15, 60) #Kilowatts used per day
        self.consumption_value_high = random.randint(self.consumption_value_low, 60)
        self.production_value = random.randint(2, 15) #Square Meters of Solar Panels

        self.no_trades = 0
        self.wealth = 0
        self.supply = 0
        self.demand = 0
        self.weather = 1.0
        self.power = 0.0
        self.reserve_power = 0.0
        self.observed_prices = [] #Prices which the agent successfully traded.
        
        self.use_brain = use_brain

        self.price_history = []
        self.wealth_history = []
        
        if use_brain:
            self.brain = buildNetwork(3, 40, 1)
            self.memory = SupervisedDataSet(3, 1)
            self.trainer = BackpropTrainer(self.brain)

    def sell(self, units, price):
        self.observed_prices.append(price)
        self.power -= units
        self.no_trades += 1
        self.wealth += (units * price)

    def buy(self, units, price):
        self.observed_prices.append(price)
        self.power += units
        self.no_trades += 1
        self.wealth -= (units * price)

    def day_begin(self, weather, market):
        self.price_history.append(self.price)
        self.wealth_history.append(self.wealth)
        
        self.weather = weather
        self.consumption_value = random.randint(self.consumption_value_low, self.consumption_value_high)
        self.power = ((self.production_value * self.weather) - self.consumption_value)

        #Use any reserve power if we have it.
        if self.reserve_power > 0:
            self.power += self.reserve_power
            self.reserve_power = 0

        #Update Supply and Demand unless "Smart Agent"
        if not self.use_brain or self.power <= 0 or len(market.price_history) < 3:
            self.update_supply_demand(market)
            return

        #Predict price
        buyers = [agent for agent in market.agents if agent.demand > 0]
        sellers = [agent for agent in market.agents if agent.supply > 0]

        supply = sum(seller.supply for seller in sellers)
        demand = sum(buyer.demand for buyer in buyers)
        weather = self.weather

        predicted_price = self.brain.activate((weather, supply, demand))[0]
        
        #Store power instead of selling it if price is going to be low.        
        threshold = statistics.median(market.price_history) #(PRICE_LOW + PRICE_HIGH) * 0.5
        if predicted_price < threshold:
            self.reserve_power += self.power
            self.power = 0
        self.update_supply_demand(market)

    def day_end(self, market):
        
        if not self.use_brain:
            return
        
        supply = market.asks[-1]
        demand = market.bids[-1]
        weather = self.weather
        price = market.price_history[-1]
        
        self.price_belief_low = self.brain.activate((weather, supply, demand))[0]
        self.price_belief_high = self.brain.activate((weather, supply, demand))[0]
        self.price = random.uniform(self.price_belief_low, self.price_belief_high)
        self.price_history[-1] = self.price
        
               
        self.memory.clear()
        self.memory.addSample((weather, supply, demand), (price,))
        self.trainer.trainOnDataset(self.memory)
        
    def update_price_belief(self, market, did_sell, success):
        public_mean_price = market.average_price()
        mean = (self.price_belief_low + self.price_belief_high) / 2
        confidence_sigma = 0.05
        
        delta_mean = mean - public_mean_price
        
        if success:
                        
            #If overpaid or undersold, shift towards mean
            if not did_sell and delta_mean > SIGNIFICANT:
                self.price_belief_low -= delta_mean / 2
                self.price_belief_high -= delta_mean / 2
            elif did_sell and delta_mean < -SIGNIFICANT:
                self.price_belief_low -= delta_mean / 2
                self.price_belief_high -= delta_mean / 2
                
                
            #increase confidence in price
            self.price_belief_low += confidence_sigma * mean
            self.price_belief_high -= confidence_sigma * mean
            
        else:
            
            #Shift belief towards means
            self.price_belief_low -= delta_mean / 2
            self.price_belief_high -= delta_mean / 2
            
                
            #Need lots of power? Buy for higher price
            if (not did_sell and self.demand > self.production_value * 2):
                confidence_sigma *= 2
            #Lots of power to sell? sell for lower price
            elif(did_sell and self.supply > self.consumption_value * 2):
                confidence_sigma *= 2
            #Otherwise, check supply/demand
            else:
                asks = sum(market.asks) / len(market.asks)
                bids = sum(market.bids) / len(market.bids)
                supply_vs_demand = (asks - bids) / (asks + bids)
                if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:
                    new_mean = public_mean_price * (1-supply_vs_demand)
                    delta_to_mean = mean - new_mean
                    self.price_belief_high -= delta_to_mean / 2
                    self.price_belief_low -= delta_to_mean / 2

            #decrease confidence in price
            self.price_belief_low -= confidence_sigma * mean
            self.price_belief_high += confidence_sigma * mean
            

        self.price = random.uniform(self.price_belief_low, self.price_belief_high)

    def update_supply_demand(self, market):
        self.demand = 0
        self.supply = 0
        if self.power > 0:
            if len(self.observed_prices) < 3:
                self.supply = self.power
            else:
                self.supply = round(self.power * self.price_favorability(market.average_price()))
                if self.supply < 1:
                    self.supply = 0
            self.reserve_power = self.power - self.supply
        elif self.power < 0:
            if len(self.observed_prices) < 3:
                self.demand = self.power * -1
            else:
                f = (1 - self.price_favorability(market.average_price()))
                self.demand = round(self.power * f) * -1
                if self.demand < 0:
                    self.demand = 0

    def price_favorability(self, value):
        
        max_n = max(self.observed_prices)
        min_n = min(self.observed_prices)
        value -= min_n
        max_n -= min_n
        min_n = 0
        value = value / max_n
        if value < 0:
            value = 0
        if value > 1:
            value = 1
        
        return value
    
    @property
    def price_belief_high(self):
        return self._price_belief_high

    @property
    def price_belief_low(self):
        return self._price_belief_low
    
    @price_belief_high.setter
    def price_belief_high(self, value):
        if value < PRICE_LOW:
            value = PRICE_LOW
        elif value > PRICE_HIGH:
            value = PRICE_HIGH
        self._price_belief_high = value

    @price_belief_low.setter
    def price_belief_low(self, value):
        if value < PRICE_LOW:
            value = PRICE_LOW
        elif value > PRICE_HIGH:
            value = PRICE_HIGH
        self._price_belief_low = value
Example #43
tst_ds, trn_ds = ds.splitWithProportion(0.2)

# print "train data"
# for inpt, target in trn_ds:
#     print inpt, target

# print "test data"
# for inpt, target in tst_ds:
#     print inpt, target

# More information about trainers: http://pybrain.org/docs/api/supervised/trainers.html

print "Training started"

trainer.trainOnDataset(trn_ds, 10)

# trainer.trainUntilConvergence(trn_ds, maxEpochs=100, verbose=True, continueEpochs=10, validationProportion=0.25)

# trainer.testOnData(tst_ds, verbose=True)

# exercise: how to get network error value for a given epoch?


# from: http://stackoverflow.com/questions/8150772/pybrain-how-to-print-a-network-nodes-and-weights
def display_net(net):
    for mod in net.modules:
        print("Module:", mod.name)
        if mod.paramdim > 0:
            print("--parameters:", mod.params)
        for conn in net.connections[mod]:
            print("-connection to", conn.outmod.name)
            if conn.paramdim > 0:
                print("-parameters:", conn.params)
Example #44
		ds.clear()
		k = 0
		for inp,targ in testdata:
			testSet.appendLinked(inp,targ-1)
		for inp,targ in traindata:
			trainSet.appendLinked(inp,targ-1)

trainSet._convertToOneOfMany(bounds=[0, 1])
testSet._convertToOneOfMany(bounds=[0, 1])

if camada2 == 0:
	net = buildNetwork(trainSet.indim, camada1, trainSet.outdim, recurrent=True)
else:
	net = buildNetwork(trainSet.indim, camada1, camada2, trainSet.outdim, recurrent=True)
trainer = BackpropTrainer(net,dataset = trainSet,learningrate = Learning,momentum = Momentum, verbose = True)
trainer.trainOnDataset(trainSet,Ciclos)

out = net.activateOnDataset(testSet)
out = out.argmax(axis=1) 

acerto = total = i = 0
for data in testSet:
	if data[1][0] == 1 and out[i] == 0:
		acerto += 1
		total += 1
	elif data[1][1] == 1 and out[i] == 1:
		acerto += 1
		total += 1
	elif data[1][2] == 1 and out[i] == 2:
		acerto += 1
		total += 1
Example #45
def train_separate_nets(data_set,
                        test_data,
                        arousal_net,
                        valence_net,
                        epochs=1):
    num_inputs = 4
    arousal_ds = SupervisedDataSet(num_inputs, 1)
    valence_ds = SupervisedDataSet(num_inputs, 1)
    for i in range(len(data_set)):
        try:
            arousal_ds.appendLinked(data_set[i][0], (data_set[i][1],))
            valence_ds.appendLinked(data_set[i][0], (data_set[i][2],))
        except:
            continue
    print str(
        len(arousal_ds)) + ' points successfully acquired for arousal analysis'
    print str(
        len(valence_ds)) + ' points successfully acquired for valence analysis'

    arousal_trainer = BackpropTrainer(arousal_net,
                                      learningrate=0.05,
                                      momentum=0.08,
                                      verbose=True)
    valence_trainer = BackpropTrainer(valence_net,
                                      learningrate=0.01,
                                      momentum=0.05,
                                      verbose=True)

    arousal_trainer.trainOnDataset(arousal_ds)
    valence_trainer.trainOnDataset(valence_ds)
    mean_internal_errors = []
    mean_errors = []

    for j in range(epochs / 50):
        arousal_trainer.trainEpochs(50)
        valence_trainer.trainEpochs(50)
        print str((j + 1) * 50) + '/' + str(epochs) + ' complete'
        sq_arousal_errors = [(arousal_net.activate(datum[0]) - datum[1])**2
                             for datum in test_data]
        sq_valence_errors = [(valence_net.activate(datum[0]) - datum[2])**2
                             for datum in test_data]
        errors = [
            sqrt(sq_arousal_errors[i] + sq_valence_errors[i])
            for i in range(len(sq_arousal_errors))
        ]
        mean_errors.append(np.mean(errors))

        sq_arousal_errors = [
            (arousal_net.activate(data_set[i][0]) - data_set[i][1])**2
            for i in range(len(data_set))
        ]
        sq_valence_errors = [
            (valence_net.activate(data_set[i][0]) - data_set[i][2])**2
            for i in range(len(data_set))
        ]
        errors = [
            sqrt(sq_arousal_errors[i] + sq_valence_errors[i])
            for i in range(len(sq_arousal_errors))
        ]
        mean_internal_errors.append(np.mean(errors))

    return arousal_net, valence_net, mean_errors, mean_internal_errors
Example #46
 
alldata.addSample([1,1],[1])
alldata.addSample([1,1],[1])
alldata.addSample([1,1],[1])
alldata.addSample([1,1],[1])
alldata.addSample([1,1],[1])

tstdata, trndata = alldata.splitWithProportion( 0.25 )
trndata._convertToOneOfMany( )
tstdata._convertToOneOfMany( )
 
#We can also examine the dataset
print "Number of training patterns: ", len(trndata)
print "Input and output dimensions: ", trndata.indim, trndata.outdim
print "First sample (input, target, class):"
print trndata['input'][0], trndata['target'][0], trndata['class'][0]

fnn     = buildNetwork( trndata.indim, 5, trndata.outdim, recurrent=False )
trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01 )

# I am not sure about this, I don't think my production code is implemented like this
modval = ModuleValidator()
for i in range(1000):
    trainer.trainEpochs(1)
    # trainEpochs() already trains on trndata; the extra trainOnDataset()
    # call would train twice per iteration, so it is dropped.
    cv = CrossValidator(trainer, trndata, n_folds=5, valfunc=modval.MSE)
    print "MSE %f @ %i" % (cv.validate(), i)

print tstdata
print ">", trainer.testOnClassData(dataset=tstdata)
Example #47
# splitting the data into test and validation sets
test_data, val_data = part_data.splitWithProportion(0.5)
print('Test set size: %d' % len(test_data))
print('Validation set size: %d' % len(val_data))

from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

net = buildNetwork(datasets.indim, 3, datasets.outdim)
trainer = BackpropTrainer(net,
                          dataset=train_data,
                          learningrate=0.01,
                          momentum=0.1,
                          verbose=True)

train_erros, val_erros = trainer.trainUntilConvergence(dataset=train_data,
                                                       maxEpochs=100)
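
# Note: trainUntilConvergence() internally holds out part of the dataset for
# validation (validationProportion defaults to 0.25) and returns two lists of
# per-epoch errors, one for training and one for validation.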

import matplotlib.pyplot as plt

plt.plot(train_erros, 'b', val_erros, 'r')
plt.show()
print(trainer.totalepochs)

# Another way to train: a fixed number of epochs
trainer.trainOnDataset(train_data, 500)

out = net.activateOnDataset(test_data)
for i in range(len(out)):
    print('out: %f, correct: %f' % (out[i], test_data['target'][i]))
Example #48
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
import pickle

dataset_and = SupervisedDataSet(2, 1)
dataset_and.addSample( (0,0) , (0,))
dataset_and.addSample( (0,1) , (0,))
dataset_and.addSample( (1,0) , (0,))
dataset_and.addSample( (1,1) , (1,))

net_and = buildNetwork(2, 4, 1, bias=True)

trainer_and = BackpropTrainer(net_and, learningrate = 0.01, momentum = 0.99)
trainer_and.trainOnDataset(dataset_and, 3000)
trainer_and.testOnData(verbose=True)


######################################################
dataset_or = SupervisedDataSet(2, 1)
dataset_or.addSample( (0,0) , (0,))
dataset_or.addSample( (0,1) , (1,))
dataset_or.addSample( (1,0) , (1,))
dataset_or.addSample( (1,1) , (1,))

net_or = buildNetwork(2, 4, 1, bias=True)

trainer_or = BackpropTrainer(net_or, learningrate = 0.01, momentum = 0.99)
trainer_or.trainOnDataset(dataset_or, 3000)
trainer_or.testOnData(verbose=True)
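
# A quick sanity check (sketch): activate both trained nets on a few inputs;
# the outputs should approximate the AND / OR truth tables.
print net_and.activate((1, 1)), net_and.activate((0, 1))
print net_or.activate((0, 0)), net_or.activate((0, 1))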
Example #49
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
import pickle

if __name__ == "__main__":
  ds = SupervisedDataSet(2, 1)
  ds.addSample( (0,0) , (1,))
  ds.addSample( (0,1) , (0,))
  ds.addSample( (1,0) , (0,))
  ds.addSample( (1,1) , (0,))

  net = buildNetwork(2, 4, 4, 1, bias=True)

  # try:
  #   f = open('_learned', 'r')
  #   net = pickle.load(f)
  #   f.close()
  # except:
  trainer = BackpropTrainer(net, learningrate=0.01, momentum=0.99)
  trainer.trainOnDataset(ds, 3000)
  trainer.testOnData()
  # f = open('_learned', 'w')
  # pickle.dump(net, f)
  # f.close()
  

  print net.activate((1,1))
  print net.activate((1,0))
  print net.activate((0,1))
  print net.activate((0,0))
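
  # A working round-trip of the commented-out persistence idea above (sketch):
  with open('_learned', 'wb') as f:
    pickle.dump(net, f)
  with open('_learned', 'rb') as f:
    net_restored = pickle.load(f)
  print net_restored.activate((0, 0))  # should match the trained net's output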

Example #50
def group_into_sets(speed_limits, waze_speeds, drive_speeds):
    data = []

    for i in range(len(waze_speeds)):
        datum = {}
        datum["order"] = i
        datum["speed_limit"] = speed_limits[i]
        datum["waze_speed"] = waze_speeds[i]
        datum["drive_speed"] = drive_speeds[i]
        print datum
        data.append(datum)

    ranges = []
    for i in range(6):
        ranges.append([])

    for i in range(len(data)):
        datum = data[i]
        waze_speed = datum["waze_speed"]
        if (waze_speed < 10):
            ranges[0].append(datum)
        elif (waze_speed < 20):
            ranges[1].append(datum)
        elif (waze_speed < 30):
            ranges[2].append(datum)
        elif (waze_speed < 40):
            ranges[3].append(datum)
        elif (waze_speed < 50):
            ranges[4].append(datum)
        else:
            ranges[5].append(datum)

    unsorted_data = []

    for group in ranges:
        if (len(group) > 0):
            ds = SupervisedDataSet(1, 1)

            for i in range(len(group)):
                print group[i]
                indata = group[i]["waze_speed"]
                outdata = group[i]["drive_speed"]
                ds.addSample(indata, outdata)

            n = buildNetwork(ds.indim, 8, 8, ds.outdim, recurrent=True)
            t = BackpropTrainer(n,
                                learningrate=0.001,
                                momentum=0.05,
                                verbose=True)
            t.trainOnDataset(ds, 1000)
            t.testOnData(verbose=True)

            for i in range(len(group)):
                group[i]["predicted_speed"] = n.activate(
                    group[i]["waze_speed"])
                unsorted_data.append(group[i])

    # Restore the original sample order (the binning above shuffled it).
    unsorted_data.sort(key=lambda d: d["order"])
    predicted_speeds = [d["predicted_speed"] for d in unsorted_data]

    return predicted_speeds
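
# A hypothetical usage sketch (illustrative names and values only): parallel
# per-segment speed lists in, one predicted speed per segment out.
limits = [30, 30, 50, 60]
waze = [25.0, 18.0, 47.0, 55.0]
drive = [28.0, 20.0, 45.0, 58.0]
print group_into_sets(limits, waze, drive)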
Example #51
net.addInputModule(inp) 
net.addModule(h1)
net.addModule(bias)

# create connections 
net.addConnection(IdentityConnection(inp, h1)) 
net.addConnection(FullConnection(h1, outp))
#net.addConnection(FullConnection(bias, outp))
#net.addConnection(FullConnection(bias, h1))

# finish up 
net.sortModules()

# initialize the backprop trainer and train 
trainer = BackpropTrainer(net, ds, momentum=.99, learningrate=0.01)
trainer.trainOnDataset(ds,10)
trainer.testOnData(verbose=True)

print 'Final weights:', net.params

print net

test_x = test_x[0]
preds_y = []
index_inpoints = size_ls

for i in range(len(xt)):
	# Predict one step ahead, then slide the input window: drop the oldest
	# value and feed the prediction back in as the newest input.
	pred_y = net.activate(test_x)
	preds_y.append(pred_y[0])
	test_x = test_x[1:]
	test_x.append(pred_y[0])
Example #52
DS = SupervisedDataSet(1, 1)

#Append Linked: x,y
for i in range(len(df)):
    DS.appendLinked((x_norm.ix[i]),(y_norm.ix[i]))


# #### Build and use the Trainer

# In[ ]:

# The learning rate shouldn't be bigger than 0.01
t = BackpropTrainer(fnn, learningrate=0.01, momentum=0.99, verbose=True, lrdecay=0.9999)

#Training on the DataSet with 1500 epochs
t.trainOnDataset(DS, 1500)


# # Model Evaluation

# In[ ]:

y_pred = fnn.activateOnDataset(DS)
# "De-normalize" again to recover the original monetary values
y_pred = y_pred * wy
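
# An evaluation sketch: compare the de-normalized predictions against the
# de-normalized targets (assumes wy is the normalization scale used above).
import numpy as np
y_true = np.asarray(y_norm) * wy
print 'MSE: %f' % np.mean((np.asarray(y_pred).ravel() - y_true.ravel()) ** 2)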


# In[ ]:

# Create the dataset for a regression line
Example #53
def train_callback():
	trainer = BackpropTrainer(net, learningrate=0.01, momentum=0.0, verbose=True)
	print 'MSE before', trainer.testOnData(ds, verbose=True)
	trainer.trainOnDataset(ds, 2000)
	print 'MSE after', trainer.testOnData(ds, verbose=True)
Example #54
alldata.addSample([-1, -1], [0])
alldata.addSample([-1, -1], [0])
alldata.addSample([-1, -1], [0])
alldata.addSample([-1, -1], [0])

alldata.addSample([1, 1], [1])
alldata.addSample([1, 1], [1])
alldata.addSample([1, 1], [1])
alldata.addSample([1, 1], [1])
alldata.addSample([1, 1], [1])

tstdata, trndata = alldata.splitWithProportion(0.25)
trndata._convertToOneOfMany()
tstdata._convertToOneOfMany()

# We can also examine the dataset
print "Number of training patterns: ", len(trndata)
print "Input and output dimensions: ", trndata.indim, trndata.outdim
print "First sample (input, target, class):"
print trndata['input'][0], trndata['target'][0], trndata['class'][0]

fnn     = buildNetwork( trndata.indim, 5, trndata.outdim, recurrent=False )
trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01 )

# I am not sure about this, I don't think my production code is implemented like this
modval = ModuleValidator()
trainer.trainEpochs(20)
# trainEpochs() already trained on trndata; a second trainOnDataset() call
# would repeat the training, so it is dropped.
cv = CrossValidator(trainer, trndata, n_folds=5, valfunc=modval.MSE)
print "MSE %f" % (cv.validate())
Example #55
out_len = 10
NET_SHAPE = (inp_len, inp_len // 2, inp_len // 4, out_len)
train_err = [0] * len(offsets)
test_err = [0] * len(offsets)

print 'training_set_max_size:', train_size, '\n'

for i, o in enumerate(offsets):
    print 'learning a neural net with training_set_size=' + str(o)
    print 'getting data',
    cifar_data = datasets.cifar_nn(offset=o)
    print 'building net',
    net = buildNetwork(*NET_SHAPE)
    print 'training',
    trainer = BackpropTrainer(net, cifar_data['train_nn'])
    trainer.trainOnDataset(cifar_data['train_nn'], 5)
    print 'validating'
    # The comprehensions must index with their own loop variable k,
    # not the outer i, or every activation reuses the same sample.
    train_err[i] = mean_squared_error(cifar_data['train']['labels'], [
        net.activate(cifar_data['train']['data'][k])
        for k in xrange(len(cifar_data['train']['data']))
    ])
    test_err[i] = mean_squared_error(cifar_data['test']['labels'], [
        net.activate(cifar_data['test']['data'][k])
        for k in xrange(len(cifar_data['test']['data']))
    ])
    print 'train_err: ' + str(train_err[i])
    print 'test_err: ' + str(test_err[i])
    print '---'

# Plot results
print 'plotting results'
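
# A plotting sketch for the results above (assumes matplotlib is available):
import matplotlib.pyplot as plt
plt.plot(offsets, train_err, label='train error')
plt.plot(offsets, test_err, label='test error')
plt.xlabel('training set size offset')
plt.ylabel('MSE')
plt.legend()
plt.show()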
Example #56
tst_ds, trn_ds = ds.splitWithProportion(0.2)

# print "train data"
# for inpt, target in trn_ds:
#     print inpt, target

# print "test data"
# for inpt, target in tst_ds:
#     print inpt, target

# More information about trainers: http://pybrain.org/docs/api/supervised/trainers.html

print "Training started"

trainer.trainOnDataset(trn_ds, 10)

# trainer.trainUntilConvergence(trn_ds, maxEpochs=100, verbose=True, continueEpochs=10, validationProportion=0.25)

# trainer.testOnData(tst_ds, verbose=True)


# exercise: how to get network error value for a given epoch?
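
# One possible answer (sketch): BackpropTrainer.train() runs a single epoch
# and returns the error on the trainer's current dataset, so calling it in a
# loop exposes the per-epoch error:
for epoch in range(10):
    err = trainer.train()
    print "epoch %d: error %f" % (epoch, err)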

# from: http://stackoverflow.com/questions/8150772/pybrain-how-to-print-a-network-nodes-and-weights
def display_net(net):
    for mod in net.modules:
        print("Module:", mod.name)
        if mod.paramdim > 0:
            print("--parameters:", mod.params)
        for conn in net.connections[mod]: