# Example 1
    def __init__(self, module, learner=None):
        """Attach a Gaussian exploration layer on top of *module* and set up
        log-likelihood storage in the history dataset.

        @param module: a FeedForwardNetwork with exactly one output module
        @param learner: optional learner; it is told about the rebuilt module
        """
        assert isinstance(module, FeedForwardNetwork)
        assert len(module.outmodules) == 1

        LearningAgent.__init__(self, module, learner)

        # gaussian exploration layer, sigma initialised to -2 per dimension
        self.explorationlayer = GaussianLayer(self.outdim, name='gauss')
        self.explorationlayer.setSigma([-2] * self.outdim)

        # re-wire the network: the former output feeds the gaussian layer,
        # which becomes the new output module
        previous_out = self.module.outmodules.pop()
        self.module.addOutputModule(self.explorationlayer)
        identity = IdentityConnection(previous_out, self.module['gauss'])
        self.module.addConnection(identity)
        self.module.sortModules()

        # the learner must operate on the rebuilt module
        self.learner.setModule(self.module)

        # extend the history dataset with a linked log-likelihood field
        self.history.addField('loglh', self.module.paramdim)
        self.history.link.append('loglh')
        self.loglh = None
# Example 2
 def __init__(self, module, learner = None):
     """Build the agent: wrap *module* with a Gaussian output layer and
     prepare the history dataset for storing log likelihoods.
     """
     assert isinstance(module, FeedForwardNetwork)
     assert len(module.outmodules) == 1

     LearningAgent.__init__(self, module, learner)

     # exploration noise comes from a Gaussian layer named 'gauss'
     gauss = GaussianLayer(self.outdim, name='gauss')
     gauss.setSigma([-2] * self.outdim)
     self.explorationlayer = gauss

     # the former output module now feeds the exploration layer through
     # an identity connection, making 'gauss' the network's output
     net = self.module
     former_output = net.outmodules.pop()
     net.addOutputModule(gauss)
     net.addConnection(IdentityConnection(former_output, net['gauss']))
     net.sortModules()

     # the learner works on the extended network
     self.learner.setModule(self.module)

     # history gains a linked field for the log likelihoods
     self.history.addField('loglh', self.module.paramdim)
     self.history.link.append('loglh')
     self.loglh = None
# Example 3
def exec_algo(xml_file, output_location):
                
        rootObj=ml.parse(xml_file)

        #Getting the root element so that we get the subclasses and its members and member function

        file=open(rootObj.MachineLearning.classification.datafile)

        var_inp=rootObj.MachineLearning.classification.input
        var_out=rootObj.MachineLearning.classification.output
        classes=rootObj.MachineLearning.classification.classes

        DS=ClassificationDataSet(var_inp,var_out,nb_classes=classes)

        for line in file.readlines():
                data=[float(x) for x in line.strip().split(',') if x != '']
                inp=tuple(data[:var_inp])
                output=tuple(data[var_inp:])
                DS.addSample(inp,output)

        split=rootObj.MachineLearning.classification.split
        tstdata,trndata=DS.splitWithProportion(split)
        trdata=ClassificationDataSet(trndata.indim,var_out,nb_classes=classes)
        tsdata=ClassificationDataSet(tstdata.indim,var_out,nb_classes=classes)

        for i in xrange(trndata.getLength()):
            trdata.addSample(trndata.getSample(i)[0],trndata.getSample(i)[1])

        for i in xrange(tstdata.getLength()):
            tsdata.addSample(tstdata.getSample(i)[0],tstdata.getSample(i)[1])


        trdata._convertToOneOfMany()
        tsdata._convertToOneOfMany()

        hiddenNeurons=rootObj.MachineLearning.classification.algorithm.RadialBasisFunctionNetwork.hiddenNeurons
        fnn=FeedForwardNetwork()
        inputLayer=LinearLayer(trdata.indim)
        hiddenLayer=GaussianLayer(hiddenNeurons)
        outputLayer=LinearLayer(trdata.outdim)

        fnn.addInputModule(inputLayer)
        fnn.addModule(hiddenLayer)
        fnn.addOutputModule(outputLayer)

        in_to_hidden=FullConnection(inputLayer,hiddenLayer)
        hidden_to_outputLayer=FullConnection(hiddenLayer,outputLayer)

        fnn.addConnection(in_to_hidden)
        fnn.addConnection(hidden_to_outputLayer)

        fnn.sortModules()
        learningrate=rootObj.MachineLearning.classification.algorithm.RadialBasisFunctionNetwork.learningRate
        momentum=rootObj.MachineLearning.classification.algorithm.RadialBasisFunctionNetwork.momentum
        epochs=rootObj.MachineLearning.classification.algorithm.RadialBasisFunctionNetwork.epochs
        trainer=BackpropTrainer(fnn,dataset=trdata, verbose=True, learningrate=learningrate, momentum=momentum)
        trainer.trainEpochs(epochs=epochs)
        #trainer.train()
        #trainer.trainUntilConvergence(dataset=trdata, maxEpochs=500, verbose=True, continueEpochs=10, validationProportion=0.25)

        trresult=percentError(trainer.testOnClassData(),trdata['class'])

        #testingResult=percentError(trainer.testOnClassData(dataset=tsdata),tsdata['class'])

        #print "Training accuracy : %f , Testing Accuracy: %f" % (100-trresult,100-testingResult)

        print "Training accuracy : %f " % (100-trresult)
        ts=time.time()
        directory = output_location + sep + str(int(ts)) ;
        makedirs(directory)
        fileObject=open(output_location + sep + str(int(ts)) + sep + 'pybrain_RBF','w')
        pickle.dump(trainer,fileObject)
        pickle.dump(fnn,fileObject)
        fileObject.close()
# Example 4
class PolicyGradientAgent(LearningAgent):
    """ PolicyGradientAgent is a learning agent, that adds a GaussianLayer to
        its module and stores the log likelihoods (loglh) in the dataset. It is used
        for rllearners like enac, reinforce, gpomdp, ...
    """

    def __init__(self, module, learner=None):
        """ @param module: a FeedForwardNetwork with exactly one output module
            @param learner: optional learner; it is informed of the rebuilt module
        """
        assert isinstance(module, FeedForwardNetwork)
        assert len(module.outmodules) == 1

        LearningAgent.__init__(self, module, learner)

        # create gaussian layer
        self.explorationlayer = GaussianLayer(self.outdim, name='gauss')
        self.explorationlayer.setSigma([-2] * self.outdim)

        # add gaussian layer to top of network through identity connection
        out = self.module.outmodules.pop()
        self.module.addOutputModule(self.explorationlayer)
        self.module.addConnection(IdentityConnection(out, self.module['gauss']))
        self.module.sortModules()

        # tell learner the new module
        self.learner.setModule(self.module)

        # add the log likelihood (loglh) to the dataset and link it to the others
        self.history.addField('loglh', self.module.paramdim)
        self.history.link.append('loglh')
        self.loglh = None

    def enableLearning(self):
        """ activate learning """
        LearningAgent.enableLearning(self)
        self.explorationlayer.enabled = True

    def disableLearning(self):
        """ deactivate learning """
        LearningAgent.disableLearning(self)
        self.explorationlayer.enabled = False

    def setSigma(self, sigma):
        """ sets variance in the exploration layer """
        assert len(sigma) == self.explorationlayer.paramdim
        # change the parameters of the exploration layer (owner is self.module)
        self.explorationlayer._setParameters(sigma, self.module)

    def getSigma(self):
        """ returns the variance from the exploration layer """
        return self.explorationlayer.params

    def setParameters(self, params):
        """ sets the parameters of the module """
        self.module._setParameters(params)
        # update parameters for learner
        self.learner.setModule(self.module)

    def getAction(self):
        """ calls the LearningAgent getAction method. Additionally, executes a backward pass in the module
            and stores all the derivatives in the dataset. """
        HistoryAgent.getAction(self)

        self.lastaction = self.module.activate(self.lastobs).copy()
        self.module.backward()
        self.loglh = self.module.derivs.copy()

        # zero the derivatives in place, then reset for the next step
        self.module.derivs *= 0
        self.module.reset()
        return self.lastaction

    def giveReward(self, r):
        """ stores observation, action, reward and the log likelihood
            in the history dataset.
            @param r: reward for this timestep
            @note: this function overwrites HistoryAgent.giveReward(self, r)
        """
        # 'is not None' instead of '!= None': comparing a numpy array to
        # None elementwise yields an ambiguous truth value in assert
        assert self.lastobs is not None
        assert self.lastaction is not None

        # store state, action, r, loglh in dataset
        if self.remember:
            self.history.appendLinked(self.lastobs, self.lastaction, r, self.loglh)

        self.lastobs = None
        self.lastaction = None
# Example 5
class PolicyGradientAgent(LearningAgent):
    """ PolicyGradientAgent is a learning agent, that adds a GaussianLayer to
        its module and stores the log likelihoods (loglh) in the dataset. It is used
        for rllearners like enac, reinforce, gpomdp, ...
    """
    def __init__(self, module, learner=None):
        """ @param module: a FeedForwardNetwork with exactly one output module
            @param learner: optional learner; it is informed of the rebuilt module
        """
        assert isinstance(module, FeedForwardNetwork)
        assert len(module.outmodules) == 1

        LearningAgent.__init__(self, module, learner)

        # create gaussian layer
        self.explorationlayer = GaussianLayer(self.outdim, name='gauss')
        self.explorationlayer.setSigma([-2] * self.outdim)

        # add gaussian layer to top of network through identity connection
        out = self.module.outmodules.pop()
        self.module.addOutputModule(self.explorationlayer)
        self.module.addConnection(IdentityConnection(out,
                                                     self.module['gauss']))
        self.module.sortModules()

        # tell learner the new module
        self.learner.setModule(self.module)

        # add the log likelihood (loglh) to the dataset and link it to the others
        self.history.addField('loglh', self.module.paramdim)
        self.history.link.append('loglh')
        self.loglh = None

    def enableLearning(self):
        """ activate learning """
        LearningAgent.enableLearning(self)
        self.explorationlayer.enabled = True

    def disableLearning(self):
        """ deactivate learning """
        LearningAgent.disableLearning(self)
        self.explorationlayer.enabled = False

    def setSigma(self, sigma):
        """ sets variance in the exploration layer """
        assert len(sigma) == self.explorationlayer.paramdim
        # change the parameters of the exploration layer (owner is self.module)
        self.explorationlayer._setParameters(sigma, self.module)

    def getSigma(self):
        """ returns the variance from the exploration layer """
        return self.explorationlayer.params

    def setParameters(self, params):
        """ sets the parameters of the module """
        self.module._setParameters(params)
        # update parameters for learner
        self.learner.setModule(self.module)

    def getAction(self):
        """ calls the LearningAgent getAction method. Additionally, executes a backward pass in the module
            and stores all the derivatives in the dataset. """
        HistoryAgent.getAction(self)

        self.lastaction = self.module.activate(self.lastobs).copy()
        self.module.backward()
        self.loglh = self.module.derivs.copy()

        # zero the derivatives in place, then reset for the next step
        self.module.derivs *= 0
        self.module.reset()
        return self.lastaction

    def giveReward(self, r):
        """ stores observation, action, reward and the log likelihood
            in the history dataset.
            @param r: reward for this timestep
            @note: this function overwrites HistoryAgent.giveReward(self, r)
        """
        # 'is not None' instead of '!= None': comparing a numpy array to
        # None elementwise yields an ambiguous truth value in assert
        assert self.lastobs is not None
        assert self.lastaction is not None

        # store state, action, r, loglh in dataset
        if self.remember:
            self.history.appendLinked(self.lastobs, self.lastaction, r,
                                      self.loglh)

        self.lastobs = None
        self.lastaction = None