# Example #1
 def __init__(self, indim, outdim, nhidden=20):
     """ set up a feed-forward network with a single hidden layer. """
     FA.__init__(self, indim, outdim)

     # step size for gradient-based weight updates
     self.alpha = 0.1

     # layer sizes
     self.indim = indim
     self.nhidden = nhidden
     self.outdim = outdim

     # switch the output activation to a classification-style one when True
     self.classification = False

     # train sample-by-sample (online) or in batch for self.epochs passes
     self.online = False
     self.epochs = 100

     # small random initial weights; the extra column holds the bias weight
     self.hWeights = 0.01 * np.random.random((self.nhidden, self.indim + 1))
     self.oWeights = 0.01 * np.random.random((self.outdim, self.nhidden + 1))

     # net input (pre-sigmoid sum) of each neuron
     self.hActivation = np.zeros((self.nhidden, 1))
     self.oActivation = np.zeros((self.outdim, 1))

     # post-sigmoid outputs; input and hidden layers carry one extra bias unit
     self.iOutput = np.zeros((self.indim + 1, 1))
     self.hOutput = np.zeros((self.nhidden + 1, 1))
     self.oOutput = np.zeros((self.outdim, 1))

     # error terms used by backpropagation
     self.hDelta = np.zeros(self.nhidden)
     self.oDelta = np.zeros(self.outdim)
# Example #2
    def reset(self):
        """ discard learned parameters and rebuild the pybrain network. """
        FA.reset(self)

        # single-layer network; a hidden-layer variant was tried before:
        # buildNetwork(self.indim, 2*(self.indim+self.outdim), self.outdim)
        net = buildNetwork(self.indim, self.outdim, bias=True)
        initial = random.normal(0, 0.1, net.params.shape)
        net._setParameters(initial)
        self.network = net
        self.pybdataset = SupervisedDataSet(self.indim, self.outdim)
# Example #3
 def update(self, inp, tgt):
     """ incorporate one (input, target) sample. """
     if not self.online:
         # batch mode: only record the sample; training happens elsewhere
         FA.update(self, inp, tgt)
         return
     # online mode: a single forward/backward pass on this sample
     self.predict(inp)
     self.backward(tgt)
# Example #4
 def reset(self):
     """ forget everything learned and build a fresh LWPR model. """
     FA.reset(self)

     # re-create the LWPR model; meta-learning adapts the per-dimension
     # learning rates online
     model = LWPR(self.indim, self.outdim)
     model.init_D = 10. * np.eye(self.indim)
     model.init_alpha = 0.1 * np.ones([self.indim, self.indim])
     model.meta = True
     self.lwpr = model
# Example #5
 def __init__(self, indim, outdim, bayes=True, rbf=False):
     """ initialize function approximator with input and output dimension. """
     self.rbf = rbf
     self.bayes = bayes
     # RBF features use a fixed number of centers, otherwise one per input dim
     self.numCenters = 20 if self.rbf else indim
     FA.__init__(self, indim, outdim)
# Example #6
 def reset(self):
     """ re-initialize RBF centers, readout weights and the Bayesian
         posterior, forgetting everything learned before. """
     FA.reset(self)

     # random centers in [-1, 1)^indim and random readout weights.
     # `range` replaces the Python-2-only `xrange`; iteration behavior
     # is identical and this also runs under Python 3.
     self.centers = [np.random.uniform(-1, 1, self.indim) for i in range(self.numCenters)]
     self.W = np.random.random((self.numCenters, self.outdim))

     # parameters for maximum a posteriori estimate: prior precision,
     # posterior covariance SN and posterior mean mN
     # NOTE(review): np.matrix is deprecated in modern NumPy — plain 2-D
     # arrays would be preferable, but callers may rely on matrix semantics.
     self.alpha = 100.
     self.SN = np.matrix(self.alpha * np.eye(self.numCenters))
     self.mN = np.matrix(np.zeros((self.numCenters, 1), float))
# Example #7
    def update(self, inp, tgt):
        """ store one flattened (input, target) pair in the pybrain dataset. """
        FA.update(self, inp, tgt)
        self.pybdataset.addSample(self._asFlatArray(inp), self._asFlatArray(tgt))
# Example #8
 def reset(self):
     """ this initializes the function approximator to an initial state,
         forgetting everything it has learned before. """
     FA.reset(self)
     # small random linear map; the extra input row is the bias term
     shape = (self.indim + 1, self.outdim)
     self.matrix = np.random.uniform(-0.1, 0.1, shape)
# Example #9
 def __init__(self, indim, outdim):
     """ create the approximator with the given input/output dimensions. """
     FA.__init__(self, indim, outdim)
     # no model file associated yet — presumably set by load/save code;
     # TODO(review): confirm against callers
     self.filename = None