Code Example #1
    # assumes: import numpy as np, from numpy.linalg import inv, and a FOSELM class defined elsewhere in the module
    def __init__(self,
                 inputs,
                 outputs,
                 numHiddenNeurons,
                 activationFunction,
                 LN=True,
                 AE=True,
                 ORTH=True,
                 inputWeightForgettingFactor=0.999,
                 outputWeightForgettingFactor=0.999,
                 hiddenWeightForgettingFactor=0.999):

        self.activationFunction = activationFunction
        self.inputs = inputs
        self.outputs = outputs
        self.numHiddenNeurons = numHiddenNeurons

        # input to hidden weights
        self.inputWeights = np.random.random(
            (self.numHiddenNeurons, self.inputs))
        # hidden layer to hidden layer weights
        self.hiddenWeights = np.random.random(
            (self.numHiddenNeurons, self.numHiddenNeurons))
        # initial hidden layer activation
        self.initial_H = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
        self.H = self.initial_H
        self.LN = LN
        self.AE = AE
        self.ORTH = ORTH
        # bias of hidden units
        self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
        # hidden to output layer connection
        self.beta = np.random.random((self.numHiddenNeurons, self.outputs))

        # auxiliary matrix used for sequential learning
        self.M = inv(0.00001 * np.eye(self.numHiddenNeurons))

        self.forgettingFactor = outputWeightForgettingFactor

        self.trace = 0
        self.thresReset = 0.001

        if self.AE:
            self.inputAE = FOSELM(inputs=inputs,
                                  outputs=inputs,
                                  numHiddenNeurons=numHiddenNeurons,
                                  activationFunction=activationFunction,
                                  LN=LN,
                                  forgettingFactor=inputWeightForgettingFactor,
                                  ORTH=ORTH)

            self.hiddenAE = FOSELM(inputs=numHiddenNeurons,
                                   outputs=numHiddenNeurons,
                                   numHiddenNeurons=numHiddenNeurons,
                                   activationFunction=activationFunction,
                                   LN=LN,
                                   ORTH=ORTH)
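The constructor above keeps a recurrent hidden state in self.H and allocates inputWeights, hiddenWeights, and a hidden-unit bias. The snippet below is a minimal sketch of how a recurrent hidden activation could be computed from those attributes; the sigmoid update rule and the helper name recurrent_hidden_activation are illustrative assumptions, not code from this repository.

import numpy as np

def recurrent_hidden_activation(X, inputWeights, hiddenWeights, bias, H_prev):
    # X: (1, inputs) sample, H_prev: (1, numHiddenNeurons) previous hidden state.
    # Combine the input projection, the recurrent projection of the previous
    # hidden state, and the hidden-unit bias, then squash with a sigmoid
    # (assumed here; the actual activationFunction is configurable above).
    V = X.dot(inputWeights.T) + H_prev.dot(hiddenWeights.T) + bias
    return 1.0 / (1.0 + np.exp(-V))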
Code Example #2

    # assumes: import numpy as np and a FOSELM class defined elsewhere in the module
    def __init__(self, inputs, outputs, numHiddenNeurons, activationFunction,
                 min_t, max_t, LN=True, AE=True, ORTH=True,
                 inputWeightForgettingFactor=0.999,
                 outputWeightForgettingFactor=0.999,
                 hiddenWeightForgettingFactor=0.999):

        self.min_t = min_t
        self.max_t = max_t
        self.activationFunction = activationFunction
        self.inputs = inputs
        self.outputs = outputs
        self.numHiddenNeurons = numHiddenNeurons

        # input to hidden weights
        self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
        # hidden layer to hidden layer weights
        self.hiddenWeights = np.random.random((self.numHiddenNeurons, self.numHiddenNeurons))
        # initial hidden layer activation
        self.initial_H = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
        self.H = self.initial_H

        self.LN = LN
        self.AE = AE
        self.ORTH = ORTH

        # bias of hidden units
        self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1

        self.forgettingFactor = outputWeightForgettingFactor

        self.FFmin = 0.9
        self.FFmax = 0.999

        self.trace = 0
        self.thresReset = 0.001

        if self.AE:
            self.inputAE = FOSELM(inputs=inputs,
                                  outputs=inputs,
                                  numHiddenNeurons=numHiddenNeurons,
                                  activationFunction=activationFunction,
                                  LN=LN,
                                  forgettingFactor=inputWeightForgettingFactor,
                                  ORTH=ORTH
                                  )

            self.hiddenAE = FOSELM(inputs=numHiddenNeurons,
                                   outputs=numHiddenNeurons,
                                   numHiddenNeurons=numHiddenNeurons,
                                   activationFunction=activationFunction,
                                   LN=LN,
                                   ORTH=ORTH
                                   )
Code Example #3
  # assumes: import numpy as np and a FOSELM class defined elsewhere in the module
  def __init__(self, inputs, outputs, numHiddenNeurons, activationFunction, LN=True,
               outputWeightForgettingFactor=0.999,
               inputWeightForgettingFactor=0.999, AE=True, ORTH=False):

    self.activationFunction = activationFunction
    self.inputs = inputs
    self.outputs = outputs
    self.numHiddenNeurons = numHiddenNeurons

    # input to hidden weights
    self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
    # bias of hidden units
    #self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1

    self.bias = np.zeros((1, self.numHiddenNeurons))
    # hidden to output layer connection
    self.beta = np.random.random((self.numHiddenNeurons, self.outputs))

    # auxiliary matrix used for sequential learning
    self.M = None
    self.LN = LN
    self.AE = AE
    self.ORTH = ORTH
    self.forgettingFactor = outputWeightForgettingFactor
    self.FOSELM_AE = FOSELM(inputs=inputs,
                            outputs=inputs,
                            numHiddenNeurons=numHiddenNeurons,
                            activationFunction=activationFunction,
                            LN=True,
                            forgettingFactor=inputWeightForgettingFactor,
                            ORTH=self.ORTH)
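In OS-ELM-style learners, an auxiliary matrix such as self.M in Code Example #1 usually stores the matrix propagated by a recursive least-squares update, and self.beta the hidden-to-output weights. The helper below sketches the classic FOS-ELM update with a forgetting factor lam; whether these classes apply exactly this formula is an assumption.

import numpy as np
from numpy.linalg import inv

def fos_elm_update(M, beta, H, T, lam=0.999):
    # One sequential step from hidden activations H (n, hidden) and targets T (n, outputs).
    n = H.shape[0]
    # M <- (1/lam) * (M - M H^T (lam*I + H M H^T)^-1 H M)
    K = inv(lam * np.eye(n) + H.dot(M).dot(H.T))
    M = (M - M.dot(H.T).dot(K).dot(H).dot(M)) / lam
    # beta <- beta + M H^T (T - H beta)
    beta = beta + M.dot(H.T).dot(T - H.dot(beta))
    return M, beta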