Example #1
  def __init__(self, inputs, outputs, numHiddenNeurons, activationFunction, LN=True,
               outputWeightForgettingFactor=0.999,
               inputWeightForgettingFactor=0.999, AE=True, ORTH=False):


    self.activationFunction = activationFunction
    self.inputs = inputs
    self.outputs = outputs
    self.numHiddenNeurons = numHiddenNeurons

    # input to hidden weights
    self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
    # bias of hidden units
    #self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1

    self.bias = np.zeros([1, self.numHiddenNeurons])
    # hidden to output layer connection
    self.beta = np.random.random((self.numHiddenNeurons, self.outputs))

    # auxiliary matrix used for sequential learning
    self.M = None
    self.LN = LN
    self.AE = AE
    self.ORTH = ORTH
    self.forgettingFactor = outputWeightForgettingFactor
    self.FOSELM_AE = FOSELM(inputs=inputs,
                            outputs=inputs,
                            numHiddenNeurons=numHiddenNeurons,
                            activationFunction=activationFunction,
                            LN=True,
                            forgettingFactor=inputWeightForgettingFactor,
                            ORTH=self.ORTH)
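
All of the examples on this page call a handful of helpers that are not shown here (FOSELM, linear, linear_recurrent, sigmoidActFunc, orthogonalization), together with NumPy's inv and pinv. Below is a minimal sketch of plausible definitions, inferred from the call sites; the project's real implementations may differ.

import numpy as np
from numpy.linalg import inv, pinv

def linear(features, inputW, bias):
  # (numSamples, inputs) . (inputs, numHiddenNeurons) + bias -> pre-activation V
  return np.dot(features, np.transpose(inputW)) + bias

def linear_recurrent(features, inputW, hiddenW, hiddenA, bias):
  # same as linear() plus a recurrent term from the previous hidden activation
  return (np.dot(features, np.transpose(inputW))
          + np.dot(hiddenA, np.transpose(hiddenW)) + bias)

def sigmoidActFunc(V):
  return 1.0 / (1.0 + np.exp(-V))

def orthogonalization(W):
  # orthonormalize via QR; a stand-in for whatever routine the project uses
  Q, _ = np.linalg.qr(W)
  return Q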
Example #2

    def __init__(self,
                 inputs,
                 outputs,
                 numHiddenNeurons,
                 activationFunction,
                 LN=True,
                 AE=True,
                 ORTH=True,
                 inputWeightForgettingFactor=0.999,
                 outputWeightForgettingFactor=0.999,
                 hiddenWeightForgettingFactor=0.999):

        self.activationFunction = activationFunction
        self.inputs = inputs
        self.outputs = outputs
        self.numHiddenNeurons = numHiddenNeurons

        # input to hidden weights
        self.inputWeights = np.random.random(
            (self.numHiddenNeurons, self.inputs))
        # hidden layer to hidden layer weights
        self.hiddenWeights = np.random.random(
            (self.numHiddenNeurons, self.numHiddenNeurons))
        # initial hidden layer activation
        self.initial_H = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
        self.H = self.initial_H
        self.LN = LN
        self.AE = AE
        self.ORTH = ORTH
        # bias of hidden units
        self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
        # hidden to output layer connection
        self.beta = np.random.random((self.numHiddenNeurons, self.outputs))

        # auxiliary matrix used for sequential learning
        self.M = inv(0.00001 * np.eye(self.numHiddenNeurons))

        self.forgettingFactor = outputWeightForgettingFactor

        self.trace = 0
        self.thresReset = 0.001

        if self.AE:
            self.inputAE = FOSELM(inputs=inputs,
                                  outputs=inputs,
                                  numHiddenNeurons=numHiddenNeurons,
                                  activationFunction=activationFunction,
                                  LN=LN,
                                  forgettingFactor=inputWeightForgettingFactor,
                                  ORTH=ORTH)

            self.hiddenAE = FOSELM(inputs=numHiddenNeurons,
                                   outputs=numHiddenNeurons,
                                   numHiddenNeurons=numHiddenNeurons,
                                   activationFunction=activationFunction,
                                   LN=LN,
                                   ORTH=ORTH)
Example #3

    def __init__(self, inputs, outputs, numHiddenNeurons, activationFunction,
                 min_t, max_t, LN=True, AE=True, ORTH=True,
                 inputWeightForgettingFactor=0.999,
                 outputWeightForgettingFactor=0.999,
                 hiddenWeightForgettingFactor=0.999):

        self.min_t = min_t
        self.max_t = max_t
        self.activationFunction = activationFunction
        self.inputs = inputs
        self.outputs = outputs
        self.numHiddenNeurons = numHiddenNeurons

        # input to hidden weights
        self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
        # hidden layer to hidden layer weights
        self.hiddenWeights = np.random.random((self.numHiddenNeurons, self.numHiddenNeurons))
        # initial hidden layer activation
        self.initial_H = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
        self.H = self.initial_H

        self.LN = LN
        self.AE = AE
        self.ORTH = ORTH

        # bias of hidden units
        self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1

        self.forgettingFactor = outputWeightForgettingFactor

        self.FFmin = 0.9
        self.FFmax = 0.999

        self.trace = 0
        self.thresReset = 0.001

        if self.AE:
            self.inputAE = FOSELM(inputs=inputs,
                                  outputs=inputs,
                                  numHiddenNeurons=numHiddenNeurons,
                                  activationFunction=activationFunction,
                                  LN=LN,
                                  forgettingFactor=inputWeightForgettingFactor,
                                  ORTH=ORTH
                                  )

            self.hiddenAE = FOSELM(inputs=numHiddenNeurons,
                                   outputs=numHiddenNeurons,
                                   numHiddenNeurons=numHiddenNeurons,
                                   activationFunction=activationFunction,
                                   LN=LN,
                                   ORTH=ORTH
                                   )
Example #4
class NAOSELM(object):
  def __init__(self, inputs, outputs, numHiddenNeurons, activationFunction, LN=True,
               outputWeightForgettingFactor=0.999,
               inputWeightForgettingFactor=0.999, AE=True, ORTH=False):


    self.activationFunction = activationFunction
    self.inputs = inputs
    self.outputs = outputs
    self.numHiddenNeurons = numHiddenNeurons

    # input to hidden weights
    self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
    # bias of hidden units
    #self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1

    self.bias = np.zeros([1, self.numHiddenNeurons])
    # hidden to output layer connection
    self.beta = np.random.random((self.numHiddenNeurons, self.outputs))

    # auxiliary matrix used for sequential learning
    self.M = None
    self.LN = LN
    self.AE = AE
    self.ORTH = ORTH
    self.forgettingFactor = outputWeightForgettingFactor
    self.FOSELM_AE = FOSELM(inputs=inputs,
                            outputs=inputs,
                            numHiddenNeurons=numHiddenNeurons,
                            activationFunction=activationFunction,
                            LN=True,
                            forgettingFactor=inputWeightForgettingFactor,
                            ORTH=self.ORTH)


  def layerNormalization(self, H, scaleFactor=1, biasFactor=0):

    H_normalized = (H - H.mean()) / np.sqrt(H.var() + 0.00001)
    H_normalized = scaleFactor * H_normalized + biasFactor

    return H_normalized
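
  # Note: layerNormalization() standardizes V to roughly zero mean and unit
  # variance across hidden units before the sigmoid; e.g. for H = [[1, 2, 3, 4]]
  # it returns about [[-1.34, -0.45, 0.45, 1.34]] with the default
  # scaleFactor=1, biasFactor=0, keeping pre-activations in the sigmoid's
  # sensitive range regardless of weight magnitudes.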

  def __calculateInputWeightsUsingAE(self, features):
    self.FOSELM_AE.train(features=features, targets=features)
    return self.FOSELM_AE.beta

  def calculateHiddenLayerActivation(self, features):
    """
    Calculate activation level of the hidden layer
    :param features: feature matrix with dimension (numSamples, numInputs)
    :return: activation level (numSamples, numHiddenNeurons)
    """
    if self.AE:
      self.inputWeights = self.__calculateInputWeightsUsingAE(features)
    if self.activationFunction == "sig":
      V = linear(features, self.inputWeights, self.bias)
      if self.LN:
        V = self.layerNormalization(V)
      H = sigmoidActFunc(V)
    else:
      print("Unknown activation function type")
      raise NotImplementedError

    return H

  def initializePhase(self, lamb=0.0001):
    """
    Step 1: Initialization phase
    :param lamb: regularization factor used to initialize the auxiliary matrix M
    """

    self.FOSELM_AE.initializePhase(lamb=lamb)
    self.M = inv(lamb * np.eye(self.numHiddenNeurons))
    self.beta = np.zeros([self.numHiddenNeurons, self.outputs])


  def train(self, features, targets, VFF_RLS=False):
    """
    Step 2: Sequential learning phase
    :param features: feature matrix with dimension (numSamples, numInputs)
    :param targets: target matrix with dimension (numSamples, numOutputs)
    """
    (numSamples, numOutputs) = targets.shape
    assert features.shape[0] == targets.shape[0]

    H = self.calculateHiddenLayerActivation(features)
    Ht = np.transpose(H)

    try:
      scale = 1.0 / self.forgettingFactor
      self.M = scale * self.M - np.dot(scale * self.M,
                                       np.dot(Ht, np.dot(
                                           pinv(np.eye(numSamples) + np.dot(H, np.dot(scale * self.M, Ht))),
                                           np.dot(H, scale * self.M))))

      #self.beta = (self.forgettingFactor)*self.beta + np.dot(self.M, np.dot(Ht, targets - np.dot(H, (self.forgettingFactor)*self.beta)))
      self.beta = self.beta + np.dot(self.M, np.dot(Ht, targets - np.dot(H, self.beta)))

    except np.linalg.LinAlgError:
      print("SVD did not converge; skipping the current training cycle")
    # else:
    #   raise RuntimeError

  def predict(self, features):
    """
    Make prediction with feature matrix
    :param features: feature matrix with dimension (numSamples, numInputs)
    :return: predictions with dimension (numSamples, numOutputs)
    """
    H = self.calculateHiddenLayerActivation(features)
    prediction = np.dot(H, self.beta)
    return prediction
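
The M update in train() is the recursive least-squares covariance update with a forgetting factor, written through the matrix-inversion (Woodbury) identity so that only a numSamples x numSamples matrix is inverted per step. A standalone numerical check of that identity (all names below are local to this sketch):

import numpy as np
from numpy.linalg import inv

rng = np.random.RandomState(0)
n, k = 5, 3                    # hidden neurons, samples per batch
ff = 0.999                     # forgetting factor
M = inv(0.0001 * np.eye(n))    # as in initializePhase
A = inv(M)                     # the discounted autocorrelation that M tracks
for _ in range(10):
  H = rng.rand(k, n)
  A = ff * A + np.dot(H.T, H)  # direct form: M should stay equal to inv(A)
  scale = 1.0 / ff             # Woodbury form used by train()
  M = scale * M - np.dot(scale * M, np.dot(H.T, np.dot(
      inv(np.eye(k) + np.dot(H, np.dot(scale * M, H.T))),
      np.dot(H, scale * M))))
print(np.allclose(M, inv(A)))  # expect True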
class ORELM(object):
  def __init__(self, inputs, outputs, numHiddenNeurons, activationFunction, LN=True, AE=True, ORTH=True,
               inputWeightForgettingFactor=0.999,
               outputWeightForgettingFactor=0.999,
               hiddenWeightForgettingFactor=0.999):

    self.activationFunction = activationFunction
    self.inputs = inputs
    self.outputs = outputs
    self.numHiddenNeurons = numHiddenNeurons

    # input to hidden weights
    self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
    # hidden layer to hidden layer weights
    self.hiddenWeights = np.random.random((self.numHiddenNeurons, self.numHiddenNeurons))
    # initial hidden layer activation
    self.initial_H = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
    self.H = self.initial_H
    self.LN = LN
    self.AE = AE
    self.ORTH = ORTH
    # bias of hidden units
    self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
    # hidden to output layer connection
    self.beta = np.random.random((self.numHiddenNeurons, self.outputs))

    # auxiliary matrix used for sequential learning
    self.M = inv(0.00001 * np.eye(self.numHiddenNeurons))

    self.forgettingFactor = outputWeightForgettingFactor

    self.trace = 0
    self.thresReset = 0.001


    if self.AE:
      self.inputAE = FOSELM(inputs=inputs,
                            outputs=inputs,
                            numHiddenNeurons=numHiddenNeurons,
                            activationFunction=activationFunction,
                            LN=LN,
                            forgettingFactor=inputWeightForgettingFactor,
                            ORTH=ORTH)

      self.hiddenAE = FOSELM(inputs=numHiddenNeurons,
                             outputs=numHiddenNeurons,
                             numHiddenNeurons=numHiddenNeurons,
                             activationFunction=activationFunction,
                             LN=LN,
                             ORTH=ORTH)



  def layerNormalization(self, H, scaleFactor=1, biasFactor=0):

    H_normalized = (H - H.mean()) / np.sqrt(H.var() + 0.000001)
    H_normalized = scaleFactor * H_normalized + biasFactor

    return H_normalized

  def __calculateInputWeightsUsingAE(self, features):
    self.inputAE.train(features=features, targets=features)
    return self.inputAE.beta

  def __calculateHiddenWeightsUsingAE(self, features):
    self.hiddenAE.train(features=features, targets=features)
    return self.hiddenAE.beta

  def calculateHiddenLayerActivation(self, features):
    """
    Calculate activation level of the hidden layer
    :param features: feature matrix with dimension (numSamples, numInputs)
    :return: activation level (numSamples, numHiddenNeurons)
    """
    if self.activationFunction == "sig":

      if self.AE:
        self.inputWeights = self.__calculateInputWeightsUsingAE(features)

        self.hiddenWeights = self.__calculateHiddenWeightsUsingAE(self.H)

      V = linear_recurrent(features=features,
                           inputW=self.inputWeights,
                           hiddenW=self.hiddenWeights,
                           hiddenA=self.H,
                           bias=self.bias)
      if self.LN:
        V = self.layerNormalization(V)
      self.H = sigmoidActFunc(V)

    else:
      print " Unknown activation function type"
      raise NotImplementedError
    return self.H


  def initializePhase(self, lamb=0.0001):
    """
    Step 1: Initialization phase
    :param lamb: regularization factor used to initialize the auxiliary matrix M
    """

    if self.activationFunction == "sig":
      self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
    else:
      print("Unknown activation function type")
      raise NotImplementedError

    self.M = inv(lamb * np.eye(self.numHiddenNeurons))
    self.beta = np.zeros([self.numHiddenNeurons, self.outputs])

    # randomly initialize the input->hidden connections
    self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
    self.inputWeights = self.inputWeights * 2 - 1

    if self.AE:
      self.inputAE.initializePhase(lamb=0.00001)
      self.hiddenAE.initializePhase(lamb=0.00001)
    else:
      # randomly initialize the input->hidden connections
      self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
      self.inputWeights = self.inputWeights * 2 - 1

      if self.ORTH:
        if self.numHiddenNeurons > self.inputs:
          self.inputWeights = orthogonalization(self.inputWeights)
        else:
          self.inputWeights = orthogonalization(self.inputWeights.transpose())
          self.inputWeights = self.inputWeights.transpose()

      # hidden layer to hidden layer weights
      self.hiddenWeights = np.random.random((self.numHiddenNeurons, self.numHiddenNeurons))
      self.hiddenWeights = self.hiddenWeights * 2 - 1
      if self.ORTH:
        self.hiddenWeights = orthogonalization(self.hiddenWeights)

  def reset(self):
    self.H = self.initial_H

  def train(self, features, targets, RESETTING=False):
    """
    Step 2: Sequential learning phase
    :param features: feature matrix with dimension (numSamples, numInputs)
    :param targets: target matrix with dimension (numSamples, numOutputs)
    """
    (numSamples, numOutputs) = targets.shape
    assert features.shape[0] == targets.shape[0]

    H = self.calculateHiddenLayerActivation(features)
    Ht = np.transpose(H)
    try:
      scale = 1.0 / self.forgettingFactor
      self.M = scale * self.M - np.dot(scale * self.M,
                                       np.dot(Ht, np.dot(
                                           pinv(np.eye(numSamples) + np.dot(H, np.dot(scale * self.M, Ht))),
                                           np.dot(H, scale * self.M))))
      if RESETTING:
        beforeTrace = self.trace
        self.trace = self.M.trace()
        print(np.abs(beforeTrace - self.trace))
        if np.abs(beforeTrace - self.trace) < self.thresReset:
          print(self.M)
          eig, _ = np.linalg.eig(self.M)
          lambMin = min(eig)
          lambMax = max(eig)
          #lamb = (lambMax+lambMin)/2
          lamb = lambMax
          lamb = lamb.real
          self.M = lamb * np.eye(self.numHiddenNeurons)
          print("reset")
          print(self.M)

      self.beta = self.forgettingFactor * self.beta + np.dot(self.M, np.dot(Ht, targets - np.dot(H, self.forgettingFactor * self.beta)))
      #self.beta = self.beta + np.dot(self.M, np.dot(Ht, targets - np.dot(H, self.beta)))

    except np.linalg.LinAlgError:
      print("SVD did not converge; skipping the current training cycle")
    # else:
    #   raise RuntimeError

  def predict(self, features):
    """
    Make prediction with feature matrix
    :param features: feature matrix with dimension (numSamples, numInputs)
    :return: predictions with dimension (numSamples, numOutputs)
    """
    H = self.calculateHiddenLayerActivation(features)
    prediction = np.dot(H, self.beta)
    return prediction
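
A hypothetical usage sketch (shapes, data, and hyperparameters are made up, and the FOSELM helper class referenced above must be importable): an online regression loop with ORELM.

import numpy as np

np.random.seed(0)
net = ORELM(inputs=4, outputs=1, numHiddenNeurons=16,
            activationFunction="sig", LN=True, AE=True, ORTH=True)
net.initializePhase(lamb=0.0001)

for _ in range(200):
  X = np.random.rand(1, 4)                # one sample per training step
  y = np.sum(X, axis=1, keepdims=True)    # toy target, shape (1, 1)
  net.train(X, y)

print(net.predict(np.random.rand(1, 4)))  # predictions with shape (1, 1)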