Example #1
 def __init__(self, H=3, d=1, ny=1, M=3, debug_output=False):
   """
   H: number of hidden units
   d: number of inputs
   ny: number of outputs (dimensionality of the target variable)
   M: number of mixture components
   debug_output: enable verbose debug output
   """
   self.c = ny # keep the target dimensionality before ny is redefined below
   ny = 2*M + M * ny # M mixing coefficients + M variances + M*ny means
   self.M = M
   self.count_fwd = 0 # counts calls to _forward (used for profiling)
   TLP.__init__(self, H, d, ny, linear_output=True, error_function='mdn',
                debug_output=debug_output)
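
For reference, here is a minimal sketch of how an output vector with this layout (ny = 2*M + M*c entries) could be split back into mixture parameters. The helper name unpack_mdn_output is hypothetical; only the ordering documented in these examples is assumed.

import numpy as np

def unpack_mdn_output(y, M, c):
    # layout assumed from the examples above:
    # M mixing coefficients, then M kernel widths, then M*c kernel centre components
    alpha = y[0:M]                        # mixing coefficients
    sigma = y[M:2*M]                      # kernel widths
    mu = y[2*M:2*M + M*c].reshape(M, c)   # kernel centres, one row per component
    return alpha, sigma, mu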
Example #2
 def __init__(self, H=3, d=1, ny=1, T=100):
     """
     Create a fully-connected neural network with one hidden recurrent layer.
     @param H: number of hidden units
     @param d: number of inputs
     @param ny: number of output units
     @param T: number of time-steps
     """
     TLP.__init__(self, H, d, ny)
     self.T = T
     z = np.zeros([T, H+1]) # hidden unit activations + bias
     self.z = z[None, :]    # shape (1, T, H+1): batch axis, time-steps, hidden units + bias
     self.z[:, :, 0] = 1    # set the extra bias unit to one at every time-step
     
     dj = np.zeros([T, H+1])
     self.dj = dj[None, :]  # buffer for the hidden-unit deltas, same shape as self.z
     
     # init recurrent weights, scaled by 1/sqrt(H) to keep activations in range
     self.wh = np.random.normal(loc=0.0, scale=1.0, size=[H, H]) / np.sqrt(H) # TODO: check?
     self.Nwh = H**2        # number of recurrent weights
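
The division by sqrt(H) keeps the scale of the recurrent pre-activations roughly independent of the number of hidden units. A quick, illustrative sanity check of that scaling (the value of H and the seed are arbitrary):

import numpy as np

rng = np.random.default_rng(0)
H = 256
wh = rng.normal(loc=0.0, scale=1.0, size=[H, H]) / np.sqrt(H)
h = rng.normal(size=H)            # hidden state with roughly unit-variance entries
print(np.std(h), np.std(wh @ h))  # both close to 1: the 1/sqrt(H) factor preserves magnitude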
Example #3
 def _forward(self, x, w1=None, w2=None):
   # perform forward propagation through the underlying TLP
   y = TLP._forward(self, x, w1, w2)
   
   # the outputs are ordered as follows:
   # y[0:M]:            M mixing coefficients alpha
   # y[M:2*M]:          M kernel widths sigma
   # y[2*M:2*M + M*c]:  M x c kernel centre components
   M = int(self.M)
   # calculate mixing coefficients, variances and means from the network output
   y[0:M] = self.softmax(y[0:M]) # alpha: softmax keeps the coefficients positive and summing to one
   # clip before exponentiating to avoid overflow
   y[M:2*M] = np.minimum(y[M:2*M], np.log(np.finfo(float).max))
   y[M:2*M] = np.exp(y[M:2*M]) # sigma: exp keeps the kernel widths positive
   # avoid underflow
   y[M:2*M] = np.maximum(y[M:2*M], np.finfo(float).eps)
   return y
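
With the parameters recovered from such an output vector, the conditional density of a Bishop-style mixture density network with isotropic Gaussian kernels can be evaluated as sketched below. This assumes sigma holds the kernel widths (standard deviations); the function is illustrative and not part of the class above.

import numpy as np

def mdn_density(t, alpha, sigma, mu):
    # t: target vector of length c; alpha, sigma: arrays of length M; mu: array of shape (M, c)
    c = t.shape[0]
    sq_dist = np.sum((t - mu) ** 2, axis=1)             # squared distance to each kernel centre
    norm = (2.0 * np.pi * sigma ** 2) ** (c / 2.0)      # isotropic Gaussian normaliser per kernel
    phi = np.exp(-sq_dist / (2.0 * sigma ** 2)) / norm  # per-kernel densities
    return np.sum(alpha * phi)                          # mixture density: sum_i alpha_i * phi_i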
Example #4
 def __init__(self, H=3, d=1, ny=1):
   # a TLP with linear output units, trained with the 'bayes' error function
   TLP.__init__(self, H, d, ny, linear_output=True, error_function='bayes')