Example #1
  def forward(self, theta, activateIn=True, activateOut=False, signal=None):
    # recursively activate the input nodes first, unless a signal was passed in
    if activateIn:
      for i in self.inputs:
        i.forward(theta, activateIn, activateOut)
    if signal is None:
      self.inputsignal = np.concatenate([c.a for c in self.inputs])
      self.dinputsignal = np.concatenate([c.ad for c in self.inputs])
    else:
      self.inputsignal = signal[0]
      self.dinputsignal = signal[1]

    M = theta[self.cat + ('M',)]
    b = theta[self.cat + ('B',)]
    if M is None or b is None:
      raise RuntimeError('Failed to forward node, missing matrix or bias vector: ' + str(self.cat))

    try:
      self.z = M.dot(self.inputsignal) + b
      self.a, self.ad = activation.activate(self.z, self.nonlin)
    except Exception:
      # report the offending shapes instead of retrying the failing call
      print 'problem', self.cat, self.inputsignal.shape, M.shape, b.shape
      raise
    if activateOut:
      for node in self.outputs:
        if node.cat[0] == 'reconstruction':
          # reconstruction nodes receive this node's activation directly
          node.forward(theta, False, False, signal=(self.a, self.ad))
        else:
          node.forward(theta, False, True, signal=None)
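All of the recursive examples on this page rely on an activation.activate(z, name) helper that returns both the activation and its elementwise derivative for later use in backprop. The module itself is not among the snippets; the sketch below is an assumption about that contract, including the supported names:

import numpy as np

def activate(z, nonlin):
    # hypothetical stand-in for activation.activate: returns the activation a
    # together with its elementwise derivative da/dz
    if nonlin == 'tanh':
        a = np.tanh(z)
        return a, 1.0 - a ** 2
    if nonlin == 'sigmoid':
        a = 1.0 / (1.0 + np.exp(-z))
        return a, a * (1.0 - a)
    if nonlin == 'identity':
        return z, np.ones_like(z)
    raise ValueError('unknown nonlinearity: ' + str(nonlin))

Note that the convolutional examples below assign the result of act.activate directly to a feature map, so their helper presumably returns only the activation.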
Example #2
def fwdPass(inputs, convolved, filters, conv_bias, pooled, switches):
    # convolutional stage: convolve, activate, then pool each feature map
    for conv in range(net.conv_layers):
        for fil in range(net.filter_count):
            if conv == 0:
                # the first convolution runs on the input image itself
                convolved[conv][fil] = act.activate(
                    sp.convolve(inputs, filters[conv][fil][0], mode='constant')
                    + conv_bias[conv][fil], net.act_fn_conv[conv])
            else:
                # later layers sum convolutions over all previous feature maps
                temp = np.zeros(convolved[conv][fil].shape)
                for prv_fil in range(net.filter_count):
                    temp += sp.convolve(pooled[conv - 1][prv_fil],
                                        filters[conv][fil][prv_fil],
                                        mode='constant')
                convolved[conv][fil] = act.activate(
                    temp + conv_bias[conv][fil], net.act_fn_conv[conv])

            pool.downsample(convolved[conv][fil], pooled[conv][fil],
                            switches[conv][fil])
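pool.downsample itself is not shown in any example; judging from the call site it fills pooled[conv][fil] in place and records per-window switches for the backward pass. A plausible reading, assuming non-overlapping max pooling where each switch stores the flat argmax of its window:

import numpy as np

def downsample(conv_map, pooled_out, switch_out, size=2):
    # hypothetical max pooling: writes the pooled map in place and, per
    # window, the flat index of the maximum so backprop can route gradients
    rows, cols = pooled_out.shape
    for i in range(rows):
        for j in range(cols):
            window = conv_map[i * size:(i + 1) * size,
                              j * size:(j + 1) * size]
            k = int(np.argmax(window))
            switch_out[i, j] = k
            pooled_out[i, j] = window.flat[k]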
Example #3
 def forward(self, theta):
   # recursively collect the children's activations
   inputsignal = np.concatenate([child.forward(theta) for child in self.children])
   # compute the activation to return
   M = theta[self.cat + 'M']
   b = theta[self.cat + 'B']
   self.z = M.dot(inputsignal) + b
   # store the activation and its gradient for use in backprop
   self.a, self.ad = activation.activate(self.z, self.nonlin)
   return self.a
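Across the recursive examples, theta behaves as a flat mapping from category keys to parameter arrays: an embedding table for the leaves and 'M'/'B' suffixed entries for composition weights and biases. A minimal driver under those assumptions, with hypothetical Leaf/Node stand-ins and arbitrary dimensions:

import numpy as np

class Leaf(object):
    def __init__(self, cat, index):
        self.cat, self.index = cat, index

    def forward(self, theta):
        self.a = theta[self.cat][self.index]  # embedding lookup, identity activation
        return self.a

class Node(object):
    def __init__(self, cat, children):
        self.cat, self.children = cat, children

    def forward(self, theta):
        # concatenate the children's activations, then compose with M and B
        inputsignal = np.concatenate([c.forward(theta) for c in self.children])
        self.a = np.tanh(theta[self.cat + 'M'].dot(inputsignal)
                         + theta[self.cat + 'B'])
        return self.a

d = 4
theta = {'word': np.random.randn(10, d),      # leaf embeddings
         'compM': np.random.randn(d, 2 * d),  # composition weights
         'compB': np.zeros(d)}                # composition bias
root = Node('comp', [Leaf('word', 1), Leaf('word', 7)])
print(root.forward(theta).shape)              # -> (4,)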
Example #4
 def inner(self, theta):
     if self.cat == 'rel': print 'Leaf.inner', self.cat, self.index
     self.innerZ = np.asarray(theta[self.cat + 'IM'][self.index]).flatten()
     # After theta is updated, wordIM becomes a matrix instead of a plain
     # 2-D array, so innerZ comes out with shape (1, 5) rather than (5,);
     # hence the flattening above.
     self.innerA, self.innerAd = activation.activate(self.innerZ, self.actI)
     return self.innerA
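The flattening remark above is easy to reproduce: indexing a row of an np.matrix keeps both dimensions, unlike a plain ndarray. A quick check with arbitrary sizes:

import numpy as np

E = np.matrix(np.random.randn(10, 5))    # wordIM once it has become a matrix
print(E[3].shape)                        # -> (1, 5)
print(np.asarray(E[3]).flatten().shape)  # -> (5,)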
Example #5
  def forward(self, theta, activateIn=True, activateOut=False):
    try:
      self.z = theta[self.cat][self.key]
    except KeyError:
      print 'Failed to forward Leaf:', self.cat, self.key, type(self.key)
      sys.exit()

    self.a, self.ad = activation.activate(self.z, self.nonlin)
    if activateOut:
      for i in self.outputs:
        i.forward(theta, False, activateOut)
Example #6
    def forward(self, theta, activate_in=True):
        if activate_in:
            for i in self.inputs:
                i.forward(theta, activate_in)
        self.inputsignal = np.concatenate([c.a for c in self.inputs])
        self.dinputsignal = np.concatenate([c.ad for c in self.inputs])

        M = theta[self.cat + ('M',)]
        b = theta[self.cat + ('B',)]
        if M is None or b is None:
            raise RuntimeError(
                'Failed to forward node, missing matrix or bias vector: ' +
                str(self.cat))
        self.z = M.dot(self.inputsignal) + b
        self.a, self.ad = activation.activate(self.z, self.nonlin)
Example #7
    def inner(self, theta):
        # recursively collect the children's inner activations
        inputs = [child.inner(theta) for child in self.children]
        inputsignal = np.concatenate(inputs)
        M = theta[self.cat + 'IM']
        b = theta[self.cat + 'IB']
        try:
            self.innerZ = M.dot(inputsignal) + b
        except ValueError:
            # report the mismatched shapes before giving up
            print self.cat, ', matrix:', M.shape, ', input:', inputsignal.shape
            for c in self.children:
                print c.cat, c.innerA.shape
            sys.exit()
        self.innerA, self.innerAd = activation.activate(self.innerZ, self.actI)
        return self.innerA
Example #8
 def outer(self, theta):
     # outside pass: the root receives a zero signal; every other node
     # combines the parent's outer activation with its sibling's inner one
     if not self.parent:
         self.outerZ = np.zeros_like(theta[self.cat + 'LOB'])
     else:
         inputsignal = self.parent.outerA
         cat = self.parent.cat
         if self.siblingL:
             inputsignal = np.append(self.siblingL.innerA, inputsignal)
             cat += 'R'
         elif self.siblingR:
             inputsignal = np.append(self.siblingR.innerA, inputsignal)
             cat += 'L'
         try:
             M = theta[cat + 'OM']
             b = theta[cat + 'OB']
         except KeyError:
             print 'No theta entry for', self.cat, self
             sys.exit()
         self.outerZ = M.dot(inputsignal) + b
     self.outerA, self.outerAd = activation.activate(self.outerZ, self.actO)
     for child in self.children:
         child.outer(theta)
Example #9
 def forward(self, theta):
   self.z = theta[self.cat][self.index]
   self.a, self.ad = activation.activate(self.z, self.nonlin)
   return self.a
Example #10
 def forward(self, theta, activate_in=True):
     self.z = theta[self.cat][self.key]
     self.a, self.ad = activation.activate(self.z, self.nonlin)
Example #11
def fwdPass(inputs, receptors, synapses, bias):
    # flatten the input, then apply each fully connected layer in turn
    receptors[0] = inputs.reshape(inputs.size)
    for index in xrange(net.depth):
        receptors[index + 1] = act.activate(
            synapses[index].dot(receptors[index]) + bias[index + 1],
            net.act_fn[index + 1])
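As a usage sketch, the fully connected pass above only needs stand-ins for the net config and the act module; here act.activate returns just the activation, matching the direct assignment at the call site, and every size is an arbitrary assumption:

import numpy as np

class net(object):                # hypothetical config stand-in
    depth = 2
    act_fn = [None, 'tanh', 'tanh']

class act(object):
    @staticmethod
    def activate(z, fn):          # returns only the activation here
        return np.tanh(z) if fn == 'tanh' else z

sizes = [16, 8, 4]                # input, hidden, output widths
receptors = [np.zeros(s) for s in sizes]
synapses = [0.1 * np.random.randn(sizes[i + 1], sizes[i])
            for i in range(net.depth)]
bias = [np.zeros(s) for s in sizes]

fwdPass(np.random.randn(4, 4), receptors, synapses, bias)
print(receptors[-1].shape)        # -> (4,)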