Example #1
File: mlp.py  Project: stjordanis/CUV
 def applyDeltaWeights(self, dWList, dBList, updateOnlyLast, batchSize):
     if self.useRPROP:
         for i in reversed(xrange(self.NumberOfLayers - 1)):
             cp.rprop(self.Weights[i], dWList[i], self.DeltaWeightsOld[i],
                      self.WeightsLearnRate[i], self.cfg.finetune_cost)
             cp.rprop(self.Bias[i], dBList[i], self.DeltaBiasOld[i],
                      self.BiasLearnRate[i], self.cfg.finetune_cost)
             if updateOnlyLast: break
     else:
         for i in reversed(xrange(self.NumberOfLayers - 1)):
             W, B = self.Weights[i], self.Bias[i]
             dW, dWo = dWList[i], self.DeltaWeightsOld[i]
             dB, dBo = dBList[i], self.DeltaBiasOld[i]
             cp.apply_binary_functor(dW, dWo, cp.binary_functor.XPBY,
                                     self.cfg.finetune_momentum)
             cp.apply_binary_functor(dB, dBo, cp.binary_functor.XPBY,
                                     self.cfg.finetune_momentum)
             cp.learn_step_weight_decay(
                 W, dW, self.cfg.finetune_learnrate / batchSize,
                 self.cfg.finetune_cost)
             cp.learn_step_weight_decay(
                 B, dB, self.cfg.finetune_learnrate / batchSize,
                 self.cfg.finetune_cost)
             cp.copy(dWo, dW)
             cp.copy(dBo, dB)
             if updateOnlyLast: break
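The non-RPROP branch above combines a momentum term with a learning step that includes weight decay. Below is a minimal plain-NumPy sketch of what one layer update appears to do; it assumes that cp.binary_functor.XPBY means dW += momentum * dW_old and that cp.learn_step_weight_decay performs W += lr * (dW - cost * W), neither of which is confirmed by the snippet itself, and the function name is invented for illustration.

import numpy as np

def momentum_weight_decay_step(W, dW, dW_old, lr, momentum, cost):
    # Assumed equivalent of apply_binary_functor(dW, dW_old, XPBY, momentum):
    dW += momentum * dW_old
    # Assumed equivalent of learn_step_weight_decay(W, dW, lr, cost):
    W += lr * (dW - cost * W)
    # Assumed equivalent of cp.copy(dWo, dW): keep this update for the next call.
    dW_old[...] = dW
    return W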
Example #2
 def setMiniBatch(self, mb, dst_layer):
     self.sampleset_ = mb
     self.sampleset = cp.dev_tensor_float_cm(
         self.sampleset_.astype('float32').copy('F'))
     if hasattr(self, "norm"):
         self.norm(self.sampleset)
     cp.copy(dst_layer, self.sampleset)
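dev_tensor_float_cm holds column-major single-precision data, which is why the mini-batch is converted with .astype('float32').copy('F') before being pushed to the device. A minimal host-side sketch of that preparation (the shape is an arbitrary example):

import numpy as np

mb = np.random.rand(784, 64)               # features x samples, float64, C-ordered
mb_ready = mb.astype('float32').copy('F')  # contiguous column-major float32
assert mb_ready.flags['F_CONTIGUOUS'] and mb_ready.dtype == np.float32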
Example #3
File: mlp.py  Project: 0rchard/CUV
 def delta_output(self, calculated, correct):
   derivative = cp.dev_tensor_float_cm([calculated.shape[0], correct.shape[1]])
   h = cp.dev_tensor_float_cm(derivative.shape)
   cp.copy(derivative, calculated)
   cp.apply_scalar_functor(derivative, cp.scalar_functor.DSIGM)
   cp.copy(h, correct)
   cp.apply_binary_functor(h, calculated, cp.binary_functor.SUBTRACT)
   cp.apply_binary_functor(derivative, h, cp.binary_functor.MULT)
   h.dealloc()
   return derivative
Example #4
File: mlp.py  Project: stjordanis/CUV
 def delta_output(self, calculated, correct):
     derivative = cp.dev_tensor_float_cm(
         [calculated.shape[0], correct.shape[1]])
     h = cp.dev_tensor_float_cm(derivative.shape)
     cp.copy(derivative, calculated)
     cp.apply_scalar_functor(derivative, cp.scalar_functor.DSIGM)
     cp.copy(h, correct)
     cp.apply_binary_functor(h, calculated, cp.binary_functor.SUBTRACT)
     cp.apply_binary_functor(derivative, h, cp.binary_functor.MULT)
     h.dealloc()
     return derivative
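Both delta_output variants compute the output-layer delta for a sigmoid output with a squared-error cost, roughly sigma'(activation) * (target - activation). The plain-NumPy sketch below mirrors that computation; it assumes that cp.scalar_functor.DSIGM applied to the activations yields a * (1 - a), i.e. the sigmoid derivative written in terms of the sigmoid output, which is an assumption about the CUV functor.

import numpy as np

def delta_output_np(calculated, correct):
    # Assumed DSIGM: derivative of the sigmoid expressed via its output.
    dsigm = calculated * (1.0 - calculated)
    # (correct - calculated), then elementwise product with the derivative.
    return dsigm * (correct - calculated)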
Example #5
    def fit(self, input_matrix, teacher_matrix, n_epochs=100, learnrate=0.10):
        """
        Train the network.

        @param input_matrix -- matrix of input data to the network
           (features along rows, samples along columns).
        @param teacher_matrix -- matrix of labels for the input data.
        @param n_epochs -- number of epochs the network is trained for.
        @param learnrate -- learning rate used in the weight updates.
        """
        n_samples = input_matrix.shape[-1]
        squared_errors = cp.dev_tensor_float_cm(self.neuron_layers[-1].deltas.shape)
        for r in xrange(n_epochs):
            print "Epoch ", r + 1, "/", n_epochs
            mse = 0.0
            ce = 0.0
            for batch in xrange(n_samples / self.batch_size):
                index_begin = self.batch_size * batch
                index_end = self.batch_size + index_begin

                # Push input and teacher to GPU memory
                # .copy("F") is needed since memory is non-contiguous
                self.neuron_layers[0].activations = cp.dev_tensor_float_cm(
                    input_matrix[:, index_begin:index_end].copy('F'))
                teacher_batch_host = teacher_matrix[:, index_begin:index_end]
                teacher_batch = cp.dev_tensor_float_cm(teacher_batch_host.copy('F'))

                # Forward-Pass
                for i in xrange(self.n_layers):
                    self.weight_layers[i].forward()

                # calculate error at output layer
                cp.copy(self.neuron_layers[-1].deltas, teacher_batch)
                self.neuron_layers[-1].deltas -= self.neuron_layers[-1].activations
                cp.copy(squared_errors, self.neuron_layers[-1].deltas)
                cp.apply_scalar_functor(squared_errors, cp.scalar_functor.SQUARE)
                mse += cp.sum(squared_errors)
                ce += float(np.sum(np.argmax(teacher_batch_host, axis=0)
                        != np.argmax(self.neuron_layers[-1].activations.np, axis=0)))

                # Backward-Pass
                for i in xrange(self.n_layers):
                    self.weight_layers[self.n_layers - i - 1].backward(learnrate, decay=.01)

                # Don't wait for garbage collector
                teacher_batch.dealloc()
                self.neuron_layers[0].activations.dealloc()

            print "MSE: ",     (mse / n_samples)
            print "Classification Error Training: ", (ce / n_samples)
        squared_errors.dealloc()
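The per-batch error bookkeeping inside fit() translates directly to plain NumPy; the sketch below mirrors the squared-error and classification-error accumulation above, with both arrays laid out as outputs x samples (the function name is invented for illustration).

import numpy as np

def batch_errors(activations, teacher_batch):
    diff = teacher_batch - activations                        # same as the deltas above
    sse = float(np.sum(diff ** 2))                            # summed squared error
    wrong = float(np.sum(np.argmax(teacher_batch, axis=0)
                         != np.argmax(activations, axis=0)))  # misclassified samples
    return sse, wrong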
Example #6
File: mlp.py  Project: 0rchard/CUV
 def applyDeltaWeights(self, dWList, dBList, updateOnlyLast, batchSize):
     if self.useRPROP:
         for i in reversed(xrange(self.NumberOfLayers - 1)):
             cp.rprop(self.Weights[i], dWList[i], self.DeltaWeightsOld[i],
                      self.WeightsLearnRate[i], self.cfg.finetune_cost)
             cp.rprop(self.Bias[i], dBList[i], self.DeltaBiasOld[i],
                      self.BiasLearnRate[i], self.cfg.finetune_cost)
             if updateOnlyLast: break
     else:
         for i in reversed(xrange(self.NumberOfLayers - 1)):
             W, B = self.Weights[i], self.Bias[i]
             dW, dWo = dWList[i], self.DeltaWeightsOld[i]
             dB, dBo = dBList[i], self.DeltaBiasOld[i]
             cp.apply_binary_functor(dW, dWo, cp.binary_functor.XPBY,
                                     self.cfg.finetune_momentum)
             cp.apply_binary_functor(dB, dBo, cp.binary_functor.XPBY,
                                     self.cfg.finetune_momentum)
             cp.learn_step_weight_decay(
                 W, dW, self.cfg.finetune_learnrate / batchSize,
                 self.cfg.finetune_cost)
             cp.learn_step_weight_decay(
                 B, dB, self.cfg.finetune_learnrate / batchSize,
                 self.cfg.finetune_cost)
             cp.copy(dWo, dW)
             cp.copy(dBo, dB)
             if updateOnlyLast: break
Example #7
 def setMiniBatch(self, mb, dst_layer):
     self.sampleset_ = mb
     self.sampleset = cp.dev_tensor_float_cm(
         self.sampleset_.astype('float32').copy('F'))
     if hasattr(self, "norm"):
         self.norm(self.sampleset)
     cp.copy(dst_layer, self.sampleset)