Example #1
from lasagne.updates import adagrad, apply_nesterov_momentum

def adagrad_momentum(grads,
                     params,
                     learning_rate=1.0,
                     momentum=0.9,
                     epsilon=1e-06):
    # Build the Adagrad update dict first, then rewrite it so each
    # parameter step also carries Nesterov momentum.
    return apply_nesterov_momentum(adagrad(grads, params, learning_rate,
                                           epsilon),
                                   params=params,
                                   momentum=momentum)
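A minimal usage sketch (ours, not part of the original example): the wrapper returns an ordinary Lasagne update dictionary, so it plugs straight into theano.function. The toy quadratic loss and constants are placeholders; run this after the definition above.

import numpy as np
import theano

w = theano.shared(np.float32(3.0))
loss = (w - 1.0) ** 2                        # toy scalar loss
updates = adagrad_momentum(loss, [w], learning_rate=0.5)
step = theano.function([], loss, updates=updates)
for _ in range(50):
    step()                                   # w moves toward 1.0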
Example #2
from lasagne.updates import adadelta, apply_nesterov_momentum

def adadelta_momentum(grads,
                      params,
                      learning_rate=1.0,
                      momentum=0.9,
                      rho=0.95,
                      epsilon=1e-06):
    # Same composition as above, with Adadelta as the base update rule.
    return apply_nesterov_momentum(adadelta(grads, params, learning_rate, rho,
                                            epsilon),
                                   params=params,
                                   momentum=momentum)
Example #3
def _create_functions(self):
    # Base update dict from the configured update rule.
    self._update_func = self._update(self._get_loss_function(),
                                     self._get_all_trainable_params(),
                                     self._learning_rate)
    # Wrap it with Nesterov momentum only when momentum is non-zero.
    if self._momentum != 0:
        self._update_func = apply_nesterov_momentum(
            self._update_func, self._get_all_trainable_params(),
            self._momentum)
    self._loss_func = self._get_loss_function()
    # One compiled function returns the loss and applies the updates.
    self._train_function = theano.function([self.t_input, self.t_label],
                                           self._loss_func,
                                           updates=self._update_func)
Example #4
def rmsprop_nesterov(loss, params, eta=1e-3, alpha=0.9, **kwargs):
    # `updt` is presumably lasagne.updates (import lasagne.updates as updt):
    # RMSProp scales the step sizes, then Nesterov momentum is layered on top.
    rms = updt.rmsprop(loss, params, learning_rate=eta, **kwargs)
    return updt.apply_nesterov_momentum(rms, params, momentum=alpha)
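A quick self-contained check of the wrapper (our toy loss, with `updt` assumed to be lasagne.updates as in the comment above):

import numpy as np
import theano
import theano.tensor as T
import lasagne.updates as updt

w = theano.shared(np.ones(3, dtype='float32'))
loss = T.sum((w - 2.0) ** 2)
train = theano.function([], loss,
                        updates=rmsprop_nesterov(loss, [w], eta=0.1))
for _ in range(5):
    train()                                  # loss shrinks on this toy problem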
Example #5
l5 = Conv2DLayer(l4, 96, (5, 5), nonlinearity=very_leaky_rectify,
                 W=GlorotUniform('relu'))
l6 = MaxPool2DLayer(l5, (3, 3))
l7 = DenseLayer(l6, 512, nonlinearity=very_leaky_rectify,
                W=lasagne.init.GlorotNormal())
# l7_5 = cyclicpool(l7)
# l7_5 = lasagne.layers.DropoutLayer(l7)
l8 = DenseLayer(l7, 2, nonlinearity=softmax)



rate = theano.shared(.0002)   # shared so the schedule below can change it
params = lasagne.layers.get_all_params(l8)
prediction = lasagne.layers.get_output(l8)
loss = lasagne.objectives.categorical_crossentropy(prediction, y1)
loss = loss.mean()
updates_sgd = adagrad(loss, params, learning_rate=rate)
updates = apply_nesterov_momentum(updates_sgd, params, momentum=0.9)


train_model = theano.function([x1, y1], outputs=loss, updates=updates)

# Per-sample cross-entropy on a batch, usable as a validation loss:
pred = theano.function([x1, y1],
                       outputs=lasagne.objectives.categorical_crossentropy(prediction, y1))

# pred = theano.function([x1, y1], outputs=prediction, on_unused_input='ignore')

### begin to train
renewtrain = len(train_x) // batchsize   # whole batches per pass
renewtest = len(test_x) // batchsize
for i in range(15000):
    # Step learning-rate schedule, driven through the shared `rate` variable.
    if i > 325 and i < 3000:
        rate.set_value(.001)
    elif i > 6500 and i < 15000:
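For reference, a self-contained miniature of the same pattern (shared learning rate, Adagrad wrapped in apply_nesterov_momentum, stepped schedule) on a toy dense network with random data of our own; the layer sizes, thresholds, and rates here are placeholders, not the original's:

import numpy as np
import theano
import theano.tensor as T
import lasagne
from lasagne.updates import adagrad, apply_nesterov_momentum

x1 = T.matrix('x1')
y1 = T.ivector('y1')
l_in = lasagne.layers.InputLayer((None, 8), input_var=x1)
l_out = lasagne.layers.DenseLayer(l_in, 2,
                                  nonlinearity=lasagne.nonlinearities.softmax)

rate = theano.shared(np.float32(0.01))       # mutable learning rate
prediction = lasagne.layers.get_output(l_out)
loss = lasagne.objectives.categorical_crossentropy(prediction, y1).mean()
params = lasagne.layers.get_all_params(l_out, trainable=True)
updates = apply_nesterov_momentum(adagrad(loss, params, learning_rate=rate),
                                  params, momentum=0.9)
train_model = theano.function([x1, y1], outputs=loss, updates=updates)

X = np.random.randn(64, 8).astype('float32')
Y = np.random.randint(0, 2, 64).astype('int32')
for i in range(200):
    if i == 100:
        rate.set_value(np.float32(0.001))    # step the rate down, as above
    train_model(X, Y)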