def pretraining_functions(self, train_set_x, batch_size):
    """Build one Theano pretraining function per dA layer.

    Each returned function takes a minibatch ``index`` (plus optional
    ``corruption``, ``lr`` and ``momentum`` overrides) and performs one
    denoising-autoencoder update step on that minibatch, returning the cost.

    :param train_set_x: Theano shared variable holding the training data
        (rows are examples).
    :param batch_size: requested minibatch size; clamped to the dataset
        size so the slice never over-runs the data.
    :return: list of compiled Theano functions, one per layer in
        ``self.dA_layers``.
    """
    index = T.lscalar('index')              # index to a minibatch
    corruption_level = T.scalar('corruption')  # % of corruption to use
    learning_rate = T.scalar('lr')          # learning rate to use
    momentum = T.scalar('momentum')

    # Clamp batch_size to the number of available examples so that
    # train_set_x[batch_begin:batch_end] is never an over-long slice.
    n_examples = train_set_x.get_value(borrow=True).shape[0]
    if n_examples <= batch_size:
        batch_size = n_examples

    # beginning / end of the minibatch selected by `index`
    batch_begin = index * batch_size
    batch_end = batch_begin + batch_size

    pretrain_fns = []
    for dA in self.dA_layers:
        # symbolic cost and parameter-update pairs for this layer
        cost, updates = dA.get_cost_updates(corruption_level,
                                            learning_rate,
                                            momentum)
        # compile the theano function for this layer
        fn = theano.function(
            inputs=[index,
                    theano.Param(corruption_level, default=0.2),
                    theano.Param(learning_rate, default=0.1),
                    theano.Param(momentum, default=0.5)],
            outputs=cost,
            updates=updates,
            givens={self.x: train_set_x[batch_begin:batch_end]})
        pretrain_fns.append(fn)

    return pretrain_fns
def pretraining_function(self, dA, train_set_x, batch_size):
    """Build a Theano pretraining function for a single dA layer.

    The compiled function takes a minibatch ``index`` (plus optional
    ``corruption``, ``lr`` and ``momentum`` overrides), performs one update
    step of ``dA`` on that minibatch, and returns the cost.  Unlike
    :meth:`pretraining_functions`, the cost here is weighted between the
    x and y parts of the input as configured in ``self.cfg``.

    :param dA: the denoising-autoencoder layer to pretrain.
    :param train_set_x: Theano shared variable holding the training data.
    :param batch_size: requested minibatch size; clamped to the dataset
        size so the slice never over-runs the data.
    :return: a single compiled Theano function.
    """
    index = T.lscalar('index')              # index to a minibatch
    corruption_level = T.scalar('corruption')  # % of corruption to use
    learning_rate = T.scalar('lr')          # learning rate to use
    momentum = T.scalar('momentum')

    # Clamp batch_size to the number of available examples so that
    # train_set_x[batch_begin:batch_end] is never an over-long slice.
    n_examples = train_set_x.get_value(borrow=True).shape[0]
    if n_examples <= batch_size:
        batch_size = n_examples

    # beginning / end of the minibatch selected by `index`
    batch_begin = index * batch_size
    batch_end = batch_begin + batch_size

    # symbolic cost and update pairs; the y-part of the reconstruction
    # cost is re-weighted per the configuration
    cost, updates = dA.get_cost_updates(corruption_level,
                                        learning_rate,
                                        momentum,
                                        weight_y=self.cfg.settings['weight_y'],
                                        size_of_x=self.cfg.n_ins,
                                        size_of_y=self.cfg.n_outs)

    # compile the theano function
    fn = theano.function(
        inputs=[index,
                theano.Param(corruption_level, default=0.2),
                theano.Param(learning_rate, default=0.1),
                theano.Param(momentum, default=0.5)],
        outputs=cost,
        updates=updates,
        givens={dA.x: train_set_x[batch_begin:batch_end]})

    return fn
def get_cost_functions(self, train_set_x):
    """Build evaluation-only cost functions, one per dA layer.

    Each compiled function evaluates the reconstruction cost of its layer
    over the whole of ``train_set_x`` (training or validation set) with
    zero corruption and zero learning rate, and applies NO parameter
    updates — it is for monitoring only.

    :param train_set_x: Theano shared variable holding the data set.
    :return: list of compiled zero-argument Theano functions, one per
        layer in ``self.dA_layers``.
    """
    cost_fns = []
    for dA in self.dA_layers:
        # corruption_level=0, learning_rate=0: pure cost evaluation.
        # The updates returned here are intentionally discarded so the
        # compiled function does not modify any parameters.
        cost, _updates = dA.get_cost_updates(0, 0)
        fn = theano.function(inputs=[],
                             outputs=cost,
                             givens={self.x: train_set_x})
        cost_fns.append(fn)

    # BUG FIX: the original built the list but never returned it,
    # so callers always received None.
    return cost_fns