Example #1
 def __init__(self, hyperParams):
     self.hP = hyperParams

     # Instantiate layers:
     self.hL = Layers.HiddenLayer(inputSize=self.hP.layerSizes[0],
                                  outputSize=self.hP.layerSizes[1],
                                  activationType=self.hP.activations[0])
     self.oL = Layers.OutputLayer(inputSize=self.hP.layerSizes[1],
                                  outputSize=self.hP.layerSizes[2],
                                  activationType=self.hP.activations[1])

     self.inputSize = self.hL.inputSize

     # Initialize params
     self.params = Params([self.hL, self.oL])
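A minimal usage sketch for this constructor. The attribute names layerSizes and activations are read straight from the snippet; the HyperParams container, the Network owner class, and the activation strings are assumptions rather than part of the library.

 # Hypothetical container exposing only the two attributes the
 # constructor reads; field names are taken from the snippet above.
 class HyperParams(object):
     def __init__(self, layerSizes, activations):
         self.layerSizes = layerSizes
         self.activations = activations

 hP = HyperParams(layerSizes=[784, 100, 10],           # input, hidden, output widths
                  activations=["sigmoid", "softmax"])  # assumed activation names
 net = Network(hP)  # Network stands in for whichever class owns __init__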
Example #2
 def fit(self,
         data,
         labels,
         test_data,
         test_labels,
         learning_rate=0.1,
         n_epochs=250,
         nkerns=[20, 50],
         batch_size=500):
     logger.info('Initialising the classifier')
     rng = numpy.random.RandomState()
     data, labels = Scripts.shared_dataset(data_x=data, data_y=labels)
     test_data, test_labels = Scripts.shared_dataset(data_x=test_data,
                                                     data_y=test_labels)
     if batch_size < 1:
         # A non-positive batch_size means full-batch training
         batch_size = data.get_value(borrow=True).shape[0]
     n_train_batches = data.get_value(borrow=True).shape[0] // batch_size
     n_test_batches = test_data.get_value(borrow=True).shape[0] // batch_size
     logger.info('Constructing the classifier')
     self.layers = []
     self.layers.append(
         Layers.PoolingLayer(rng,
                             input=self.x.reshape((batch_size, 1, 28, 28)),
                             image_shape=(batch_size, 1, 28, 28),
                             filter_shape=(nkerns[0], 1, 5, 5),
                             poolsize=(2, 2)))
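     # Shapes so far: 28x28 input -> 5x5 conv -> 24x24 -> 2x2 maxpool -> 12x12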
     self.layers.append(
         Layers.PoolingLayer(rng,
                             input=self.layers[-1].output,
                             image_shape=(batch_size, nkerns[0], 12, 12),
                             filter_shape=(nkerns[1], nkerns[0], 5, 5),
                             poolsize=(2, 2)))
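     # Shapes: 12x12 -> 5x5 conv -> 8x8 -> 2x2 maxpool -> 4x4,
     # hence n_in = nkerns[1] * 4 * 4 for the hidden layer below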
     self.layers.append(
         Layers.HiddenLayer(rng,
                            input=self.layers[-1].output.flatten(2),
                            n_in=nkerns[1] * 4 * 4,
                            n_out=500,
                            activation=T.tanh))
     self.layers.append(
         Layers.LogisticRegression(input=self.layers[-1].output,
                                   n_in=500,
                                   n_out=10))
     test_givens = {
         self.x: test_data[self.index * batch_size:(self.index + 1) * batch_size],
         self.y: test_labels[self.index * batch_size:(self.index + 1) * batch_size]
     }
     self.test_model = theano.function([self.index],
                                       self.layers[-1].errors(self.y),
                                       givens=test_givens)
     params = (self.layers[0].params + self.layers[1].params +
               self.layers[2].params + self.layers[3].params)
     cost = self.layers[-1].negative_log_likelihood(self.y)
     grads = T.grad(cost, params)
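     # Vanilla SGD: each parameter moves a step of size learning_rate
     # against its gradient, i.e. param <- param - learning_rate * grad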
     updates = [(param_i, param_i - learning_rate * grad_i)
                for param_i, grad_i in zip(params, grads)]
     train_givens = {
         self.x: data[self.index * batch_size:(self.index + 1) * batch_size],
         self.y: labels[self.index * batch_size:(self.index + 1) * batch_size]
     }
     self.train_model = theano.function([self.index],
                                        cost,
                                        updates=updates,
                                        givens=train_givens)
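     # Early-stopping bookkeeping in the style of the Theano deep-learning
     # tutorial: run at most n_epochs, validating every validation_frequency
     # minibatches, and stop once the iteration count exhausts patience.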
     patience, patience_increase = 10000, 2  # patience_increase is never used below
     validation_frequency = min(n_train_batches, patience // 2)
     epoch, count = 0, 0
     start_time = time.time()
     n_iters = n_epochs * n_train_batches
     logger.info("Fitting Classifier")
     logger.debug("{0} epochs, {1} batches, {2} iterations".format(
         n_epochs, n_train_batches, n_iters))
     while epoch < n_epochs and patience > count:
         epoch += 1
         for minibatch_index in xrange(n_train_batches):
             count = (epoch - 1) * n_train_batches + minibatch_index
             if count % 50 == 0:
                 percentage = round(100.0 * count / n_iters, 2)
                 if percentage == 0:
                     time_stamp = "unknown"
                 else:
                     # Extrapolate elapsed time over the remaining fraction
                     # to estimate the (UTC) completion time.
                     time_stamp = datetime.utcfromtimestamp(
                         (time.time() - start_time) * (100.0 / percentage) +
                         start_time)
                 logger.info(
                     "training is {0}% complete (completion at {1})".format(
                         percentage, time_stamp))
             self.train_model(minibatch_index)  # returned cost is not used
             if (count + 1) % validation_frequency == 0:
                 testlosses = [
                     self.test_model(i) for i in xrange(n_test_batches)
                 ]
                 test_score = numpy.mean(testlosses)
                 logger.info(
                     'Test error of {0}% achieved on Epoch {1} Iteration {2}'
                     .format(test_score * 100.0, epoch, count + 1))
             logger.debug("Iteration number {0}".format(count))
     logger.debug('Optimization complete.')
     logger.debug('Conducting final model testing')
     testlosses = [self.test_model(i) for i in xrange(n_test_batches)]
     test_score = numpy.mean(testlosses)
     t_taken = int((time.time() - start_time) / 60.0)
     logger.info('Training Complete')
     logger.info('Test score of {0}%, training time {1}m'.format(
         test_score * 100.0, t_taken))
     if self.args_save:
         self.save()
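A minimal call sketch, assuming MNIST-shaped data (the 28x28 image shape is hard-coded in the reshape above) and a Classifier class that owns this fit method; Classifier and load_mnist are assumed names, not part of the snippet.

 # Hypothetical loader returning numpy arrays: images as (n, 784) float
 # rows, labels as (n,) integers in 0..9. load_mnist is an assumed name.
 train_x, train_y, test_x, test_y = load_mnist()

 clf = Classifier()           # assumed owner of fit()
 clf.fit(train_x, train_y,
         test_x, test_y,
         learning_rate=0.1,   # plain SGD step size
         n_epochs=250,
         nkerns=[20, 50],     # feature maps in the two conv/pool layers
         batch_size=500)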