Example #1
    # (fragment) constructor of a class-based variant of the LeNet examples
    # further down the page; only the signature and the data loading are shown
    def __init__(self,
                 learning_rate=0.1,
                 n_epochs=1,
                 dataset='mnist.pkl.gz',
                 nkerns=[20, 50],
                 batch_size=500,
                 testing=0):

        self.data = load_data(dataset)
Example #2
def main():
    print 'Loading data...'
    X, y = load_data('ex2data2.txt')

    print 'First 10 examples from dataset:'
    print '\t\tX\t\t  y'
    for i in range(10):
        print '  {0}\t{1}'.format(X[i], y[i])

    print 'Plotting data...'
    plot_data(X, y)

    print 'Normalizing features...'
    X, mu, sigma = feature_normalize(X)

    print 'Adding polynomial features...'
    X = map_feature(X[:, 0], X[:, 1])

    initial_theta = np.zeros((X.shape[1], 1))

    lambda_ = 1

    print 'Computing cost...'
    cost, grad = cost_function_reg(initial_theta, X, y, lambda_)

    print 'Optimizing using gradient descent...'
    alpha = 0.05
    num_iters = 1000

    theta = np.zeros((X.shape[1], 1))
    theta, J_history = gradient_descent_multi_reg(
        X, y, theta, alpha, num_iters, lambda_)

    plt.plot(range(1, len(J_history) + 1), J_history, '-b')
    plt.xlabel('Number of iterations')
    plt.ylabel('Cost J')
    plt.show()

    print 'Theta computed from gradient descent:'
    print theta

    chip_data = np.array([[-0.5, 0.7]])
    chip_data = (chip_data - mu) / sigma
    # map_feature expects the same two (normalized) features used for training,
    # so no extra column of ones is inserted before the polynomial expansion
    chip_data = map_feature(chip_data[:, 0], chip_data[:, 1])
    prob = sigmoid(chip_data.dot(theta))
    print 'For a microchip with test scores of -0.5 and 0.7, we predict an acceptance probability of {0}%'.format(prob[0, 0] * 100)

    print 'Computing accuracy:\n'
    p = predict(theta, X)
    accuracy = np.mean(y == p)
    print 'Train Accuracy: {0}%'.format(accuracy * 100)
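The helpers called above (load_data, feature_normalize, map_feature, sigmoid, cost_function_reg, gradient_descent_multi_reg, predict) are not shown on this page. For orientation, map_feature in this style of exercise usually expands the two input features into all polynomial terms up to a fixed degree, with a leading intercept column; the sketch below assumes that convention (the degree of 6 and the column order are assumptions, not taken from the example).

import numpy as np

def map_feature(x1, x2, degree=6):
    """Expand two feature vectors into all polynomial terms of x1 and x2
    up to `degree`, prefixed with a column of ones for the intercept."""
    x1 = np.asarray(x1, dtype=float).ravel()
    x2 = np.asarray(x2, dtype=float).ravel()
    columns = [np.ones_like(x1)]
    for total in range(1, degree + 1):
        for j in range(total + 1):
            columns.append((x1 ** (total - j)) * (x2 ** j))
    return np.column_stack(columns)

def sigmoid(z):
    """Logistic function used for the acceptance probability above."""
    return 1.0 / (1.0 + np.exp(-z))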
Example #3
def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
                    dataset='mnist.pkl.gz',
                    nkerns=[20, 50], batch_size=500):
    """ Demonstrates lenet on MNIST dataset

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
                          gradient)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: path to the dataset used for training / testing (MNIST here)

    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer
    """

    rng = numpy.random.RandomState(23455)

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
    n_train_batches /= batch_size
    n_valid_batches /= batch_size
    n_test_batches /= batch_size

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch

    # start-snippet-1
    x = T.matrix('x')   # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer
    # (28, 28) is the size of MNIST images.
    layer0_input = x.reshape((batch_size, 1, 28, 28))

    # Construct the first convolutional pooling layer:
    # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
    # maxpooling reduces this further to (24/2, 24/2) = (12, 12)
    # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
    layer0 = LeNetConvPoolLayer(
        rng,
        input=layer0_input,
        image_shape=(batch_size, 1, 28, 28),
        filter_shape=(nkerns[0], 1, 5, 5),
        poolsize=(2, 2)
    )

    # Construct the second convolutional pooling layer
    # filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
    # maxpooling reduces this further to (8/2, 8/2) = (4, 4)
    # 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)
    layer1 = LeNetConvPoolLayer(
        rng,
        input=layer0.output,
        image_shape=(batch_size, nkerns[0], 12, 12),
        filter_shape=(nkerns[1], nkerns[0], 5, 5),
        poolsize=(2, 2)
    )

    # Because the HiddenLayer is fully connected, it operates on 2D matrices
    # of shape (batch_size, num_pixels) (i.e. a matrix of rasterized images).
    # This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
    # or (500, 50 * 4 * 4) = (500, 800) with the default values.
    layer2_input = layer1.output.flatten(2)

    # construct a fully-connected layer (tanh activation)
    layer2 = HiddenLayer(
        rng,
        input=layer2_input,
        n_in=nkerns[1] * 4 * 4,
        n_out=500,
        activation=T.tanh
    )

    # classify the values of the fully-connected layer
    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)

    # the cost we minimize during training is the NLL of the model
    cost = layer3.negative_log_likelihood(y)

    # create a function to compute the mistakes that are made by the model
    test_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    # create a list of all model parameters to be fit by gradient descent
    params = layer3.params + layer2.params + layer1.params + layer0.params

    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    # train_model is a function that updates the model parameters by
    # SGD. Since this model has many parameters, it would be tedious to
    # manually create an update rule for each model parameter. We thus
    # create the updates list by automatically looping over all
    # (params[i], grads[i]) pairs.
    updates = [
        (param_i, param_i - learning_rate * grad_i)
        for param_i, grad_i in zip(params, grads)
    ]

    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # end-snippet-1

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'
    # early-stopping parameters
    patience = 10000  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatches before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print 'training @ iter = ', iter
            cost_ij = train_model(minibatch_index)

            if (iter + 1) % validation_frequency == 0:

                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    #improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss *  \
                       improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [
                        test_model(i)
                        for i in xrange(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = timeit.default_timer()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
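The image-size comments in this example (28 -> 24 -> 12 for layer0, then 12 -> 8 -> 4 for layer1) follow from a 'valid' convolution followed by non-overlapping max-pooling. The small helper below only reproduces that size bookkeeping; it is not part of the original code.

def conv_pool_output_size(image_size, filter_size, pool_size, padding=0):
    """Spatial size after a 'valid' convolution (optionally padded)
    followed by non-overlapping max-pooling."""
    conv_size = image_size - filter_size + 1 + 2 * padding
    return conv_size // pool_size

# matches the shapes quoted in the comments above
assert conv_pool_output_size(28, 5, 2) == 12   # layer0: (28-5+1)/2
assert conv_pool_output_size(12, 5, 2) == 4    # layer1: (12-5+1)/2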
Example #4
def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
             dataset='mnist.pkl.gz', batch_size=20, n_hidden=500, n_hidden_2=50):
    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]


    # number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    #import ipdb; ipdb.set_trace()
    
    # Build the model
    print '... building the model'

    # symbolic variables for the data
    index = T.lscalar()
    x = T.matrix('x')
    y = T.ivector('y')

    rng = np.random.RandomState(1234)

    # construct the MLP class
    classifier = MLP(
        rng=rng,
        input=x,
        n_in=28 * 28,
        n_hidden=n_hidden,
        n_hidden_2=n_hidden_2,
        n_out=10
    )

    # minimize negative log likelihood & regularization terms during training
    cost = (
        classifier.negative_log_likelihood(y) + 
        L1_reg * classifier.L1 +
        L2_reg * classifier.L2_sqr
    )
    print 'cost={}'.format(cost)

    test_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index+1) * batch_size],
            y: test_set_y[index * batch_size:(index+1) * batch_size],
        }
    )

    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index+1) * batch_size],
            y: valid_set_y[index * batch_size:(index+1) * batch_size],
        }
    )

    # Compute the gradient of cost w.r.t. theta (the parameters in classifier.params)
    print classifier.params
    gparams = [T.grad(cost, param) for param in classifier.params]
    print gparams

    # Specify how to update the parameters of the model
    updates = [
        (param, param - learning_rate * gparam) for param, gparam in zip(classifier.params, gparams)
    ]

    # Compile training function
    train_model = theano.function(
        inputs=[index],
        outputs=cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size:(index+1) * batch_size],
            y: train_set_y[index * batch_size:(index+1) * batch_size],
        }
    )

    # Train the model
    print '... training'

    patience = 10000
    patience_increase = 2
    improvement_threshold = 0.995
    validation_frequency = min(n_train_batches, patience/2)

    best_validation_loss = np.inf
    best_iter = 0
    test_score = 0.
    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False

    print 'Number of minibatches: {}'.format(n_train_batches)
    while (epoch < n_epochs) and (not done_looping):
        epoch += 1

        epoch_start_time = timeit.default_timer()
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_cost = train_model(minibatch_index)
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
                this_validation_loss = np.mean(validation_losses)
                print 'epoch {}, minibatch {}/{}, validation error {} %'.format(
                    epoch,
                    minibatch_index + 1,
                    n_train_batches,
                    this_validation_loss * 100.
                )

                # If this is the best validation score up until now
                if this_validation_loss < best_validation_loss:
                    if this_validation_loss < best_validation_loss * improvement_threshold:
                        patience = max(patience, iter * patience_increase)
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # Test against the test set
                    test_losses = [test_model(i) for i in xrange(n_test_batches)]
                    test_score = np.mean(test_losses)

                    print '    epoch {}, minibatch {}/{}, test error of best model {} %'.format(
                        epoch,
                        minibatch_index + 1,
                        n_train_batches,
                        test_score * 100.
                    )
                    
            if patience <= iter:
                done_looping = True
                break
            
        epoch_end_time = timeit.default_timer()
        print '    epoch {}, ran for {}s'.format(
            epoch,
            (epoch_end_time - epoch_start_time)
        )

    end_time = timeit.default_timer()
    print 'Optimization complete.  Best validation score of {} %'.format(
        best_validation_loss * 100.
    )
    print 'obtained at iteration {}, with test performance {} %'.format(
        best_iter + 1,
        test_score * 100.
    )
Example #5
def evaluate_lenet5(learning_rate=0.1,
                    n_epochs=200,
                    dataset='mnist.pkl.gz',
                    nkerns=[16, 16, 16],
                    batch_size=500):

    rng = numpy.random.RandomState(32324)

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size

    index = T.lscalar()  # index for each mini batch
    train_epoch = T.lscalar()

    x = T.matrix('x')
    y = T.ivector('y')

    # ------------------------------- Building Model ----------------------------------
    print "...Building the model"

    # output image size = (28-5+1+4)/2 = 14
    layer_0_input = x.reshape((batch_size, 1, 28, 28))
    layer_0 = LeNetConvPoolLayer(rng,
                                 input=layer_0_input,
                                 image_shape=(batch_size, 1, 28, 28),
                                 filter_shape=(nkerns[0], 1, 5, 5),
                                 poolsize=(2, 2),
                                 border_mode=2)

    #output image size = (14-3+1)/2 = 6
    layer_1 = LeNetConvPoolLayer(rng,
                                 input=layer_0.output,
                                 image_shape=(batch_size, nkerns[0], 14, 14),
                                 filter_shape=(nkerns[1], nkerns[0], 3, 3),
                                 poolsize=(2, 2))
    #output image size = (6-3+1)/2 = 2
    layer_2 = LeNetConvPoolLayer(rng,
                                 input=layer_1.output,
                                 image_shape=(batch_size, nkerns[1], 6, 6),
                                 filter_shape=(nkerns[2], nkerns[1], 3, 3),
                                 poolsize=(2, 2))

    # make the input to hidden layer 2 dimensional
    layer_3_input = layer_2.output.flatten(2)

    layer_3 = HiddenLayer(rng,
                          input=layer_3_input,
                          n_in=nkerns[2] * 2 * 2,
                          n_out=200,
                          activation=T.tanh)

    layer_4 = LogReg(input=layer_3.output, n_in=200, n_out=10)

    teacher_p_y_given_x = theano.shared(
        numpy.asarray(pickle.load(open('prob_best_model.pkl', 'rb')),
                      dtype=theano.config.floatX),
        borrow=True)

    #cost = layer_4.neg_log_likelihood(y) + T.mean((teacher_W - layer_4.W)**2)/(2.*(1+epoch*2)) + T.mean((teacher_b-layer_4.b)**2)/(2.*(1+epoch*2))

    # import pdb
    # pdb.set_trace()

    p_y_given_x = T.matrix('p_y_given_x')

    e = theano.shared(value=0, name='e', borrow=True)

    #cost = layer_4.neg_log_likelihood(y)  + 1.0/(e)*T.mean((layer_4.p_y_given_x - p_y_given_x)**2)
    cost = layer_4.neg_log_likelihood(
        y) + 2.0 / (e) * T.mean(-T.log(layer_4.p_y_given_x) * p_y_given_x -
                                layer_4.p_y_given_x * T.log(p_y_given_x))

    test_model = theano.function(
        [index],
        layer_4.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        })

    validate_model = theano.function(
        [index],
        layer_4.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        })

    # list of parameters

    params = layer_4.params + layer_3.params + layer_2.params + layer_1.params + layer_0.params

    grads = T.grad(cost, params)

    updates = [(param_i, param_i - learning_rate * grad_i)
               for param_i, grad_i in zip(params, grads)]

    train_model = theano.function(
        [index, train_epoch],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size],
            p_y_given_x: teacher_p_y_given_x[index],
            e: train_epoch
        })

    # -----------------------------------------Starting Training ------------------------------

    print('..... Training ')

    # for early stopping
    patience = 10000
    patience_increase = 2

    improvement_threshold = 0.95

    validation_frequency = min(n_train_batches, patience // 2)

    best_validation_loss = numpy.inf  # initialising loss to be infinite
    best_itr = 0
    test_score = 0

    start_time = timeit.default_timer()
    #epo = theano.shared('epo')
    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1

        for minibatch_index in range(n_train_batches):
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print('training @ iter = ', iter)

            cost_ij = train_model(minibatch_index, epoch)

            if (iter + 1) % validation_frequency == 0:
                # compute loss on validation set
                validation_losses = [
                    validate_model(i) for i in range(n_valid_batches)
                ]
                this_validation_loss = numpy.mean(validation_losses)

                # import pdb
                # pdb.set_trace()
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # check with best validation score till now
                if this_validation_loss < best_validation_loss:

                    # improve
                    if this_validation_loss < best_validation_loss * improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_itr = iter

                    test_losses = [
                        test_model(i) for i in range(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)

                    print('epoch %i, minibatch %i/%i, testing error %f %%' %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

                    with open('best_model_3layer.pkl', 'wb') as f:
                        pickle.dump(params, f)
                    with open('Results_student_3.txt', 'wb') as f2:
                        f2.write(str(test_score * 100) + '\n')

            #if patience <= iter:
            #	done_looping = True
            #	break

    end_time = timeit.default_timer()
    print('Optimization complete')
    print(
        'Best validation score of %f %% obtained at iteration %i, '
        'with test performance %f %%' %
        (best_validation_loss * 100., best_itr, test_score * 100))
    print('The code ran for %.2fm' % ((end_time - start_time) / 60.))
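The cost in this student network is the usual negative log-likelihood plus a teacher-matching term: a symmetric cross-entropy between the student's p_y_given_x and the teacher probabilities loaded from prob_best_model.pkl, weighted by 2/epoch so the teacher's influence fades as training proceeds. A plain NumPy sketch of that extra term (the function name and the clipping epsilon are illustrative, not from the original code):

import numpy as np

def teacher_matching_term(student_probs, teacher_probs, epoch, eps=1e-8):
    """Symmetric cross-entropy between student and teacher class
    probabilities, scaled by 2/epoch as in the cost expression above."""
    s = np.clip(student_probs, eps, 1.0)
    t = np.clip(teacher_probs, eps, 1.0)
    return (2.0 / epoch) * np.mean(-np.log(s) * t - s * np.log(t))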
Example #6
def test_rbm(learning_rate=0.1, training_epochs=15,
             dataset='mnist.pkl.gz', batch_size=20,
             n_chains=20, n_samples=10, output_folder='rbm_plots',
             n_hidden=500):
    """
    Demonstrate how to train an RBM and afterwards sample from it using Theano.

    This is demonstrated on MNIST.

    :param learning_rate: learning rate used for training the RBM

    :param training_epochs: number of epochs used for training

    :param dataset: path to the pickled dataset

    :param batch_size: size of a batch used to train the RBM

    :param n_chains: number of parallel Gibbs chains to be used for sampling

    :param n_samples: number of samples to plot for each chain

    """
    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size

    # allocate symbolic variables for the data
    index = T.lscalar()    # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images

    rng = numpy.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))

    # initialize storage for the persistent chain (state = hidden
    # layer of chain)
    persistent_chain = theano.shared(numpy.zeros((batch_size, n_hidden),
                                                 dtype=theano.config.floatX),
                                     borrow=True)

    # construct the RBM class
    rbm = RBM(input=x, n_visible=28 * 28,
              n_hidden=n_hidden, numpy_rng=rng, theano_rng=theano_rng)

    # get the cost and the gradient corresponding to one step of persistent CD-15
    cost, updates = rbm.get_cost_updates(lr=learning_rate,
                                         persistent=persistent_chain, k=15)

    #################################
    #     Training the RBM          #
    #################################
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
    os.chdir(output_folder)

    # it is ok for a theano function to have no output
    # the purpose of train_rbm is solely to update the RBM parameters
    train_rbm = theano.function([index], cost,
           updates=updates,
           givens={x: train_set_x[index * batch_size:
                                  (index + 1) * batch_size]},
           name='train_rbm')

    plotting_time = 0.
    start_time = time.clock()

    # go through training epochs
    for epoch in xrange(training_epochs):

        # go through the training set
        mean_cost = []
        for batch_index in xrange(n_train_batches):
            mean_cost += [train_rbm(batch_index)]

        print 'Training epoch %d, cost is ' % epoch, numpy.mean(mean_cost)

        # Plot filters after each training epoch
        plotting_start = time.clock()
        # Construct image from the weight matrix
        image = PIL.Image.fromarray(tile_raster_images(
                 X=rbm.W.get_value(borrow=True).T,
                 img_shape=(28, 28), tile_shape=(10, 10),
                 tile_spacing=(1, 1)))
        image.save('filters_at_epoch_%i.png' % epoch)
        plotting_stop = time.clock()
        plotting_time += (plotting_stop - plotting_start)

    end_time = time.clock()

    pretraining_time = (end_time - start_time) - plotting_time

    print ('Training took %f minutes' % (pretraining_time / 60.))

    #################################
    #     Sampling from the RBM     #
    #################################
    # find out the number of test samples
    number_of_test_samples = test_set_x.get_value(borrow=True).shape[0]

    # pick random test examples, with which to initialize the persistent chain
    test_idx = rng.randint(number_of_test_samples - n_chains)
    persistent_vis_chain = theano.shared(numpy.asarray(
            test_set_x.get_value(borrow=True)[test_idx:test_idx + n_chains],
            dtype=theano.config.floatX))

    plot_every = 1000
    # define one step of Gibbs sampling (mf = mean-field); define a
    # function that does `plot_every` steps before returning the
    # sample for plotting
    [presig_hids, hid_mfs, hid_samples, presig_vis,
     vis_mfs, vis_samples], updates =  \
                        theano.scan(rbm.gibbs_vhv,
                                outputs_info=[None,  None, None, None,
                                              None, persistent_vis_chain],
                                n_steps=plot_every)

    # add to updates the shared variable that takes care of our persistent
    # chain.
    updates.update({persistent_vis_chain: vis_samples[-1]})
    # construct the function that implements our persistent chain.
    # we generate the "mean field" activations for plotting and the actual
    # samples for reinitializing the state of our persistent chain
    sample_fn = theano.function([], [vis_mfs[-1], vis_samples[-1]],
                                updates=updates,
                                name='sample_fn')

    # create a space to store the image for plotting ( we need to leave
    # room for the tile_spacing as well)
    image_data = numpy.zeros((29 * n_samples + 1, 29 * n_chains - 1),
                             dtype='uint8')
    for idx in xrange(n_samples):
        # generate `plot_every` intermediate samples that we discard,
        # because successive samples in the chain are too correlated
        vis_mf, vis_sample = sample_fn()
        print ' ... plotting sample ', idx
        image_data[29 * idx:29 * idx + 28, :] = tile_raster_images(
                X=vis_mf,
                img_shape=(28, 28),
                tile_shape=(1, n_chains),
                tile_spacing=(1, 1))
        # construct image
    print numpy.shape(image_data)
    image = PIL.Image.fromarray(image_data)
    image.save('samples.png')
    os.chdir('../')
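rbm.gibbs_vhv, scanned above, performs one full Gibbs step: sample the hidden units given the visible units, then resample the visible units given that hidden sample. For binary units this is a pair of sigmoid/Bernoulli updates; the NumPy sketch below assumes that standard formulation (W, hbias and vbias stand for the RBM parameters and are not taken from the code above).

import numpy as np

def gibbs_vhv_step(v, W, hbias, vbias, rng):
    """One visible-hidden-visible Gibbs step for a binary RBM."""
    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
    h_mean = sigmoid(v.dot(W) + hbias)            # P(h = 1 | v)
    h_sample = rng.binomial(1, h_mean)            # sample hidden units
    v_mean = sigmoid(h_sample.dot(W.T) + vbias)   # P(v = 1 | h)
    v_sample = rng.binomial(1, v_mean)            # resample visible units
    return h_mean, h_sample, v_mean, v_sample

# e.g. rng = np.random.RandomState(123); step = gibbs_vhv_step(v0, W, hb, vb, rng)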
Example #7
def evaluate_lenet5(learning_rate=0.10, n_epochs=200, dataset='mnist.pkl.gz',
                    nkerns=[16, 16, 16, 12, 12, 12], batch_size=500):

    rng = numpy.random.RandomState(32324)

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size

    index = T.lscalar()  # index for each mini batch
    train_epoch = T.lscalar('train_epoch')

    x = T.matrix('x')
    y = T.ivector('y')

    # ------------------------------- Building Model ----------------------------------
    print "...Building the model"

    layer_0_input = x.reshape((batch_size, 1, 28, 28))

    # output image size = (28-5+1)/1 = 24
    layer_0 = LeNetConvPoolLayer(rng, input=layer_0_input,
                                 image_shape=(batch_size, 1, 28, 28),
                                 filter_shape=(nkerns[0], 1, 5, 5),
                                 poolsize=(1, 1))

    # output image size = (24-3+1) = 22
    layer_1 = LeNetConvPoolLayer(rng, input=layer_0.output,
                                 image_shape=(batch_size, nkerns[0], 24, 24),
                                 filter_shape=(nkerns[1], nkerns[0], 3, 3),
                                 poolsize=(1, 1))

    # output image size = (22-3+1)/2 = 10
    layer_2 = LeNetConvPoolLayer(rng, input=layer_1.output,
                                 image_shape=(batch_size, nkerns[1], 22, 22),
                                 filter_shape=(nkerns[2], nkerns[1], 3, 3),
                                 poolsize=(2, 2))

    # output image size = (10-3+1)/2 = 4
    layer_3 = LeNetConvPoolLayer(rng, input=layer_2.output,
                                 image_shape=(batch_size, nkerns[2], 10, 10),
                                 filter_shape=(nkerns[3], nkerns[2], 3, 3),
                                 poolsize=(2, 2))

    # output image size = (4-3+2+1) = 4
    layer_4 = LeNetConvPoolLayer(rng, input=layer_3.output,
                                 image_shape=(batch_size, nkerns[3], 4, 4),
                                 filter_shape=(nkerns[4], nkerns[3], 3, 3),
                                 poolsize=(1, 1), border_mode=1)

    # output image size = (4-3+2+1)/2 = 2
    layer_5 = LeNetConvPoolLayer(rng, input=layer_4.output,
                                 image_shape=(batch_size, nkerns[4], 4, 4),
                                 filter_shape=(nkerns[5], nkerns[4], 3, 3),
                                 poolsize=(2, 2), border_mode=1)

    # make the input to hidden layer 2 dimensional
    layer_6_input = layer_5.output.flatten(2)

    layer_6 = HiddenLayer(rng, input=layer_6_input, n_in=nkerns[5] * 2 * 2,
                          n_out=200, activation=T.tanh)

    layer_7 = LogReg(input=layer_6.output, n_in=200, n_out=10)

    teacher_p_y_given_x = theano.shared(
        numpy.asarray(pickle.load(open('prob_best_model.pkl', 'rb')),
                      dtype=theano.config.floatX),
        borrow=True)
    p_y_given_x = T.matrix('p_y_given_x')
    e = theano.shared(value=0, name='e', borrow=True)

    cost = layer_7.neg_log_likelihood(y) + 2.0 / e * T.mean(
        -T.log(layer_7.p_y_given_x) * p_y_given_x -
        layer_7.p_y_given_x * T.log(p_y_given_x))

    tg = theano.shared(
        numpy.asarray(pickle.load(open('modified_guided_data.pkl', 'rb')),
                      dtype=theano.config.floatX),
        borrow=True)
    guiding_weights = T.tensor4('guiding_weights')
    #guide_cost = T.mean(-T.log(layer_3.output)*guiding_weights - layer_3.output*T.log(guiding_weights))
    guide_cost = T.mean((layer_3.output - guiding_weights) ** 2)

    test_model = theano.function(
        [index], layer_7.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        })

    validate_model = theano.function(
        [index], layer_7.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        })

    # list of parameters
    params = (layer_7.params + layer_6.params + layer_5.params +
              layer_4.params + layer_3.params + layer_2.params +
              layer_1.params + layer_0.params)
    params_gl = layer_3.params + layer_2.params + layer_1.params + layer_0.params

    grads_gl = T.grad(guide_cost, params_gl)
    updates_gl = [(param_i, param_i - learning_rate / 10 * grad_i)
                  for param_i, grad_i in zip(params_gl, grads_gl)]

    grads = T.grad(cost, params)
    updates = [(param_i, param_i - learning_rate * grad_i)
               for param_i, grad_i in zip(params, grads)]

    train_model = theano.function(
        [index, train_epoch], cost, updates=updates,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size],
            p_y_given_x: teacher_p_y_given_x[index],
            e: train_epoch
        })
    train_till_guided_layer = theano.function(
        [index], guide_cost, updates=updates_gl,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size],
            guiding_weights: tg[index]
        }, on_unused_input='ignore')

    # -----------------------------------------Starting Training ------------------------------

    print('..... Training ')

    # for early stopping
    patience = 10000
    patience_increase = 2

    improvement_threshold = 0.95

    validation_frequency = min(n_train_batches, patience // 2)

    best_validation_loss = numpy.inf  # initialising loss to be infinite
    best_itr = 0
    test_score = 0

    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False
    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in range(n_train_batches):
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print('training @ iter = ', iter)
            # train the lower layers against the teacher's stored activations
            # for the first fifth of the epochs, then continue with the full cost
            if epoch < n_epochs / 5:
                cost_ij_guided = train_till_guided_layer(minibatch_index)
            cost_ij = train_model(minibatch_index, epoch)

            if (iter + 1) % validation_frequency == 0:
                # compute loss on validation set
                validation_losses = [validate_model(i) for i in range(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)

                with open('Student_6_terminal_out', 'a+') as f_:
                    f_.write('epoch %i, minibatch %i/%i, validation error %f %% \n' %
                             (epoch, minibatch_index + 1, n_train_batches,
                              this_validation_loss * 100.))

                # check with best validation score till now
                if this_validation_loss < best_validation_loss:

                    # improve patience if the improvement is large enough
                    if this_validation_loss < best_validation_loss * improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_itr = iter

                    test_losses = [test_model(i) for i in range(n_test_batches)]
                    test_score = numpy.mean(test_losses)

                    with open('Student_6_terminal_out', 'a+') as f_:
                        f_.write('epoch %i, minibatch %i/%i, testing error %f %%\n' %
                                 (epoch, minibatch_index + 1, n_train_batches,
                                  test_score * 100.))
                    with open('best_model_7layer.pkl', 'wb') as f:
                        pickle.dump(params, f)
                    with open('Results_student_6.txt', 'wb') as f1:
                        f1.write(str(test_score * 100) + '\n')
            #if patience <= iter:
            #    done_looping = True
            #    break

    end_time = timeit.default_timer()
    with open('Student_6_terminal_out', 'a+') as f_:
        f_.write('Optimization complete\n')
        f_.write('Best validation score of %f %% obtained at iteration %i, '
                 'with test performance %f %% \n' %
                 (best_validation_loss * 100., best_itr, test_score * 100))
        f_.write('The code ran for %.2fm \n' % ((end_time - start_time) / 60.))
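For the first fifth of the epochs this student additionally minimizes guide_cost, a mean-squared error between its layer_3 feature maps and the teacher activations stored in modified_guided_data.pkl, applied to the lower layers with a ten-times smaller learning rate; afterwards only the distillation cost drives the parameters. A NumPy sketch of that hint-style loss (array names are illustrative, not from the original code):

import numpy as np

def hint_loss(student_feature_maps, teacher_feature_maps):
    """Mean squared error between student and teacher 4D feature-map blocks,
    as used during the guided (hint) phase of training."""
    diff = np.asarray(student_feature_maps) - np.asarray(teacher_feature_maps)
    return np.mean(diff ** 2)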
Example #8
def test_mlp(learning_rate=0.01,
             L1_reg=0.00,
             L2_reg=0.0001,
             n_epochs=1000,
             dataset='mnist.pkl.gz',
             batch_size=20,
             n_hidden=500):
    """
    Demonstrate stochastic gradient descent optimization for a multilayer
    perceptron

    This is demonstrated on MNIST.

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
    gradient)

    :type L1_reg: float
    :param L1_reg: L1-norm's weight when added to the cost (see
    regularization)

    :type L2_reg: float
    :param L2_reg: L2-norm's weight when added to the cost (see
    regularization)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: the path of the MNIST dataset file from
                 http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz


   """
    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
    # [int] labels

    rng = numpy.random.RandomState(1234)

    # construct the MLP class
    classifier = MLP(rng=rng,
                     input=x,
                     n_in=28 * 28,
                     n_hidden=n_hidden,
                     n_out=10)

    # start-snippet-4
    # the cost we minimize during training is the negative log likelihood of
    # the model plus the regularization terms (L1 and L2); cost is expressed
    # here symbolically
    cost = (classifier.negative_log_likelihood(y) + L1_reg * classifier.L1 +
            L2_reg * classifier.L2_sqr)
    # end-snippet-4

    # compiling a Theano function that computes the mistakes that are made
    # by the model on a minibatch
    test_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        })

    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        })

    # start-snippet-5
    # compute the gradient of cost with respect to theta (stored in params)
    # the resulting gradients will be stored in a list gparams
    gparams = [T.grad(cost, param) for param in classifier.params]

    # specify how to update the parameters of the model as a list of
    # (variable, update expression) pairs

    # given two lists of the same length, A = [a1, a2, a3, a4] and
    # B = [b1, b2, b3, b4], zip generates a list C of same size, where each
    # element is a pair formed from the two lists :
    #    C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
    updates = [(param, param - learning_rate * gparam)
               for param, gparam in zip(classifier.params, gparams)]

    # compiling a Theano function `train_model` that returns the cost, but
    # in the same time updates the parameter of the model based on the rules
    # defined in `updates`
    train_model = theano.function(
        inputs=[index],
        outputs=cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size]
        })
    # end-snippet-5

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'

    # early-stopping parameters
    patience = 10000  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
    # found
    improvement_threshold = 0.995  # a relative improvement of this much is
    # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
    # go through this many
    # minibatches before checking the network
    # on the validation set; in this case we
    # check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            minibatch_avg_cost = train_model(minibatch_index)
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                # compute zero-one loss on validation set
                validation_losses = [
                    validate_model(i) for i in xrange(n_valid_batches)
                ]
                this_validation_loss = numpy.mean(validation_losses)

                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    #improve patience if loss improvement is good enough
                    if (this_validation_loss <
                            best_validation_loss * improvement_threshold):
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [
                        test_model(i) for i in xrange(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)

                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = timeit.default_timer()
    print(('Optimization complete. Best validation score of %f %% '
           'obtained at iteration %i, with test performance %f %%') %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
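The early-stopping bookkeeping shared by these examples reduces to: validate every validation_frequency minibatches, extend the patience budget whenever the validation loss improves by more than improvement_threshold, and stop once the iteration count exceeds patience. A condensed, stand-alone sketch of just that logic (variable names mirror the code above):

def early_stopping_update(iteration, patience, patience_increase,
                          this_loss, best_loss, improvement_threshold):
    """Return (patience, best_loss, stop) after one validation check."""
    if this_loss < best_loss:
        # a sufficiently large relative improvement extends the patience budget
        if this_loss < best_loss * improvement_threshold:
            patience = max(patience, iteration * patience_increase)
        best_loss = this_loss
    return patience, best_loss, patience <= iteration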
Example #9
def evaluate_lenet5(learning_rate=0.1, n_epochs=200, dataset='mnist.pkl.gz',
                    nkerns=[20, 50], batch_size=500, testing=0):

    rng = numpy.random.RandomState(32324)

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size

    index = T.lscalar()  # index for each mini batch

    x = T.matrix('x')
    y = T.ivector('y')

    # ------------------------------- Building Model ----------------------------------
    if testing == 0:
        print "...Building the model"

    # output image size = (28-5+1)/2 = 12
    layer_0_input = x.reshape((batch_size, 1, 28, 28))
    layer_0 = LeNetConvPoolLayer(rng, input=layer_0_input,
                                 image_shape=(batch_size, 1, 28, 28),
                                 filter_shape=(nkerns[0], 1, 5, 5),
                                 poolsize=(2, 2))

    # output image size = (12-5+1)/2 = 4
    layer_1 = LeNetConvPoolLayer(rng, input=layer_0.output,
                                 image_shape=(batch_size, nkerns[0], 12, 12),
                                 filter_shape=(nkerns[1], nkerns[0], 5, 5),
                                 poolsize=(2, 2))

    # make the input to hidden layer 2 dimensional
    layer_2_input = layer_1.output.flatten(2)

    layer_2 = HiddenLayer(rng, input=layer_2_input, n_in=nkerns[1] * 4 * 4,
                          n_out=500, activation=T.tanh)

    layer_3 = LogReg(input=layer_2.output, n_in=500, n_out=10)

    cost = layer_3.neg_log_likelihood(y)

    test_model = theano.function(
        [index], layer_3.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        })

    validate_model = theano.function(
        [index], layer_3.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        })

    train_predic = theano.function(
        [index], layer_3.prob_y_given_x(),
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size]
        })

    # dump of layer_1 activations, used to guide the student networks
    layer_guided = theano.function(
        [index], layer_1.output,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size]
        })

    # list of parameters
    params = layer_3.params + layer_2.params + layer_1.params + layer_0.params

    grads = T.grad(cost, params)

    updates = [(param_i, param_i - learning_rate * grad_i)
               for param_i, grad_i in zip(params, grads)]

    train_model = theano.function(
        [index], cost, updates=updates,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size]
        })

    # -----------------------------------------Starting Training ------------------------------
    if testing == 0:
        print('..... Training ')

    # for early stopping
    patience = 10000
    patience_increase = 2

    improvement_threshold = 0.95

    validation_frequency = min(n_train_batches, patience // 2)

    best_validation_loss = numpy.inf  # initialising loss to be infinite
    best_itr = 0
    test_score = 0

    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping) and testing == 0:
        epoch = epoch + 1
        for minibatch_index in range(n_train_batches):
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print('training @ iter = ', iter)

            cost_ij = train_model(minibatch_index)

            if (iter + 1) % validation_frequency == 0:
                # compute loss on validation set
                validation_losses = [validate_model(i) for i in range(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)

                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # check with best validation score till now
                if this_validation_loss < best_validation_loss:

                    # improve
                    # if this_validation_loss < best_validation_loss * improvement_threshold:
                    #     patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_itr = iter

                    test_losses = [test_model(i) for i in range(n_test_batches)]
                    test_score = numpy.mean(test_losses)

                    print('epoch %i, minibatch %i/%i, testing error %f %%' %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

                    with open('best_model.pkl', 'wb') as f:
                        pickle.dump(params, f)

                    with open('Results_teacher.txt', 'wb') as f2:
                        f2.write(str(test_score * 100) + '\n')

                    # dump the teacher's class probabilities for the student runs
                    p_y_given_x = [train_predic(i) for i in range(n_train_batches)]
                    with open('prob_best_model.pkl', 'wb') as f1:
                        pickle.dump(p_y_given_x, f1)

            # if patience <= iter:
            #     done_looping = True
            #     break

    layer_2_op_dump = [layer_guided(i) for i in range(n_train_batches)]
    with open('layer_guided.pkl', 'wb') as lg:
        pickle.dump(layer_2_op_dump, lg)

    end_time = timeit.default_timer()
    # p_y_given_x = [train_model(i) for i in range(n_train_batches)]
    # with open('prob_best_model.pkl') as f:
    #     pickle.dump(p_y_given_x)

    if testing == 0:
        print('Optimization complete')
        print('Best validation score of %f %% obtained at iteration %i, '
              'with test performance %f %%' %
              (best_validation_loss * 100., best_itr, test_score * 100))
        print('The code ran for %.2fm' % ((end_time - start_time) / 60.))
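This teacher network writes its per-minibatch class probabilities to prob_best_model.pkl and its layer_1 activations to layer_guided.pkl; the student in Example #5 reads the probability file directly, and Example #7 additionally uses a guided-layer file derived from the activation dump. A minimal sketch of loading these files back (the list-of-minibatch-arrays layout follows from how they are written above):

import pickle
import numpy

with open('prob_best_model.pkl', 'rb') as f:
    teacher_probs = numpy.asarray(pickle.load(f))      # one (batch_size, 10) block per minibatch

with open('layer_guided.pkl', 'rb') as f:
    teacher_layer_1 = numpy.asarray(pickle.load(f))    # one 4D activation block per minibatch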
Example #10
def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
             dataset='mnist.pkl.gz', batch_size=20, n_hidden=500):
    """
    Demonstrate stochastic gradient descent optimization for a multilayer
    perceptron

    This is demonstrated on MNIST.

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
    gradient)

    :type L1_reg: float
    :param L1_reg: L1-norm's weight when added to the cost (see
    regularization)

    :type L2_reg: float
    :param L2_reg: L2-norm's weight when added to the cost (see
    regularization)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: the path of the MNIST dataset file from
                 http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz


   """
    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels

    rng = numpy.random.RandomState(1234)

    # construct the MLP class
    classifier = MLP(rng=rng, input=x, n_in=28 * 28,
                     n_hidden=n_hidden, n_out=10)

    # the cost we minimize during training is the negative log likelihood of
    # the model plus the regularization terms (L1 and L2); cost is expressed
    # here symbolically
    cost = classifier.negative_log_likelihood(y) \
         + L1_reg * classifier.L1 \
         + L2_reg * classifier.L2_sqr

    # compiling a Theano function that computes the mistakes that are made
    # by the model on a minibatch
    test_model = theano.function(inputs=[index],
            outputs=classifier.errors(y),
            givens={
                x: test_set_x[index * batch_size:(index + 1) * batch_size],
                y: test_set_y[index * batch_size:(index + 1) * batch_size]})

    validate_model = theano.function(inputs=[index],
            outputs=classifier.errors(y),
            givens={
                x: valid_set_x[index * batch_size:(index + 1) * batch_size],
                y: valid_set_y[index * batch_size:(index + 1) * batch_size]})

    # compute the gradient of cost with respect to theta (stored in params)
    # the resulting gradients will be stored in a list gparams
    gparams = []
    for param in classifier.params:
        gparam = T.grad(cost, param)
        gparams.append(gparam)

    # specify how to update the parameters of the model as a list of
    # (variable, update expression) pairs
    updates = []
    # given two lists of the same length, A = [a1, a2, a3, a4] and
    # B = [b1, b2, b3, b4], zip generates a list C of the same size, where
    # each element is a pair formed from the two lists:
    #    C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
    for param, gparam in zip(classifier.params, gparams):
        updates.append((param, param - learning_rate * gparam))

    # compiling a Theano function `train_model` that returns the cost, but
    # in the same time updates the parameter of the model based on the rules
    # defined in `updates`
    train_model = theano.function(inputs=[index], outputs=cost,
            updates=updates,
            givens={
                x: train_set_x[index * batch_size:(index + 1) * batch_size],
                y: train_set_y[index * batch_size:(index + 1) * batch_size]})

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'

    # early-stopping parameters
    patience = 10000  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatches before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch

    best_params = None
    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            minibatch_avg_cost = train_model(minibatch_index)
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)

                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                     (epoch, minibatch_index + 1, n_train_batches,
                      this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    #improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss *  \
                           improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [test_model(i) for i
                                   in xrange(n_test_batches)]
                    test_score = numpy.mean(test_losses)

                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print(('Optimization complete. Best validation score of %f %% '
           'obtained at iteration %i, with test performance %f %%') %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
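In the Theano tutorial style these scripts are normally driven by a small entry point; a typical invocation for the functions on this page would look like the sketch below (which function to call, and with which arguments, is up to the reader).

if __name__ == '__main__':
    # run the plain MLP example with its default hyper-parameters;
    # the other functions above (evaluate_lenet5, test_rbm, ...) are
    # invoked the same way
    test_mlp()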