Example #1

# Imports assumed by all four examples below (load_data, LeNetConvPoolLayer,
# HiddenLayer, and LogReg are assumed to come from the accompanying
# Theano deep-learning-tutorial style code base):
import pickle
import timeit

import numpy
import theano
import theano.tensor as T
def evaluate_lenet5(learning_rate=0.1,
                    n_epochs=200,
                    dataset='mnist.pkl.gz',
                    nkerns=[16, 16, 16],
                    batch_size=500):

    rng = numpy.random.RandomState(32324)

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size

    index = T.lscalar()  # index for each mini batch
    train_epoch = T.lscalar()

    x = T.matrix('x')
    y = T.ivector('y')

    # ------------------------------- Building Model ----------------------------------
    print "...Building the model"

    # output image size = (28 - 5 + 1 + 2*2)/2 = 14 (border_mode=2 pads by 2 on each side)
    layer_0_input = x.reshape((batch_size, 1, 28, 28))
    layer_0 = LeNetConvPoolLayer(rng,
                                 input=layer_0_input,
                                 image_shape=(batch_size, 1, 28, 28),
                                 filter_shape=(nkerns[0], 1, 5, 5),
                                 poolsize=(2, 2),
                                 border_mode=2)
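    # size rule used by the output-size comments in these examples:
    #   out = (in - filter + 1 + 2*border_mode) // poolsize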

    # output image size = (14 - 3 + 1)/2 = 6
    layer_1 = LeNetConvPoolLayer(rng,
                                 input=layer_0.output,
                                 image_shape=(batch_size, nkerns[0], 14, 14),
                                 filter_shape=(nkerns[1], nkerns[0], 3, 3),
                                 poolsize=(2, 2))
    # output image size = (6 - 3 + 1)/2 = 2
    layer_2 = LeNetConvPoolLayer(rng,
                                 input=layer_1.output,
                                 image_shape=(batch_size, nkerns[1], 6, 6),
                                 filter_shape=(nkerns[2], nkerns[1], 3, 3),
                                 poolsize=(2, 2))

    # make the input to hidden layer 2 dimensional
    layer_3_input = layer_2.output.flatten(2)

    layer_3 = HiddenLayer(rng,
                          input=layer_3_input,
                          n_in=nkerns[2] * 2 * 2,
                          n_out=200,
                          activation=T.tanh)

    layer_4 = LogReg(input=layer_3.output, n_in=200, n_out=10)

    teacher_p_y_given_x = theano.shared(numpy.asarray(
        pickle.load(open('prob_best_model.pkl', 'rb')),
        dtype=theano.config.floatX),
                                        borrow=True)
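    # Soft targets: the teacher network's softmax outputs for every training
    # minibatch, dumped by the teacher run (Example #4).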

    #cost = layer_4.neg_log_likelihood(y) + T.mean((teacher_W - layer_4.W)**2)/(2.*(1+epoch*2)) + T.mean((teacher_b-layer_4.b)**2)/(2.*(1+epoch*2))


    p_y_given_x = T.matrix('p_y_given_x')

    e = theano.shared(value=0, name='e', borrow=True)

    #cost = layer_4.neg_log_likelihood(y)  + 1.0/(e)*T.mean((layer_4.p_y_given_x - p_y_given_x)**2)
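    # Distillation cost: student negative log-likelihood plus a symmetric
    # cross-entropy between student and teacher distributions, annealed as
    # 2/epoch so the teacher's pull fades as training progresses.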
    cost = layer_4.neg_log_likelihood(
        y) + 2.0 / (e) * T.mean(-T.log(layer_4.p_y_given_x) * p_y_given_x -
                                layer_4.p_y_given_x * T.log(p_y_given_x))

    test_model = theano.function(
        [index],
        layer_4.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        })

    validate_model = theano.function(
        [index],
        layer_4.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        })

    # list of parameters

    params = layer_4.params + layer_3.params + layer_2.params + layer_1.params + layer_0.params

    grads = T.grad(cost, params)

    updates = [(param_i, param_i - learning_rate * grad_i)
               for param_i, grad_i in zip(params, grads)]

    train_model = theano.function(
        [index, train_epoch],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size],
            p_y_given_x: teacher_p_y_given_x[index],
            e: train_epoch
        })

    # -----------------------------------------Starting Training ------------------------------

    print('..... Training ')

    # for early stopping
    patience = 10000
    patience_increase = 2

    improvement_threshold = 0.95

    validation_frequency = min(n_train_batches, patience // 2)
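    # patience is the minimum number of minibatch updates to run; it is
    # stretched by patience_increase whenever validation error drops below
    # improvement_threshold times the previous best.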

    best_validation_loss = numpy.inf  # initialising loss to be infinite
    best_itr = 0
    test_score = 0

    start_time = timeit.default_timer()
    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1

        for minibatch_index in range(n_train_batches):
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print('training @ iter = ', iter)

            cost_ij = train_model(minibatch_index, epoch)

            if (iter + 1) % validation_frequency == 0:
                # compute loss on validation set
                validation_losses = [
                    validate_model(i) for i in range(n_valid_batches)
                ]
                this_validation_loss = numpy.mean(validation_losses)

                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # check with best validation score till now
                if this_validation_loss < best_validation_loss:

                    # improve
                    if this_validation_loss < best_validation_loss * improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_itr = iter

                    test_losses = [
                        test_model(i) for i in range(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)

                    print('epoch %i, minibatch %i/%i, testing error %f %%' %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

                    with open('best_model_3layer.pkl', 'wb') as f:
                        pickle.dump(params, f)
                    with open('Results_student_3.txt', 'w') as f2:
                        f2.write(str(test_score * 100) + '\n')

            #if patience <= iter:
            #	done_looping = True
            #	break

    end_time = timeit.default_timer()
    print('Optimization complete')
    print(
        'Best validation score of %f %% obtained at iteration %i, '
        'with test performance %f %%' %
        (best_validation_loss * 100., best_itr, test_score * 100))
    print('The code ran for %.2fm' % ((end_time - start_time) / 60.))
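
A minimal NumPy sketch of the annealed distillation penalty used above, with
hypothetical toy data ('student' and 'teacher' stand in for layer_4.p_y_given_x
and one minibatch of teacher_p_y_given_x):

import numpy

def distillation_penalty(student, teacher, epoch):
    # symmetric cross-entropy between the two distributions, annealed as 2/epoch
    sce = numpy.mean(-numpy.log(student) * teacher - student * numpy.log(teacher))
    return 2.0 / epoch * sce

rng = numpy.random.RandomState(0)
student = rng.dirichlet(numpy.ones(10), size=4)  # (batch, n_classes), rows sum to 1
teacher = rng.dirichlet(numpy.ones(10), size=4)
print(distillation_penalty(student, teacher, epoch=1))  # shrinks as epoch grows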
Example #2
    def evaluate_lenet5(self,
                        learning_rate=0.1,
                        n_epochs=1,
                        dataset='mnist.pkl.gz',
                        nkerns=[20, 50],
                        batch_size=500,
                        testing=0):

        rng = numpy.random.RandomState(32324)

        datasets = self.data

        train_set_x, train_set_y = datasets[0]
        valid_set_x, valid_set_y = datasets[1]
        test_set_x, test_set_y = datasets[2]

        n_train_batches = train_set_x.get_value(
            borrow=True).shape[0] // batch_size
        n_valid_batches = valid_set_x.get_value(
            borrow=True).shape[0] // batch_size
        n_test_batches = test_set_x.get_value(
            borrow=True).shape[0] // batch_size

        index = T.lscalar()  # index for each mini batch

        x = T.matrix('x')
        y = T.ivector('y')

        # ------------------------------- Building Model ----------------------------------
        if testing == 0:
            print "...Building the model"

        # output image size = (28-5+1)/2 = 12
        layer_0_input = x.reshape((batch_size, 1, 28, 28))
        layer_0 = LeNetConvPoolLayer(rng,
                                     input=layer_0_input,
                                     image_shape=(batch_size, 1, 28, 28),
                                     filter_shape=(nkerns[0], 1, 5, 5),
                                     poolsize=(2, 2))

        # output image size = (12 - 5 + 1)/2 = 4
        layer_1 = LeNetConvPoolLayer(rng,
                                     input=layer_0.output,
                                     image_shape=(batch_size, nkerns[0], 12,
                                                  12),
                                     filter_shape=(nkerns[1], nkerns[0], 5, 5),
                                     poolsize=(2, 2))

        # make the input to hidden layer 2 dimensional
        layer_2_input = layer_1.output.flatten(2)

        layer_2 = HiddenLayer(rng,
                              input=layer_2_input,
                              n_in=nkerns[1] * 4 * 4,
                              n_out=500,
                              activation=T.tanh)

        layer_3 = LogReg(input=layer_2.output, n_in=500, n_out=10)

        self.cost = layer_3.neg_log_likelihood(y)
        self.s = layer_3.s

        self.test_model = theano.function(
            [index],
            layer_3.errors(y),
            givens={
                x: test_set_x[index * batch_size:(index + 1) * batch_size],
                y: test_set_y[index * batch_size:(index + 1) * batch_size]
            })

        self.validate_model = theano.function(
            [index],
            layer_3.errors(y),
            givens={
                x: valid_set_x[index * batch_size:(index + 1) * batch_size],
                y: valid_set_y[index * batch_size:(index + 1) * batch_size]
            })

        self.train_predic = theano.function(
            [index],
            layer_3.prob_y_given_x(),
            givens={
                x: train_set_x[index * batch_size:(index + 1) * batch_size]
            })
        # list of parameters

        self.params = layer_3.params + layer_2.params + layer_1.params + layer_0.params
        grads = T.grad(self.cost, self.params)

        self.coefficient = 1

        self.shapes = [i.get_value().shape for i in self.params]
        symbolic_types = T.scalar, T.vector, T.matrix, T.tensor3, T.tensor4
        v = [symbolic_types[len(i)]() for i in self.shapes]
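        # v: one symbolic variable per parameter tensor, matching its
        # dimensionality; Gv (assumed defined elsewhere in this code base)
        # builds the Gauss-Newton matrix-vector product G*v used by
        # Hessian-free / conjugate-gradient training.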

        gauss_vector = Gv(self.cost, self.s, self.params, v, self.coefficient)
        self.get_cost = theano.function(
            [index],
            self.cost,
            givens={
                x: train_set_x[index * batch_size:(index + 1) * batch_size],
                y: train_set_y[index * batch_size:(index + 1) * batch_size]
            },
            on_unused_input='ignore')

        self.get_grad = theano.function(
            [index],
            grads,
            givens={
                x: train_set_x[index * batch_size:(index + 1) * batch_size],
                y: train_set_y[index * batch_size:(index + 1) * batch_size]
            },
            on_unused_input='ignore')

        self.get_s = theano.function(
            [index],
            self.s,
            givens={
                x: train_set_x[index * batch_size:(index + 1) * batch_size],
            },
            on_unused_input='ignore')
        # Gauss-Newton vector product over a training minibatch
        # (x and y both come from the training set)
        self.function_Gv = theano.function(
            [index],
            gauss_vector,
            givens={
                x: train_set_x[index * batch_size:(index + 1) * batch_size],
                y: train_set_y[index * batch_size:(index + 1) * batch_size]
            },
            on_unused_input='ignore')
        # # Using stochastic gradient updates
        # updates = [ (param_i, param_i-learning_rate*grad_i) for param_i, grad_i in zip(params,grads) ]
        # train_model = theano.function([index],cost, updates=updates,
        # 	givens={
        # 			x: train_set_x[index*batch_size:(index+1)*batch_size],
        # 			y: train_set_y[index*batch_size:(index+1)*batch_size]
        # 			})

        # Using conjugate gradient updates:
        # cg_ = cg(cost, output, params, coefficient, v)
        # updated_params = [(param_i, param_j) for param_i, param_j in zip(params, cg_)]

        #self.update_parameters= theano.function([updated_params],updates=[params,updated_params])

        # -----------------------------------------Starting Training ------------------------------
        if testing == 0:
            print('..... Training ')

        # for early stopping
        patience = 10000
        patience_increase = 2
        improvement_threshold = 0.95
        validation_frequency = min(n_train_batches, patience // 2)

        self.best_validation_loss = numpy.inf  # initialising loss to be infinite
        best_itr = 0
        test_score = 0
        start_time = timeit.default_timer()

        epoch = 0
        done_looping = False

        while (epoch < n_epochs):
            epoch = epoch + 1
            for minibatch_index in range(n_train_batches):
                iter = (epoch - 1) * n_train_batches + minibatch_index

                print('training @ iter = ', iter)

                self.cg(minibatch_index)
        end_time = timeit.default_timer()
        if testing == 0:
            print('Optimization complete')
            print(
                'Best validation score of %f %% obtained at iteration %i, '
                'with test performance %f %%' %
                (self.best_validation_loss * 100., best_itr, test_score * 100))
            print('The code ran for %.2fm' % ((end_time - start_time) / 60.))
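
The self.cg step above is defined elsewhere in this class (not shown); for
reference, a minimal NumPy sketch of the classic conjugate-gradient iteration
that Hessian-free training builds on (all names here are hypothetical; apply_G
stands for the implicit Gauss-Newton product that function_Gv provides):

import numpy

def conjugate_gradient(apply_G, b, n_iters=50, tol=1e-10):
    # Solve G x = b for symmetric positive-definite G, given only a
    # function that computes the product G.dot(v).
    x = numpy.zeros_like(b)
    r = b - apply_G(x)   # residual
    p = r.copy()         # search direction
    rs = r.dot(r)
    for _ in range(n_iters):
        Gp = apply_G(p)
        alpha = rs / p.dot(Gp)
        x += alpha * p
        r -= alpha * Gp
        rs_new = r.dot(r)
        if rs_new < tol:
            break
        p = r + (rs_new / rs) * p
        rs = rs_new
    return x

# toy usage with an explicit SPD matrix
A = numpy.random.RandomState(0).randn(5, 5)
G = A.T.dot(A) + 0.1 * numpy.eye(5)
x = conjugate_gradient(lambda v: G.dot(v), numpy.ones(5))
print(numpy.allclose(G.dot(x), numpy.ones(5), atol=1e-6))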
Example #3
def evaluate_lenet5(learning_rate=0.10,
                    n_epochs=200,
                    dataset='mnist.pkl.gz',
                    nkerns=[16, 16, 16, 12, 12, 12],
                    batch_size=500):

    rng = numpy.random.RandomState(32324)

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size

    index = T.lscalar()  # index for each mini batch
    train_epoch = T.lscalar('train_epoch')

    x = T.matrix('x')
    y = T.ivector('y')

    # ------------------------------- Building Model ----------------------------------
    print('...Building the model')

    layer_0_input = x.reshape((batch_size, 1, 28, 28))

    # output image size = (28 - 5 + 1)/1 = 24
    layer_0 = LeNetConvPoolLayer(rng,
                                 input=layer_0_input,
                                 image_shape=(batch_size, 1, 28, 28),
                                 filter_shape=(nkerns[0], 1, 5, 5),
                                 poolsize=(1, 1))

    # output image size = (24 - 3 + 1)/1 = 22
    layer_1 = LeNetConvPoolLayer(rng,
                                 input=layer_0.output,
                                 image_shape=(batch_size, nkerns[0], 24, 24),
                                 filter_shape=(nkerns[1], nkerns[0], 3, 3),
                                 poolsize=(1, 1))

    # output image size = (22 - 3 + 1)/2 = 10
    layer_2 = LeNetConvPoolLayer(rng,
                                 input=layer_1.output,
                                 image_shape=(batch_size, nkerns[1], 22, 22),
                                 filter_shape=(nkerns[2], nkerns[1], 3, 3),
                                 poolsize=(2, 2))

    # output image size = (10 - 3 + 1)/2 = 4
    layer_3 = LeNetConvPoolLayer(rng,
                                 input=layer_2.output,
                                 image_shape=(batch_size, nkerns[2], 10, 10),
                                 filter_shape=(nkerns[3], nkerns[2], 3, 3),
                                 poolsize=(2, 2))

    # output image size = (4 - 3 + 1 + 2*1)/1 = 4
    layer_4 = LeNetConvPoolLayer(rng,
                                 input=layer_3.output,
                                 image_shape=(batch_size, nkerns[3], 4, 4),
                                 filter_shape=(nkerns[4], nkerns[3], 3, 3),
                                 poolsize=(1, 1),
                                 border_mode=1)

    # output image size = (4 - 3 + 1 + 2*1)/2 = 2
    layer_5 = LeNetConvPoolLayer(rng,
                                 input=layer_4.output,
                                 image_shape=(batch_size, nkerns[4], 4, 4),
                                 filter_shape=(nkerns[5], nkerns[4], 3, 3),
                                 poolsize=(2, 2),
                                 border_mode=1)

    # make the input to hidden layer 2 dimensional
    layer_6_input = layer_5.output.flatten(2)

    layer_6 = HiddenLayer(rng,
                          input=layer_6_input,
                          n_in=nkerns[5] * 2 * 2,
                          n_out=200,
                          activation=T.tanh)

    layer_7 = LogReg(input=layer_6.output, n_in=200, n_out=10)

    # teacher soft targets (dumped by the teacher run, Example #4)
    teacher_p_y_given_x = theano.shared(numpy.asarray(
        pickle.load(open('prob_best_model.pkl', 'rb')),
        dtype=theano.config.floatX),
                                        borrow=True)
    p_y_given_x = T.matrix('p_y_given_x')
    e = theano.shared(value=0, name='e', borrow=True)

    # distillation cost: student NLL plus an annealed symmetric cross-entropy
    # against the teacher's distribution
    cost = layer_7.neg_log_likelihood(
        y) + 2.0 / e * T.mean(-T.log(layer_7.p_y_given_x) * p_y_given_x -
                              layer_7.p_y_given_x * T.log(p_y_given_x))

    # teacher's stored intermediate activations used to guide layer_3
    tg = theano.shared(numpy.asarray(
        pickle.load(open('modified_guided_data.pkl', 'rb')),
        dtype=theano.config.floatX),
                       borrow=True)
    guiding_weights = T.tensor4('guiding_weights')
    #guide_cost = T.mean(-T.log(layer_3.output)*guiding_weights - layer_3.output*T.log(guiding_weights))
    # hint cost: pull layer_3's activations toward the teacher's stored ones
    guide_cost = T.mean((layer_3.output - guiding_weights)**2)

    test_model = theano.function(
        [index],
        layer_7.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        })

    validate_model = theano.function(
        [index],
        layer_7.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        })

    # list of parameters
    params = layer_7.params + layer_6.params + layer_5.params + layer_4.params \
        + layer_3.params + layer_2.params + layer_1.params + layer_0.params
    # parameters up to the guided layer only
    params_gl = layer_3.params + layer_2.params + layer_1.params + layer_0.params

    grads_gl = T.grad(guide_cost, params_gl)
    updates_gl = [(param_i, param_i - learning_rate / 10 * grad_i)
                  for param_i, grad_i in zip(params_gl, grads_gl)]

    grads = T.grad(cost, params)
    updates = [(param_i, param_i - learning_rate * grad_i)
               for param_i, grad_i in zip(params, grads)]

    train_model = theano.function(
        [index, train_epoch],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size],
            p_y_given_x: teacher_p_y_given_x[index],
            e: train_epoch
        })

    train_till_guided_layer = theano.function(
        [index],
        guide_cost,
        updates=updates_gl,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size],
            guiding_weights: tg[index]
        },
        on_unused_input='ignore')

    # -----------------------------------------Starting Training ------------------------------

    print('..... Training ')

    # for early stopping
    patience = 10000
    patience_increase = 2

    improvement_threshold = 0.95

    validation_frequency = min(n_train_batches, patience // 2)

    best_validation_loss = numpy.inf  # initialising loss to be infinite
    best_itr = 0
    test_score = 0

    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False
    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in range(n_train_batches):
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print('training @ iter = ', iter)
            # guided (hint) phase during the first fifth of the epochs
            if epoch < n_epochs / 5:
                cost_ij_guided = train_till_guided_layer(minibatch_index)
            cost_ij = train_model(minibatch_index, epoch)

            if (iter + 1) % validation_frequency == 0:
                # compute loss on validation set
                validation_losses = [
                    validate_model(i) for i in range(n_valid_batches)
                ]
                this_validation_loss = numpy.mean(validation_losses)

                with open('Student_6_terminal_out', 'a+') as f_:
                    f_.write(
                        'epoch %i, minibatch %i/%i, validation error %f %% \n'
                        % (epoch, minibatch_index + 1, n_train_batches,
                           this_validation_loss * 100.))

                # check with best validation score till now
                if this_validation_loss < best_validation_loss:

                    # improve
                    if this_validation_loss < best_validation_loss * improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_itr = iter

                    test_losses = [
                        test_model(i) for i in range(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)

                    with open('Student_6_terminal_out', 'a+') as f_:
                        f_.write(
                            'epoch %i, minibatch %i/%i, testing error %f %%\n'
                            % (epoch, minibatch_index + 1, n_train_batches,
                               test_score * 100.))
                    with open('best_model_7layer.pkl', 'wb') as f:
                        pickle.dump(params, f)
                    with open('Results_student_6.txt', 'w') as f1:
                        f1.write(str(test_score * 100) + '\n')

            #if patience <= iter:
            #	done_looping = True
            #	break

    end_time = timeit.default_timer()
    with open('Student_6_terminal_out', 'a+') as f_:
        f_.write('Optimization complete\n')
        f_.write(
            'Best validation score of %f %% obtained at iteration %i, '
            'with test performance %f %% \n' %
            (best_validation_loss * 100., best_itr, test_score * 100))
        f_.write('The code ran for %.2fm \n' % ((end_time - start_time) / 60.))
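
During the first n_epochs/5 epochs, Example #3 takes two gradient steps per
minibatch: a hint-style step on guide_cost (MSE against the teacher's stored
layer_3 activations, at a tenth of the learning rate) and then the usual
distillation step. A minimal sketch of that schedule, with hypothetical
stand-ins for the two compiled functions:

def training_schedule(n_epochs, n_train_batches, guided_step, full_step):
    # guided_step / full_step stand in for train_till_guided_layer / train_model
    for epoch in range(1, n_epochs + 1):
        for i in range(n_train_batches):
            if epoch < n_epochs / 5:   # hint phase: shape the lower layers first
                guided_step(i)
            full_step(i, epoch)        # distillation step runs every minibatch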
Example #4
def evaluate_lenet5(learning_rate=0.1,
                    n_epochs=200,
                    dataset='mnist.pkl.gz',
                    nkerns=[20, 50],
                    batch_size=500,
                    testing=0):

    rng = numpy.random.RandomState(32324)

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size

    index = T.lscalar()  # index for each mini batch

    x = T.matrix('x')
    y = T.ivector('y')

    # ------------------------------- Building Model ----------------------------------
    if testing == 0:
        print('...Building the model')

    # output image size = (28 - 5 + 1)/2 = 12
    layer_0_input = x.reshape((batch_size, 1, 28, 28))
    layer_0 = LeNetConvPoolLayer(rng,
                                 input=layer_0_input,
                                 image_shape=(batch_size, 1, 28, 28),
                                 filter_shape=(nkerns[0], 1, 5, 5),
                                 poolsize=(2, 2))

    # output image size = (12 - 5 + 1)/2 = 4
    layer_1 = LeNetConvPoolLayer(rng,
                                 input=layer_0.output,
                                 image_shape=(batch_size, nkerns[0], 12, 12),
                                 filter_shape=(nkerns[1], nkerns[0], 5, 5),
                                 poolsize=(2, 2))

    # make the input to hidden layer 2 dimensional
    layer_2_input = layer_1.output.flatten(2)

    layer_2 = HiddenLayer(rng,
                          input=layer_2_input,
                          n_in=nkerns[1] * 4 * 4,
                          n_out=500,
                          activation=T.tanh)

    layer_3 = LogReg(input=layer_2.output, n_in=500, n_out=10)

    cost = layer_3.neg_log_likelihood(y)

    test_model = theano.function(
        [index],
        layer_3.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        })

    validate_model = theano.function(
        [index],
        layer_3.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        })

    # teacher soft targets for a given training minibatch
    train_predic = theano.function(
        [index],
        layer_3.prob_y_given_x(),
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size]
        })

    # intermediate activations of layer_1, dumped as guidance for a student
    layer_guided = theano.function(
        [index],
        layer_1.output,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size]
        })

    # list of parameters
    params = layer_3.params + layer_2.params + layer_1.params + layer_0.params

    grads = T.grad(cost, params)

    updates = [(param_i, param_i - learning_rate * grad_i)
               for param_i, grad_i in zip(params, grads)]

    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size]
        })

    # -----------------------------------------Starting Training ------------------------------
    if testing == 0:
        print('..... Training ')

    # for early stopping
    patience = 10000
    patience_increase = 2

    improvement_threshold = 0.95

    validation_frequency = min(n_train_batches, patience // 2)

    best_validation_loss = numpy.inf  # initialising loss to be infinite
    best_itr = 0
    test_score = 0

    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping) and testing == 0:
        epoch = epoch + 1
        for minibatch_index in range(n_train_batches):
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print('training @ iter = ', iter)

            cost_ij = train_model(minibatch_index)

            if (iter + 1) % validation_frequency == 0:
                # compute loss on validation set
                validation_losses = [
                    validate_model(i) for i in range(n_valid_batches)
                ]
                this_validation_loss = numpy.mean(validation_losses)

                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # check with best validation score till now
                if this_validation_loss < best_validation_loss:

                    # improve
                    # if this_validation_loss < best_validation_loss * improvement_threshold:
                    # 	patience = max(patience, iter*patience_increase)

                    best_validation_loss = this_validation_loss
                    best_itr = iter

                    test_losses = [
                        test_model(i) for i in range(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)

                    print('epoch %i, minibatch %i/%i, testing error %f %%' %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

                    with open('best_model.pkl', 'wb') as f:
                        pickle.dump(params, f)

                    with open('Results_teacher.txt', 'w') as f2:
                        f2.write(str(test_score * 100) + '\n')

                    # dump the teacher's softmax outputs over the whole
                    # training set (loaded by Examples #1 and #3)
                    p_y_given_x = [
                        train_predic(i) for i in range(n_train_batches)
                    ]
                    with open('prob_best_model.pkl', 'wb') as f1:
                        pickle.dump(p_y_given_x, f1)

            # if patience <= iter:
            # 	done_looping = True
            # 	break

    # dump layer_1's activations over the training set as guidance data
    layer_2_op_dump = [layer_guided(i) for i in range(n_train_batches)]
    with open('layer_guided.pkl', 'wb') as lg:
        pickle.dump(layer_2_op_dump, lg)

    end_time = timeit.default_timer()

    if testing == 0:
        print('Optimization complete')
        print('Best validation score of %f %% obtained at iteration %i, '
              'with test performance %f %%' %
              (best_validation_loss * 100., best_itr, test_score * 100))
        print('The code ran for %.2fm' % ((end_time - start_time) / 60.))
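
A quick sanity check, assuming the teacher run above has produced its dumps:
'prob_best_model.pkl' holds the soft targets that Examples #1 and #3 load, and
'layer_guided.pkl' holds the layer_1 activations (presumably the source of the
'modified_guided_data.pkl' that Example #3 reads):

import pickle
import numpy

with open('prob_best_model.pkl', 'rb') as f:
    soft_targets = numpy.asarray(pickle.load(f))
print(soft_targets.shape)  # expected (n_train_batches, batch_size, 10)

with open('layer_guided.pkl', 'rb') as f:
    guided = numpy.asarray(pickle.load(f))
print(guided.shape)  # expected (n_train_batches, batch_size, nkerns[1], 4, 4)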