# Module-level imports assumed by the examples below; the model classes
# (DenosingAutoEncoder, LogisticRegression, MLP, HiddenLayer, ConvPoolLayer)
# are assumed to be defined elsewhere in this project.
import os
import sys
import time

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

import loadMNIST


def main():
    # Alternative loader (mnist_loader-style):
    # import mnist_loader as mn
    # training_data, validation_data, test_data = mn.load_data_wrapper()
    import loadMNIST as dl
    training_data, validation_data, test_data = dl.load_data()

    print "Datset Loaded"

    import nn
    net = nn.Network([784, 30, 10])
    print "Network built"
    # Assuming nn.Network.SGD takes (training_data, epochs, mini_batch_size,
    # eta): train for 1 epoch with mini-batches of 25 and learning rate 3.0.
    accuracy = net.SGD(training_data, 1, 25, 3.0, test_data=test_data)
    print "Accuracy: " + str(accuracy)


def test_DAE_mnist(n_epochs=400, dataset='mnist.pkl.gz', batch_size=600,
                   corruption_level=0.2, lambda_=0.001, sparsity_param=0.1,
                   beta=3):
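    # Trains a denoising autoencoder on MNIST. corruption_level is the
    # fraction of corrupted inputs, lambda_ a weight-decay coefficient, and
    # sparsity_param/beta the sparsity target and penalty weight (parameter
    # roles inferred from the get_cost_updates call below).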
    import loadMNIST

    print '...loading datasets'
    dataSets = loadMNIST.load_data(dataset)

    train_set_x, train_set_y = loadMNIST.shared_dataset(dataSets[0])
    #valid_set_x, valid_set_y = loadMNIST.shared_dataset(dataSets[1])
    #test_set_x, test_set_y = loadMNIST.shared_dataset(dataSets[2])

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    #n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    #n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
    print '...datasets loaded successfully.'

    print '...building the model'

    index = T.lscalar('index')
    x = T.matrix('x')
    learning_rate = T.fscalar('lr')
    rng = np.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))
    dAE = DenosingAutoEncoder(
        n_visible=28 * 28, n_hidden=500, np_rng=rng, theano_rng=theano_rng,
        input=x, corruption_level=corruption_level
    )
    cost, updates = dAE.get_cost_updates(learning_rate=learning_rate,
                                         lambda_=lambda_,
                                         sparsity_param=sparsity_param,
                                         beta=beta)
    train_dAE = theano.function(
        inputs=[index, theano.Param(learning_rate, default=0.1)],
        outputs=cost,
        updates=updates,
        givens={x: train_set_x[index * batch_size:(index + 1) * batch_size]}
    )
    print '...model built successfully'

    print '...training the model'
    start_time = time.clock()
    pre_cost = np.inf
    for epoch in xrange(n_epochs):
        c = [train_dAE(index=batch_index, lr=1. / (epoch + 10))
             for batch_index in xrange(n_train_batches)]
        mean_cost = np.mean(c)
        print 'Training epoch %d, cost ' % (epoch + 1), mean_cost
        # Stop once the mean cost is within 0.01 of the best cost so far.
        if abs(pre_cost - mean_cost) < 0.01:
            break
        if mean_cost < pre_cost:
            pre_cost = mean_cost
    end_time = time.clock()
    training_time = end_time - start_time
    print 'Training took %f minutes' % (training_time / 60.)


def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
                           dataset='mnist.pkl.gz', batch_size=600):
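    # Softmax (logistic-regression) classifier on MNIST, trained with
    # minibatch SGD and patience-based early stopping.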
    dataSets = loadMNIST.load_data(dataset)

    train_set_x, train_set_y = loadMNIST.shared_dataset(dataSets[0])
    valid_set_x, valid_set_y = loadMNIST.shared_dataset(dataSets[1])
    test_set_x, test_set_y = loadMNIST.shared_dataset(dataSets[2])

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    print '...building the model'

    index = T.lscalar()
    x = T.matrix('x')
    y = T.ivector('y')
    classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)
    cost = classifier.negative_log_likelihood(y)
    test_model = theano.function(inputs=[index],
                                 outputs=classifier.errors(y),
                                 givens={
                                     x: test_set_x[index * batch_size:(index + 1) * batch_size],
                                     y: test_set_y[index * batch_size:(index + 1) * batch_size]
                                 })
    validate_model = theano.function(inputs=[index],
                                     outputs=classifier.errors(y),
                                     givens={
                                         x: valid_set_x[index * batch_size:(index + 1) * batch_size],
                                         y: valid_set_y[index * batch_size:(index + 1) * batch_size]
                                     })
    g_W = T.grad(cost=cost, wrt=classifier.W)
    g_b = T.grad(cost=cost, wrt=classifier.b)
    updates = [(classifier.W, classifier.W - learning_rate * g_W),
               (classifier.b, classifier.b - learning_rate * g_b)]
    train_model = theano.function(inputs=[index],
                                  outputs=cost,
                                  updates=updates,
                                  givens={
                                      x: train_set_x[index * batch_size:(index + 1) * batch_size],
                                      y: train_set_y[index * batch_size:(index + 1) * batch_size]
                                  })
    print '...training the model'

    # Early-stopping parameters: train on at least `patience` minibatches,
    # and extend patience whenever validation error improves markedly.
    patience = 5000
    patience_increase = 2
    improvement_threshold = 0.995
    validation_frequency = min(n_train_batches, patience / 2)
    best_validation_loss = np.inf
    test_score = 0
    start_time = time.clock()

    done_looping = False
    epoch = 0
    while (epoch < n_epochs) and (not done_looping):
        epoch += 1
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_cost = train_model(minibatch_index)
            iter = (epoch - 1) * n_train_batches + minibatch_index  # iteration number
            if (iter + 1) % validation_frequency == 0:
                validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
                this_validation_loss = np.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' % (
                    epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.))
                if this_validation_loss < best_validation_loss:
                    # A sufficiently large improvement buys more patience.
                    if this_validation_loss < best_validation_loss * improvement_threshold:
                        patience = max(patience, iter * patience_increase)
                    best_validation_loss = this_validation_loss
                    test_losses = [test_model(i) for i in xrange(n_test_batches)]
                    test_score = np.mean(test_losses)
                    print('\tepoch %i, minibatch %i/%i, test error of best model %f %%' % (
                        epoch, minibatch_index + 1, n_train_batches, test_score * 100.))
        if patience <= iter:
            done_looping = True
            break
    end_time = time.clock()
    print(('Optimization complete with best validation score of %f %%, '
           'with test performance %f %%') % (best_validation_loss * 100., test_score * 100.))
    print('The code ran for %d epochs, at %f epochs/sec' % (
        epoch, 1. * epoch / (end_time - start_time)))
    print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] +
                          ' ran for %.1fs' % (end_time - start_time))


def test_mlp(learning_rate_init=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
             dataset='mnist.pkl.gz', batch_size=20, n_hidden=500):
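    # One-hidden-layer MLP with L1/L2 weight decay, trained with minibatch
    # SGD, a decaying learning rate, and patience-based early stopping.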
    print '...loading datasets'
    dataSets = loadMNIST.load_data(dataset)

    train_set_x, train_set_y = loadMNIST.shared_dataset(dataSets[0])
    valid_set_x, valid_set_y = loadMNIST.shared_dataset(dataSets[1])
    test_set_x, test_set_y = loadMNIST.shared_dataset(dataSets[2])

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    print '...building the model'
    index = T.lscalar()
    x = T.matrix('x')
    y = T.ivector('y')
    learning_rate = T.fscalar('lr')
    rng = np.random.RandomState(1234)

    classifier = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=n_hidden, n_out=10)
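    # Cost: negative log-likelihood plus L1 and L2 penalties on the weights.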
    cost = (classifier.negative_log_likelihood(y) +
            L1_reg * classifier.L1 + L2_reg * classifier.L2_sqr)

    test_model = theano.function(inputs=[index],
                                 outputs=classifier.errors(y),
                                 givens={
                                     x: test_set_x[index * batch_size:(index + 1) * batch_size],
                                     y: test_set_y[index * batch_size:(index + 1) * batch_size]
                                 })
    validate_model = theano.function(inputs=[index],
                                     outputs=classifier.errors(y),
                                     givens={
                                         x: valid_set_x[index * batch_size:(index + 1) * batch_size],
                                         y: valid_set_y[index * batch_size:(index + 1) * batch_size]
                                     })
    gparams = [T.grad(cost, param) for param in classifier.params]
    updates = [(param, param - learning_rate * gparam)
               for param, gparam in zip(classifier.params, gparams)]
    train_model = theano.function(inputs=[index, theano.Param(learning_rate, default=0.1)],
                                  outputs=cost,
                                  updates=updates,
                                  givens={
                                      x: train_set_x[index * batch_size:(index + 1) * batch_size],
                                      y: train_set_y[index * batch_size:(index + 1) * batch_size]
                                  })
    print '...training'

    # Early-stopping parameters (same patience scheme as in
    # sgd_optimization_mnist, with a larger initial patience).
    patience = 10000
    patience_increase = 2
    improvement_threshold = 0.995
    validation_frequency = min(n_train_batches, patience / 2)

    best_validation_loss = np.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    done_looping = False
    epoch = 0

    while (epoch < n_epochs) and (not done_looping):
        epoch += 1
        for minibatch_index in xrange(n_train_batches):
            # Decaying learning-rate schedule: large in early epochs,
            # approaching the 0.05 floor as epoch grows.
            minibatch_avg_cost = train_model(minibatch_index,
                                             learning_rate_init * 10 / (epoch + 1) + 0.05)
            iter = (epoch - 1) * n_train_batches + minibatch_index  # iteration number
            if (iter + 1) % validation_frequency == 0:
                validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
                this_validation_loss = np.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' % (
                    epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.))
                if this_validation_loss < best_validation_loss:
                    if this_validation_loss < best_validation_loss * improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_iter = iter
                    test_losses = [test_model(i) for i in xrange(n_test_batches)]
                    test_score = np.mean(test_losses)
                    print('\tepoch %i, minibatch %i/%i, test error of best model %f %%' % (
                        epoch, minibatch_index + 1, n_train_batches, test_score * 100.))
        if patience <= iter:
            done_looping = True
            break
    end_time = time.clock()
    print(('Optimization complete with best validation score of %f %%, '
           'obtained at iteration %i, with test performance %f %%') %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print('The code ran for %d epochs, at %f epochs/sec' % (
        epoch, 1. * epoch / (end_time - start_time)))
    print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] +
                          ' ran for %.1fm' % ((end_time - start_time) / 60.))


def test_conv_mnist(
    learning_rate=0.01,
    L1_reg=0.00,
    L2_reg=0.001,
    n_epochs=1000,
    dataset="mnist.pkl.gz",
    batch_size=600,
    nkerns=[20, 50],
):
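    # LeNet-style convnet: two conv+maxpool stages with nkerns[0] and
    # nkerns[1] feature maps, a 500-unit tanh hidden layer, and a
    # logistic-regression output layer.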

    import loadMNIST

    print "...loading datasets"
    dataSets = loadMNIST.load_data(dataset)

    train_set_x, train_set_y = loadMNIST.shared_dataset(dataSets[0])
    valid_set_x, valid_set_y = loadMNIST.shared_dataset(dataSets[1])
    test_set_x, test_set_y = loadMNIST.shared_dataset(dataSets[2])

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
    print "...load datasets successfully."

    print "...building the model"
    index = T.lscalar()
    x = T.matrix("x")
    y = T.ivector("y")

    rng = np.random.RandomState(1234)

    layer0_input = x.reshape((batch_size, 1, 28, 28))
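    # layer0: 28x28 input -> conv 5x5 -> 24x24 -> maxpool 2x2 -> 12x12 maps.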
    layer0 = ConvPoolLayer(
        rng, input=layer0_input, image_shape=(batch_size, 1, 28, 28), filter_shape=(nkerns[0], 1, 5, 5), poolsize=(2, 2)
    )

    layer1 = ConvPoolLayer(
        rng,
        input=layer0.output,
        image_shape=(batch_size, nkerns[0], 12, 12),
        filter_shape=(nkerns[1], nkerns[0], 5, 5),
        poolsize=(2, 2),
    )
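    # layer1: 12x12 -> conv 5x5 -> 8x8 -> maxpool 2x2 -> 4x4 maps, so the
    # flattened input to the hidden layer has nkerns[1] * 4 * 4 units.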

    layer2_input = layer1.output.flatten(2)
    layer2 = HiddenLayer(rng, input=layer2_input, n_in=nkerns[1] * 4 * 4, n_out=500, activation=T.tanh)
    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)

    test_model = theano.function(
        inputs=[index],
        outputs=layer3.errors(y),
        givens={
            x: test_set_x[index * batch_size : (index + 1) * batch_size],
            y: test_set_y[index * batch_size : (index + 1) * batch_size],
        },
    )
    validate_model = theano.function(
        inputs=[index],
        outputs=layer3.errors(y),
        givens={
            x: valid_set_x[index * batch_size : (index + 1) * batch_size],
            y: valid_set_y[index * batch_size : (index + 1) * batch_size],
        },
    )

    params = layer3.params + layer2.params + layer1.params + layer0.params
    # L1/L2 penalties over all layer weights; the L1 term is the sum of
    # absolute values per weight matrix, not the absolute value of the sums.
    classifier_L1 = abs(layer3.W).sum() + abs(layer2.W).sum() + abs(layer1.W).sum() + abs(layer0.W).sum()
    classifier_L2 = (layer3.W ** 2).sum() + (layer2.W ** 2).sum() + (layer1.W ** 2).sum() + (layer0.W ** 2).sum()
    cost = layer3.negative_log_likelihood(y) + 1.0 / batch_size * (L1_reg * classifier_L1 + L2_reg * classifier_L2)
    grads = T.grad(cost, params)
    updates = [(param_i, param_i - learning_rate * grad_i) for param_i, grad_i in zip(params, grads)]
    train_model = theano.function(
        inputs=[index],
        outputs=cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size : (index + 1) * batch_size],
            y: train_set_y[index * batch_size : (index + 1) * batch_size],
        },
    )

    print "...build the model successfully"

    # Performs the actual training and early-stopping
    print "...training the model"

    patience = 5000
    patience_increase = 2
    improvement_threshold = 0.995
    validation_frequency = min(n_train_batches, patience / 2)
    best_validation_loss = np.inf
    test_score = 0

    start_time = time.clock()
    done_looping = False
    epoch = 0
    while (epoch < n_epochs) and (not done_looping):
        epoch += 1
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_cost = train_model(minibatch_index)
            iter = (epoch - 1) * n_train_batches + minibatch_index  # iteration number
            if (iter + 1) % validation_frequency == 0:
                validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
                this_validation_loss = np.mean(validation_losses)
                print (
                    "epoch %i, minibatch %i/%i, validation error %f %%"
                    % (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.0)
                )
                if this_validation_loss < best_validation_loss:
                    if this_validation_loss < best_validation_loss * improvement_threshold:
                        patience = max(patience, iter * patience_increase)
                    best_validation_loss = this_validation_loss
                    test_losses = [test_model(i) for i in xrange(n_test_batches)]
                    test_score = np.mean(test_losses)
                    print (
                        "\tepoch %i, minibatch %i/%i, test error of best model %f %%"
                        % (epoch, minibatch_index + 1, n_train_batches, test_score * 100.0)
                    )
        if patience <= iter:
            done_looping = True
            break
    end_time = time.clock()
    print (
        ("Optimization complete with best validation score of %f %%, "
         "with test performance %f %%")
        % (best_validation_loss * 100.0, test_score * 100.0)
    )
    print ("The code ran for %d epochs, at %f epochs/sec" % (epoch, 1.0 * epoch / (end_time - start_time)))
    print >> sys.stderr, (
        "The code for file " + os.path.split(__file__)[1] + " ran for %.1fm" % ((end_time - start_time) / 60.0)
    )