Example #1
    print __doc__

    # load the pretrained autoencoder parameters: weights W and hidden bias bh
    with open(sys.argv[1], 'r') as f:
        W, _, bh = pic.load(f)

    # resume from a logistic-regression backup matched to this input size
    # if one exists; otherwise fall back to pretraining from scratch
    try:
        with open('logreg_backup_' + str(W.shape[0]) + '.pkl', 'r') as f:
            W2, b2 = pic.load(f)
        pretrain = False
    except IOError:
        W2 = None
        b2 = None
        pretrain = True

    labels, train, test = pp.load_from_csv(sys.argv[2], sys.argv[3])

    train, test, labels, test_labels = cv.train_test_split(train,
                                                           labels,
                                                           test_size=0.2)

    mlp = ae.TwoLayerPerceptron(784,
                                W.shape[1],
                                10,
                                W_1_init=W,
                                b_1_init=bh,
                                W_2_init=W2,
                                b_2_init=b2)

    if pretrain:
        # greedy pretraining
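The snippet is truncated here, but the backup-or-pretrain pattern above is self-contained. A minimal sketch of that pattern, assuming each backup pickles a (W, b) tuple as in the example (the file name and the load_backup helper are hypothetical):

import pickle

def load_backup(path):
    # return the pickled (W, b) pair, or (None, None) if no usable backup exists
    try:
        with open(path, 'rb') as f:
            return pickle.load(f)
    except (IOError, EOFError):
        return None, None

W2, b2 = load_backup('logreg_backup_784.pkl')
pretrain = W2 is None   # pretrain only when no backup was found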
Example #2
"""

import preprocess as pp
import sys
import autoencoder as ae
import csv
import cPickle as pic

import sklearn.decomposition as dec

if __name__ == '__main__':

    print __doc__

    labels, train_features, test_features = pp.load_from_csv(
        sys.argv[1], sys.argv[2])

    # an optional sixth command-line argument turns on PCA whitening
    try:

        if sys.argv[6] == 'whiten':

            # fit the whitening transform on the training features only,
            # then apply the same projection to the test features
            pca = dec.PCA(whiten=True)

            pca.fit(train_features)

            train_features = pca.transform(train_features)

            test_features = pca.transform(test_features)

        else:
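For reference, a self-contained sketch of the same fit-on-train, transform-both pattern with scikit-learn's PCA (the random arrays below are stand-ins for the real feature matrices):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
train = rng.normal(size=(200, 20))   # stand-in training features
test = rng.normal(size=(50, 20))     # stand-in test features

pca = PCA(whiten=True)
train_w = pca.fit_transform(train)   # learn the projection from training data only
test_w = pca.transform(test)         # reuse the same projection on test data

# whitening rescales every principal component to unit variance
print(np.round(train_w.std(axis=0, ddof=1), 2))   # all (close to) 1.0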
Example #3

import preprocess as pp
import train_classifier as clf
from autoencoder import LogisticRegression
import numpy as np

if __name__ == "__main__":

    labels, train, test = pp.load_from_csv('train.csv', 'test.csv')

    # 42,000 training rows split into 420 mini-batches of 100
    # flattened 28x28 images (784 pixels each)
    train_batches = train.reshape((420, 100, 784))

    label_batches = np.array(labels).reshape((420, 100))

    logreg = LogisticRegression(784, 10)

    logreg.fit(train_batches, label_batches, nbatches=420)
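The reshape relies on the training set having exactly 42,000 rows (420 mini-batches of 100 images, 784 = 28x28 pixels each); NumPy raises a ValueError if the sizes do not divide evenly. A quick self-contained check with stand-in data:

import numpy as np

train = np.zeros((42000, 784))      # stand-in for the loaded training matrix
labels = np.arange(42000) % 10      # stand-in digit labels

train_batches = train.reshape((420, 100, 784))
label_batches = labels.reshape((420, 100))

assert train_batches[0].shape == (100, 784)   # one mini-batch of 100 images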
Example #4
                    # otherwise, if the cost is still falling but too
                    # slowly, scale the learning rate up by a factor of 1.1
                    if np.abs(rel_cost_change) < tol_low and rel_cost_change < 0.0:
                        learning_rate = 1.1 * learning_rate

                    # if the cost is changing by less than the tolerance,
                    # stop: learning is done
                    if np.abs(rel_cost_change) < tolerance:
                        break



if __name__ == '__main__':

    print __doc__ % (sys.argv[1], sys.argv[2], sys.argv[3])

    labels, train, test = pp.load_from_csv(sys.argv[1], sys.argv[2])

    # set values to something useful; batch size and number of epochs
    # don't seem to make much of a difference
    training_epochs = 300
    training_batches = 100

    # patch size: for 28x28 images, 10x10 patches seemed reasonable
    # (15x15 is used here)
    patch_size = 15

    batch_size = int(train.shape[0] / training_batches)

    n_filters = 500

    output_file = ('ae_' + str(patch_size) + 'x' + str(patch_size) + '_'
                   + str(n_filters) + '_filters_backup.pkl')
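The adaptive schedule in the fragment above can be read on its own. A minimal sketch of the rule, assuming rel_cost_change is the relative change between consecutive epoch costs (the fragment does not show its definition):

import numpy as np

def adapt_learning_rate(prev_cost, cost, learning_rate,
                        tol_low=1e-3, tolerance=1e-6):
    # relative cost change between consecutive epochs (assumed definition)
    rel_cost_change = (cost - prev_cost) / prev_cost

    # cost still falling, but slowly: scale the rate up by a factor of 1.1
    if np.abs(rel_cost_change) < tol_low and rel_cost_change < 0.0:
        learning_rate = 1.1 * learning_rate

    # cost effectively flat: signal that learning is done
    done = np.abs(rel_cost_change) < tolerance
    return learning_rate, done

rate, done = adapt_learning_rate(prev_cost=10.0, cost=9.999, learning_rate=0.1)
print(rate, done)   # 0.11..., False: the cost fell slowly, so the rate was bumped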