Code example #1
0
				 lmbda - the regularization term
				 model_file - the name of the file to store the final classification model
	"""


# Script setup: load, flatten, and scale the train/test data for a
# regularized classifier (alpha/lmbda/maxiter are its hyperparameters).
# NOTE: time.perf_counter() replaces time.clock(), which was deprecated
# in Python 3.3 and removed in Python 3.8.
start_time = time.perf_counter()

# Hyperparameters for training
alpha = 0.01    # learning rate
lmbda = 0       # regularization strength (0 disables regularization)
maxiter = 100   # maximum number of training iterations

# Load the CSV files, timing the load
# (second argument presumably toggles header/label handling -- confirm
# against FileProcess.load_csv)
time_load_start = time.perf_counter()
X_train, y_train = fipr.load_csv("train_file.csv", True)
X_test, y_test = fipr.load_csv("test_file.csv", True)
y_train = y_train.flatten()   # labels as 1-D arrays
y_test = y_test.flatten()
time_load_end = time.perf_counter()
print("Loading finished, loading time: %g seconds" %
      (time_load_end - time_load_start))

# Additional "even" test set (presumably class-balanced -- confirm upstream)
X_test_even, y_test_even = fipr.load_csv("test_file_even.csv", True)
y_test_even = y_test_even.flatten()

# Scale features into [0, 1] to encourage gradient descent convergence
X_train = fipr.scale_features(X_train, 0.0, 1.0)
X_test = fipr.scale_features(X_test, 0.0, 1.0)

X_test_even = fipr.scale_features(X_test_even, 0.0, 1.0)
Code example #2
0
def main():
    """Train a small feed-forward neural network (NN) on CSV data and
    report timing plus test results on both the regular and the "even"
    test sets.

    Uses time.perf_counter() for timing; time.clock() was deprecated in
    Python 3.3 and removed in Python 3.8.
    """
    # Load the CSV files, timing the load
    time_load_start = time.perf_counter()
    X_train, y_train = fipr.load_csv("train_file.csv", True)
    X_test, y_test = fipr.load_csv("test_file.csv", True)
    y_train = y_train.flatten()   # labels as 1-D arrays
    y_test = y_test.flatten()
    time_load_end = time.perf_counter()
    print("Loading finished, loading time: %g seconds" %
          (time_load_end - time_load_start))

    # Additional "even" test set (presumably class-balanced -- confirm upstream)
    X_test_even, y_test_even = fipr.load_csv("test_file_even.csv", True)
    y_test_even = y_test_even.flatten()

    # Scale features into [0, 1] to encourage gradient descent convergence
    X_train = fipr.scale_features(X_train, 0.0, 1.0)
    X_test = fipr.scale_features(X_test, 0.0, 1.0)
    X_test_even = fipr.scale_features(X_test_even, 0.0, 1.0)

    # NN.train/NN.test expect a list of [features, label] pairs
    pattern_train = [[x, y] for x, y in zip(X_train, y_train)]
    pattern_test = [[x, y] for x, y in zip(X_test, y_test)]
    pattern_test_even = [[x, y] for x, y in zip(X_test_even, y_test_even)]

    # Create a network with 200 inputs, 4 hidden nodes, and 1 output node
    n = NN(200, 4, 1)

    # Train, timing the run
    time_train_start = time.perf_counter()
    n.train(pattern_train)
    time_train_end = time.perf_counter()
    print("Training finished, training time: %g seconds \n" %
          (time_train_end - time_train_start))

    # Evaluate on the regular test set, timing the run
    time_test_start = time.perf_counter()
    n.test(pattern_test)
    time_test_end = time.perf_counter()
    print("Testing finished, testing time: %g seconds  \n" %
          (time_test_end - time_test_start))

    # Evaluate on the EVEN test set
    n.test(pattern_test_even)
Code example #3
0
def main():
    """Pre-train, fine-tune, and evaluate a stacked denoising autoencoder
    (StackedDA) on CSV data, reporting timing and accuracy on both the
    regular and the "even" test sets.

    Uses time.perf_counter() for timing; time.clock() was deprecated in
    Python 3.3 and removed in Python 3.8.

    Returns:
        The trained StackedDA instance.
    """
    # Load the CSV files, timing the load
    time_load_start = time.perf_counter()
    X_train, y_train = fipr.load_csv("train_file.csv", True)
    X_test, y_test = fipr.load_csv("test_file.csv", True)
    time_load_end = time.perf_counter()
    print("Loading finished, loading time: %g seconds" %
          (time_load_end - time_load_start))

    # Additional "even" test set (presumably class-balanced -- confirm upstream)
    X_test_even, y_test_even = fipr.load_csv("test_file_even.csv", True)

    training_data = X_train
    training_labels = y_train
    test_data = X_test
    test_labels = y_test
    test_data_even = X_test_even
    test_labels_even = y_test_even

    # Build the SDA with a single 100-unit hidden layer
    sDA = StackedDA([100])

    # Train, timing the run
    time_train_start = time.perf_counter()
    print('Pre-training...')

    # Layer-wise pre-training on the first 1000 samples with 30% input noise
    sDA.pre_train(training_data[:1000], noise_rate=0.3, epochs=100)
    print('Training Network...')

    # Add the supervised output layer, then fine-tune the whole network
    sDA.finalLayer(training_data, training_labels, epochs=500)
    sDA.fine_tune(training_data, training_labels, epochs=500)

    time_train_end = time.perf_counter()
    print("Training finished, training time: %g seconds \n" %
          (time_train_end - time_train_start))

    # Evaluate on the regular test set, timing the run
    time_test_start = time.perf_counter()
    print('Testing performance...')

    # Predicted class = index of the highest-scoring output unit
    y_pred = sDA.predict(test_data).argmax(1)
    print('Accuracy:  ' + str(fipr.compute_accuracy(test_labels, y_pred)))

    time_test_end = time.perf_counter()
    print("Testing finished, testing time: %g seconds  \n" %
          (time_test_end - time_test_start))

    # Evaluate on the EVEN test set
    y_pred_even = sDA.predict(test_data_even).argmax(1)
    print('Accuracy on EVEN set:  ' +
          str(fipr.compute_accuracy(test_labels_even, y_pred_even)))

    return sDA
Code example #4
0
import urllib
import time
import tensorflow as tf
import FileProcess as fipr

from Mnist import Mnist
mnist = Mnist()

# NOTE(review): tf.InteractiveSession is TensorFlow 1.x API -- this script
# will not run under TF 2.x without tf.compat.v1; confirm the pinned version.
sess = tf.InteractiveSession()

# Start counting time
# (time.perf_counter() replaces time.clock(), which was deprecated in
# Python 3.3 and removed in Python 3.8)
start_time = time.perf_counter()

# Load the training CSV file, timing the load; the test-set loading and
# label flattening are currently disabled
time_load_start = time.perf_counter()
X_train, y_train = fipr.load_csv("train_file.csv", True)
#X_test, y_test = fipr.load_csv("test_file.csv", True)
#y_train = y_train.flatten()
#y_test = y_test.flatten()
time_load_end = time.perf_counter()
#print("Loading finished, loading time: %g seconds" % (time_load_end - time_load_start))

training_data = X_train
training_labels = y_train

# Debug output: inspect the label array's type, element type, and shape
print(type(training_labels))
print(type(training_labels[0, 0]))
print(training_labels.shape)

print('original labels:')
print(training_labels[3])