Example #1
    def start(self):
        # testing()
        dataset = self.dataset
        print(dataset)
        image_x, image_y = get_image_size(dataset)
        amount_classes = get_amount_of_classes(dataset)

        # Pooling size
        poolsize_x = self.pooling_x
        poolsize_y = self.pooling_y
        # Learning rate
        user_learning_rate = self.learn_r
        # Epochs to be trained and batch size
        user_nepochs = self.epochs
        user_batch = self.batch

        # Size of the convolution filter windows
        user_filter_x = self.filter_x
        user_filter_y = self.filter_y

        # Threshold for model training
        user_threshold = 0.995

        # Amount of layers given by user
        n = self.layers
        print('Please wait until all iterations are completed.')
        evaluate_lenet5(learning_rate=self.learn_r, n_epochs=self.epochs,
                        dataset=self.dataset,
                        nkerns=[20, 50], batch_size=self.batch)
Example #2
def test_convolutional_mlp():
    convolutional_mlp.evaluate_lenet5(n_epochs=1, nkerns=[5, 5])
Example #3
        dataset[n, N] = files_data[n][1]

    return dataset


dataset = createDataSet()
# outputFile = "c:\\temp\\foo.csv"
# np.savetxt(outputFile, np.asarray(dataset), delimiter=",", fmt='%d')


def getFormattedDataSet(dataset):
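    # Split the 19-row dataset into (features, label) pairs for training,
    # validation, and testing, then wrap each split with shared_dataset()
    # (Theano shared variables).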

    train_set = (dataset[0:10, :-1], dataset[0:10, -1])
    valid_set = (dataset[10:14, :-1], dataset[10:14, -1])
    test_set = (dataset[14:19, :-1], dataset[14:19, -1])

    train_set_x, train_set_y = shared_dataset(train_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    test_set_x, test_set_y = shared_dataset(test_set)

    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y), (test_set_x, test_set_y)]
    return rval


evaluate_lenet5(getFormattedDataSet(dataset), learning_rate=0.01)


# plt.imshow(bla2, cmap=plt.cm.gray)
# l = misc.lena()
# plt.show()
Example #4
def test_convolutional_mlp():
    t0 = time.time()
    convolutional_mlp.evaluate_lenet5(n_epochs=5, nkerns=[5, 5])
    print("test_convolutional_mlp took %.3fs expected 168s in our buildbot"
          % (time.time() - t0), file=sys.stderr)
Example #5
logistic_cg.cg_optimization_mnist(mnist_pkl_gz=c10)

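# Each run below redirects stdout to its own log under results/cifar-10_results/
# and trains one model variant on the c10 dataset (presumably CIFAR-10, given the
# output directory).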
sys.stdout = open('results/cifar-10_results/lsgd.out', 'w')
logistic_sgd.sgd_optimization_mnist(dataset=c10)

sys.stdout = open('results/cifar-10_results/lsgd_gau.out', 'w')
logistic_sgd_gaussian.sgd_optimization_mnist(dataset=c10)

sys.stdout = open('results/cifar-10_results/lsgd_bin.out', 'w')
logistic_sgd_binomial.sgd_optimization_mnist(dataset=c10)

sys.stdout = open('results/cifar-10_results/mlp.out', 'w')
mlp.test_mlp(dataset=c10)

sys.stdout = open('results/cifar-10_results/mlpO.out', 'w')
# mlp_dropOut.test_mlp(p=0.8, n_hidden = 100)
mlp_dropOut.test_mlp(dataset=c10)

sys.stdout = open('results/cifar-10_results/mlpC.out', 'w')
mlp_dropConnect.test_mlp(dataset=c10)

sys.stdout = open('results/cifar-10_results/convo.out', 'w')
convolutional_mlp.evaluate_lenet5(dataset=c10)

sys.stdout = open('results/cifar-10_results/convoC.out', 'w')
# con_mlp_dropConnect.evaluate_lenet5(p=0.8)
con_mlp_dropConnect.evaluate_lenet5(dataset=c10)

sys.stdout = open('results/cifar-10_results/convoO.out', 'w')
con_mlp_dropOut.evaluate_lenet5(dataset=c10)
Example #6
def test_convolutional_mlp():
    convolutional_mlp.evaluate_lenet5(n_epochs=1, nkerns=[5, 5])
def conv_mlp_train_and_predict(train_set_x, train_set_y, valid_set_x, valid_set_y,
                               test_set_x, test_set_y, rng, isrbg=False):
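    # Train a LeNet-style conv net on copies of the provided data splits and
    # return its predictions (PRED); isrbg is passed through to evaluate_lenet5.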
    PRED = evaluate_lenet5(np.copy(train_set_x), np.copy(train_set_y),
                           np.copy(valid_set_x), np.copy(valid_set_y),
                           np.copy(test_set_x), np.copy(test_set_y),
                           rng, isrbg=isrbg)
    return PRED
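
For reference, most of the examples above call evaluate_lenet5 with keyword arguments matching the Theano DeepLearningTutorials version of convolutional_mlp.py (learning_rate, n_epochs, dataset, nkerns, batch_size), while Examples #3 and #6 use project-specific variants that take the data splits directly. A minimal sketch of the tutorial-style call, assuming that module is on the import path and mnist.pkl.gz is available locally; the values shown are illustrative, not taken from these examples:

from convolutional_mlp import evaluate_lenet5

# Short smoke-test run of the LeNet-5-style tutorial model.
evaluate_lenet5(
    learning_rate=0.1,       # SGD step size
    n_epochs=1,              # keep training short for a quick check
    dataset='mnist.pkl.gz',  # pickled (train, valid, test) splits
    nkerns=[20, 50],         # feature maps in the two conv/pool layers
    batch_size=500,          # minibatch size
)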