Example #1 (score: 0)
File: main.py — Project: chrisworld/nn_ku
def main():
    """Load the isolet dataset and exercise the BatchNormalizer API.

    Side effects: prints the shape of the training features and calls the
    normalizer's accessors. Exploratory/debug code — the accessor results
    are discarded.
    """
    X, C, X_tst, C_tst = load_isolet()
    print(X.shape)

    # Fit the normalizer on the training features only.
    b1 = BatchNormalizer(X)
    # NOTE(review): return values are discarded — presumably these methods
    # print or cache internally; verify against BatchNormalizer.
    b1.getMean()
    b1.getStd()
    b1.getNormalized()
    b1.getBatches()
Example #2 (score: 0)
File: main.py — Project: chrisworld/nn_ku
def main():
    """Load isolet, build mini-batches for both splits, and print shapes.

    Side effects: prints the shapes of the train and test feature matrices.
    """
    X, C, X_tst, C_tst = load_isolet()

    # Normalizer is configured on the training data; batch_size=40 with
    # shuffling for training batches.
    b1 = BatchNormalizer(X, C, batch_size=40, shuffle=True)
    # NOTE(review): train/test batch lists are built but not used further
    # in this snippet — kept in case getBatches has internal side effects.
    train = b1.getBatches(X, C)
    test = b1.getBatches(X_tst, C_tst, test=True)

    print(X.shape)
    print(X_tst.shape)
Example #3 (score: 0)
from Trainer import Trainer
from Evaluator import Evaluator
from Model import Model

from nn18_ex2_load import load_isolet

import numpy as np
import logging
import os

if __name__ == '__main__':

    # BUG FIX: ErrorCollector was referenced below without any import,
    # which raises NameError at runtime. Imported here, following the
    # file's `from X import X` convention — TODO confirm module name.
    from ErrorCollector import ErrorCollector

    # training and validation error collector
    ec = ErrorCollector()

    X, C, X_tst, C_tst = load_isolet()

    # Hyper-parameters and model: 26 outputs = isolet letter classes.
    epochs = 5
    learning_rate = 0.01  # 0.001 was tried previously
    model = Model(n_in=X.shape[1], n_hidden=300, n_out=26, n_layer=1)
    batch_size = 40

    # Set up logging. Create the log directory first so basicConfig does
    # not fail with FileNotFoundError on a fresh checkout.
    os.makedirs('logs', exist_ok=True)
    log_file_name = ('logs' + os.sep + 'Log'
                     + '_ep' + str(epochs)
                     + '_hidu' + str(model.n_hidden)
                     + '_hidl' + str(model.n_layer)
                     + '_lr' + str(learning_rate) + '.log')
    logging.basicConfig(filename=log_file_name, level=logging.INFO)
Example #4 (score: 0)
import tensorflow as tf
import numpy as np
import numpy.random as rd
from nn18_ex2_load import load_isolet
import matplotlib.pyplot as plt

# Load the isolet splits and report their dimensions, in load order:
# train features, train labels, test features, test labels.
Xa, C, X_test, C_test = load_isolet()
for split in (Xa, C, X_test, C_test):
    print(split.shape)

def createC(array):
    """One-hot encode 1-based class labels.

    Parameters
    ----------
    array : sequence of int (or int-valued floats)
        Labels in the range 1..26 (isolet letter classes).

    Returns
    -------
    numpy.ndarray of shape (len(array), 26)
        Row i has a 1.0 at column array[i] - 1, zeros elsewhere.
    """
    # Coerce to int so float-typed labels (common after file loading)
    # index correctly; the original float indexing breaks on modern numpy.
    labels = np.asarray(array, dtype=int)
    res = np.zeros((len(labels), 26))
    # Vectorized one-hot assignment replaces the Python-level loop.
    res[np.arange(len(labels)), labels - 1] = 1
    return res


# One-hot encode the integer class labels for the train and test splits.
Ca = createC(C)
Ca_Test = createC(C_test)
# Network dimensions: 300 input features, 26 output classes (letters),
# 20 hidden units. NOTE(review): n_in is hard-coded — presumably it must
# equal Xa.shape[1]; confirm against the loaded data.
n_in = 300
n_out = 26
n_hidden = 20

# Input-to-hidden weights, initialized with Gaussian noise scaled by
# 1/sqrt(n_in) to keep pre-activation variance near 1.
W_hid = tf.Variable(rd.randn(n_in, n_hidden) / np.sqrt(n_in), trainable=True)