#activation_functions.PReLU(learning_methods.Momentum(.01,.7),20),
#activation_functions.PReLU(learning_methods.Momentum(.01,.7),30)
# Per-layer activations: tanh on the hidden layer, softmax on the output.
network_activations = [
    activation_functions.Tanh(),
    activation_functions.Softmax(),
]


def reduceL(t):
    """Collapse one-hot labels to class indices, in place.

    Each element of *t* is an (input, label) pair whose label is a
    one-hot vector; it is replaced by (input, argmax(label)).
    The list is mutated in place and also returned for convenience.
    """
    for pos, (sample, onehot) in enumerate(t):
        t[pos] = (sample, np.argmax(onehot))
    return t


# Training hyperparameters.
eta = 3          # learning rate for the momentum optimizer
lmbda = 0        # L2 regularization strength (0 disables regularization)
epochs = 64
mini_batch = 10  # mini-batch size; also fixes the batch-norm layer's batch dimension below
# Assemble the fully-connected network. network_topology is defined elsewhere
# in the file; network_activations is defined above. The regularizer slot is
# None here because regularization is instead supplied to the optimizer below
# (Momentum wrapping L2Reg). The final argument adds one batch-norm layer
# configured with its own momentum optimizer and shape (30, mini_batch).
net=fcnetwork.FCNetwork(network_topology, \
                        network_activations, \
                        cost_functions.CrossEntropy(),\
                        None, \
                        [bn.BNLayer(learning_methods.Momentum(.01,.7),.06,(30,mini_batch))]\
                        )
# Load MNIST as (train, valid, test); train is materialized to a list below
# so it can be sliced and iterated more than once.
train, valid, test = mnist_loader.load_data_wrapper()

train_list = list(train)
#reg.L2Reg(lbmda,len(train_list))
# Train with momentum SGD; L2 regularization is folded into the update rule.
# Evaluation data: the first 10k training samples (one-hot labels collapsed
# to class indices via reduceL), plus the full test and validation sets.
net.learn(train_list,epochs,mini_batch, \
learning_methods.Momentum(eta,.7,reg.L2Reg(lmbda,len(train_list))), \
test_data=[reduceL(train_list[:10000]),list(test),list(valid)])
# ---------------------------------------------------------------------------
# Example 2
# ---------------------------------------------------------------------------
import functions as f
import activation as act
import cost_functions as cost

from data_prep import DataPrep
from NeuralNetwork import NeuralNetwork

# Set the network and training parameters.
n = 100                          # presumably a size/count parameter — confirm against later use
n_epochs = 300                   # number of training epochs
n_batches = 100                  # number of mini-batches per epoch
neurons = [50, 50]               # hidden-layer widths
n_outputs = 10                   # one output per digit class (0-9)
hidden_act = act.Sigmoid()       # activation used in hidden layers
output_act = act.Softmax()       # activation used in the output layer
cost_func = cost.CrossEntropy()
no_hidden = False                # presumably toggles a no-hidden-layer network — verify in NeuralNetwork
seed = 2034                      # RNG seed for reproducibility

# Load the digits dataset.
# NOTE(review): `datasets` is not imported anywhere in this chunk — it needs
# `from sklearn import datasets`. Also note load_digits() is scikit-learn's
# small 8x8 digits set, not the full MNIST the original comment suggested.
digits = datasets.load_digits()

# Define input data and labels.
dataset = digits.images                 # per-sample image arrays (2-D per image)
labels = digits.target.reshape(-1, 1)   # class labels as an (N, 1) column vector

# Flatten each image into a single feature row.
N = len(dataset)
dataset = dataset.reshape(N, -1)        # (N, pixels): one flat row per image

# Transform labels to one-hot vectors and split into training and test sets