Example #1
 def __init__(self, weights=None, ai=None):
     self.x = 60
     self.y = randint(10, cn.HEIGHT - 10)  # random y between 10 and the screen height minus 10
     self.speed = 7
     self.ai = ai
     self.rect = pg.Rect(self.x,self.y,cn.B_WIDTH, cn.B_HEIGHT)
     self.dead = False
     if not weights:
         self.net = nn.Network()  # create a fresh network with random weights
     else:
         self.net = nn.Network(weights)  # seed the network with the supplied weights
     self.fitness = 0
Example #2
	def __init__(self,env):
		self.action_space=env.action_space
		print('env.action_space: ' + str(env.action_space))
		print('env.observation_space: ' + str(env.observation_space))
		print('env.reward_range: ' + str(env.reward_range))
		INPUTS=env.observation_space.shape[0]
		OUTPUTS=env.action_space.n
		highVec=env.observation_space.high
		lowVec=env.observation_space.low
		#LAYERDIM=[2,1025,2]
		#LAYERDIM=[2,500,10,2]
		LAYERDIM=[INPUTS,500,OUTPUTS]
		GAMMA=0.005

		self.NN=nn.Network(LAYERDIM,GAMMA)
		filename=task+'.csv'
		self.filename=filename
		try:
			with open(filename,'r'):
				pass  # probe for a saved network; the handle is closed right away
			x=raw_input('found '+filename+'.  load network?  ([y]/n)')
			if(x=='' or x=='y'):
				print('loading '+filename+'...')
				self.NN=nn.loadNetwork(filename)
		except IOError:
			x=raw_input(filename+' not found.  start?  ([y]/n)')
			if(x=='n'):
				exit()
Example #3
def test_2layer_net():
    params = init_toy_model()
    X, y = init_toy_data()
    Y_enc = ut.encode_labels(y)
    # Make the net
    layer_1 = layers.Linear(*params['W1'].T.shape,
                            reg='frob',
                            reg_param=0.05,
                            init_vals=(params['W1'].T, params['b1'].ravel()))
    act_1 = layers.Relu()
    layer_2 = layers.Linear(*params['W2'].T.shape,
                            reg='frob',
                            reg_param=0.05,
                            init_vals=(params['W2'].T, params['b2'].ravel()))
    net_2 = nn.Network([layer_1, act_1, layer_2], ls.CrossEntropy(),
                       optim.SGD(lr=1e-5))
    scores = net_2.forward(X)
    correct_scores = np.asarray([[-1.07260209, 0.05083871, -0.87253915],
                                 [-2.02778743, -0.10832494, -1.52641362],
                                 [-0.74225908, 0.15259725, -0.39578548],
                                 [-0.38172726, 0.10835902, -0.17328274],
                                 [-0.64417314, -0.18886813, -0.41106892]])
    diff = np.sum(np.abs(scores - correct_scores))
    assert (np.isclose(diff, 0.0, atol=1e-6))
    loss = net_2.loss(X, Y_enc)
    correct_loss = 1.071696123862817
    assert (np.isclose(loss, correct_loss, atol=1e-8))
Example #4
    def init_and_train_network(self, params_net):
        data_loader = self.load_data(params_net['data'])
        use_bias = bool(params_net['use_bias'])
        neural_network = nn.Network(
            bias=use_bias, shape_in=pd.DataFrame(
                data_loader.train_x).shape).init_network(save_plot_values=True)
        for layer in params_net['hidden_layers']:
            if layer['type'] == 'dropout':
                neural_network.add_layer(
                    layer=layers.DropoutLayer(dropout_prob=layer['prob'],
                                              nr_neurons=layer['nr_neurons']))
            else:
                neural_network.add_layer(size=layer['nr_neurons'],
                                         activation=layer['activation'],
                                         init_type=layer['init'])

        neural_network.add_output(data_loader.train_y.shape,
                                  params_net['out_activ'],
                                  params_net['loss'],
                                  init_type=params_net['out_init'])
        batch_size = params_net['batch_size']
        if params_net['online']:
            batch_size = len(data_loader.train_x)
        neural_network.train_network(data_loader.train_x,
                                     data_loader.train_y,
                                     data_loader.test_x,
                                     data_loader.test_y,
                                     online=params_net['online'],
                                     learning_rate=params_net['learning_rate'],
                                     nr_epochs=params_net['epochs'],
                                     batch_size=batch_size)
        return
Example #5
 def loss_func_b(bb):
     layer_lin = layers.Linear(n,
                               c,
                               reg='l2',
                               reg_param=0.05,
                               init_vals=(W.T, bb.ravel()))
     loss_func = ls.CrossEntropy()
     net = nn.Network([layer_lin], loss_func, optimizer=None)
     return net.loss(X_dev, Y_dev_enc)
Example #6
def test_CrossEntropyLoss():
    np.random.seed(1)
    W = np.random.randn(c, n) * 0.0001
    b = np.random.randn(c, 1) * 0.0001
    layer_lin = layers.Linear(n, c, init_vals=(W.T, b.ravel()))
    loss_func = ls.CrossEntropy()
    net = nn.Network([layer_lin], loss_func, optimizer=None)
    my_loss = net.loss(X_dev, Y_dev_enc)
    assert (np.isclose(my_loss, -np.log(.1), atol=1e-2))
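Why -np.log(.1)? With near-zero initial weights the scores are all roughly zero, so the softmax output is roughly uniform over the c classes; for c = 10 the cross-entropy of the true class is -log(1/10) ~= 2.3026. A minimal standalone check of that expectation (a sketch assuming c = 10, independent of the nn module):

import numpy as np

c = 10
scores = np.zeros(c)                            # near-zero weights give near-zero scores
probs = np.exp(scores) / np.exp(scores).sum()   # softmax of equal scores is uniform: 1/c each
loss = -np.log(probs[0])                        # cross-entropy reads off the true class probability
print(loss, -np.log(1.0 / c))                   # both print ~2.302585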
Example #7
def test_Network():
	shape=[2,2,2]
	net=nn.Network(shape)
	coef=np.array([xrange(net.initialParametersSize())]).T*0.1+1e-12
	input=np.array([[0.4,0.5],[6.0,7]])
	out=np.array([[0.1,0.8],[2,1.1]])
	net.calculate(input,coef,out,verbose=True)
	assert(aux.testDerivative(lambda x:net.calculate(input,x,out),coef))
	print "Tests passed."
Example #8
def test_EncDecode():
	net=nn.Network([3,2,3])
	ip=np.array([xrange(net.initialParametersSize())]).T
	mc=net.__decode__(ip)
	fv=net.__encode__(mc)
	print ip
	print mc
	print fv
	assert(all(fv==ip))
	print "Tests passed."
Example #9
def create_phenotype(chromosome):
    """ Receives a chromosome and returns its phenotype (a neural network) """
    import nn
    # bias parameter is missing (default=0)
    neurons_list = [nn.Neuron(ng.type, ng.id, ng.bias, ng.response) \
                    for ng in chromosome.node_genes]

    conn_list = [(cg.innodeid, cg.outnodeid, cg.weight) \
                 for cg in chromosome.conn_genes if cg.enabled]

    return nn.Network(neurons_list, conn_list)
Example #10
 def __init__(self, *largs, **dargs):
     super(TestFFNN, self).__init__(*largs, **dargs)
     bow = data.S1DataSet('label_sent.txt', 2, True)
     F = nn.FeedForwardLayer
     S = nn.Sigmoid
     T = nn.Tanh
     opt = nn.SteepestGradientOptimizer(1, 0.9)
     layers = ((F, 2, S), (F, None, T))
     self.w = nn.Network(layers, bow, opt)
     opt.init_weights(np.random.uniform)
     self.opt = opt
Example #11
    def __init__(self, playerNum):
        self.num = playerNum
        self.epsilon = 1.0
        self.gamma = 0.6
        #self.alpha = 0.052  # left commented out: using the network default
        self.last = None
        self.experiences = []
        self.max_experiences = 500
        self.experience_idx = 0

        self.NN = nn.Network(
            [nn.Layer(18, 150, activation=nn.ReLU()),
             nn.Layer(150, 9)])
Example #12
File: exp.py Project: alexkupri/nn
def sphere_experiment(dim,samples,network_shape,penalty=nn.MixedPenalty,proving_samples=1000):
	print "Starting experiment on spheres of dimension",dim,"learning samples=",samples,"shape",network_shape
	input=np.random.rand(dim,samples)*4-2 #All values belong to [-2,2]
	#input=aux.regularGrid()
	output=sphere(input)
	assert(len(network_shape)>=2 and network_shape[0]==dim and network_shape[-1]==1)
	network=nn.Network(network_shape,penalty=penalty)
	parameters,err=opt.optimizeNetwork(network,input,output)
	proving_input=np.random.rand(dim,proving_samples)*4-2
	proving_output=sphere(proving_input)
	network_on_learning=network.calculate(input,parameters)
	network_on_new=network.calculate(proving_input,parameters)
	output_statistics(output,network_on_learning,"Learning subset")
	output_statistics(proving_output,network_on_new,"New subset")
Example #13
def main():
    # import  mnist_loader as mn
    # training_data, validation_data, test_data = mn.load_data_wrapper()
    # print(type(test_data))
    import loadMNIST as dl
    training_data, validation_data, test_data = dl.load_data()

    print "Datset Loaded"

    import nn
    net = nn.Network([784, 30, 10])
    print "network done"
    accuracy = net.SGD(training_data, 1, 25, 3.0, test_data=test_data)
    print "Accuracy " + str(accuracy)
Example #14
def test_CrossEntropy_Linear_Grad():
    np.random.seed(1)
    W = np.random.randn(c, n) * 0.0001
    b = np.random.randn(c, 1) * 0.0001
    layer_lin = layers.Linear(n,
                              c,
                              reg='l2',
                              reg_param=0.05,
                              init_vals=(W.T, b.ravel()))
    loss_func = ls.CrossEntropy()
    net = nn.Network([layer_lin], loss_func, optimizer=None)
    net_loss = net.loss(X_dev, Y_dev_enc)
    ngrad = net.backward()

    # Define functions to pass to helper
    def loss_func_W(ww):
        layer_lin = layers.Linear(n,
                                  c,
                                  reg='l2',
                                  reg_param=0.05,
                                  init_vals=(ww.T, b.ravel()))
        loss_func = ls.CrossEntropy()
        net = nn.Network([layer_lin], loss_func, optimizer=None)
        return net.loss(X_dev, Y_dev_enc)

    def loss_func_b(bb):
        layer_lin = layers.Linear(n,
                                  c,
                                  reg='l2',
                                  reg_param=0.05,
                                  init_vals=(W.T, bb.ravel()))
        loss_func = ls.CrossEntropy()
        net = nn.Network([layer_lin], loss_func, optimizer=None)
        return net.loss(X_dev, Y_dev_enc)

    # Actually run the test
    rel_err_weight = dutil.grad_check_sparse(loss_func_W,
                                             W,
                                             net.grads[0].T,
                                             10,
                                             seed=42)
    rel_err_bias = dutil.grad_check_sparse(loss_func_b,
                                           b.ravel(),
                                           net.grads[1],
                                           10,
                                           seed=42)
    assert (np.allclose(rel_err_weight,
                        np.zeros(rel_err_weight.shape),
                        atol=1e-4))
    assert (np.allclose(rel_err_bias, np.zeros(rel_err_bias.shape), atol=1e-4))
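The dutil.grad_check_sparse helper is not shown in this example; checks like it compare the analytic gradient against the central difference (f(x+h) - f(x-h)) / (2h) at a few randomly sampled coordinates. A self-contained sketch of that idea (a hypothetical helper, not the project's actual dutil code):

import numpy as np

def grad_check_sparse_sketch(f, x, analytic_grad, num_checks=10, h=1e-5, seed=42):
    """Relative errors between analytic and numeric gradients at random coordinates."""
    rng = np.random.RandomState(seed)
    rel_errs = np.empty(num_checks)
    for k in range(num_checks):
        ix = tuple(rng.randint(dim) for dim in x.shape)  # pick a random coordinate of x
        old = x[ix]
        x[ix] = old + h
        fxph = f(x)                        # f(x + h)
        x[ix] = old - h
        fxmh = f(x)                        # f(x - h)
        x[ix] = old                        # restore the entry
        numeric = (fxph - fxmh) / (2 * h)
        analytic = analytic_grad[ix]
        rel_errs[k] = abs(numeric - analytic) / max(abs(numeric) + abs(analytic), 1e-12)
    return rel_errs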
Example #15
def main():
    # import  mnist_loader as mn
    # training_data, validation_data, test_data = mn.load_data_wrapper()
    # print(type(test_data))
    import dataset_loader as dl
    training_data, validation_data, test_data = dl.load_data()

    # import network
    # net = network.Network([784, 30, 10])
    # net.SGD(training_data, 30, 10, 3.0, test_data=test_data)

    Max = -9999
    alpha = 3
    neuronNumber = 30
    for X in range(30, 100, 5):
        import nn
        net = nn.Network([8, X, 30])
        # print len(net.biases[1])
        # print net.weights
        Y = 0.1
        for y in range(0, 100):
            accuracy = net.SGD(training_data, 20, 10, Y, validation_data)
            print "Accuracy " + str(accuracy)
            if (accuracy > Max):
                Max = accuracy  # record the best accuracy so far
                alpha = Y       # remember the learning rate that produced it
                neuronNumber = X
            Y += .1
            if (Y >= 3.0):  # >= instead of ==: repeated 0.1 floats never hit 3.0 exactly
                break
    import nn
    net = nn.Network([8, neuronNumber, 30])
    # print len(net.biases[1])
    # print net.weights

    accuracy = net.SGD(training_data, 20, 10, alpha, test_data)
    print "TestAccuracy " + str(accuracy)
Example #16
def test_2layer_grad():
    params = init_toy_model()
    X, y = init_toy_data()
    Y_enc = ut.encode_labels(y)
    # Make the net
    layer_1 = layers.Linear(*params['W1'].T.shape,
                            reg='frob',
                            reg_param=0.05,
                            init_vals=(params['W1'].T, params['b1'].ravel()))
    act_1 = layers.Relu()
    layer_2 = layers.Linear(*params['W2'].T.shape,
                            reg='frob',
                            reg_param=0.05,
                            init_vals=(params['W2'].T, params['b2'].ravel()))
    net_2 = nn.Network([layer_1, act_1, layer_2], ls.CrossEntropy(),
                       optim.SGD(lr=1e-5))
    loss = net_2.loss(X, Y_enc)
    net_2.backward()

    def f_change_param(param_name, U):
        if param_name == 3:
            net_2.layers[0].params['b'] = U
        if param_name == 2:
            net_2.layers[0].params['W'] = U
        if param_name == 1:
            net_2.layers[2].params['b'] = U
        if param_name == 0:
            net_2.layers[2].params['W'] = U
        return net_2.loss(X, Y_enc)

    rel_errs = np.empty(4)
    for param_name in range(4):
        f = lambda U: f_change_param(param_name, U)
        if param_name == 3:
            pass_pars = net_2.layers[0].params['b']
        if param_name == 2:
            pass_pars = net_2.layers[0].params['W']
        if param_name == 1:
            pass_pars = net_2.layers[2].params['b']
        if param_name == 0:
            pass_pars = net_2.layers[2].params['W']
        param_grad_num = dutil.grad_check(f, pass_pars, epsilon=1e-5)
        rel_errs[param_name] = ut.rel_error(param_grad_num,
                                            net_2.grads[param_name])
    assert (np.allclose(rel_errs, np.zeros(4), atol=1e-7))
Example #17
def create_phenotype(chromosome):
    """ Receives a chromosome and returns its phenotype (a neural network) """
    
    # first create inputs
    neurons_list = [nn.Neuron('INPUT', i.id, 0, 0) \
                    for i in chromosome.node_genes if i.type == 'INPUT']
    
    # Add hidden nodes in the right order
    for id in chromosome.node_order:
        neurons_list.append(nn.Neuron('HIDDEN', id, chromosome.node_genes[id - 1].bias, chromosome.node_genes[id - 1].response))
        
    # finally the output
    neurons_list.extend(nn.Neuron('OUTPUT', o.id, o.bias, o.response) \
                        for o in chromosome.node_genes if o.type == 'OUTPUT')
    
    assert(len(neurons_list) == len(chromosome.node_genes))
    
    conn_list = [(cg.innodeid, cg.outnodeid, cg.weight) \
                 for cg in chromosome.conn_genes if cg.enabled] 
    
    return nn.Network(neurons_list, conn_list)        
Example #18
def main():
    mnist = dts.MNIST('./')
    net = nn.Network([784, 300, 10], cost_func=nn.CrossEntropyCost)
    times = 80
    evaluation_cost, evaluation_accuracy, training_cost, training_accuracy = \
    net.SGD(mnist.train_data, times, 10, 0.001, evaluation_data=mnist.test_data,
            monitor_evaluation_accuracy=True, monitor_training_cost=True, monitor_training_accuracy=True)
    temp = np.tile(100.0, times)
    evaluation_accuracy = evaluation_accuracy / temp
    x = np.arange(1, times + 1, 1)
    fig = plt.figure()
    ax1 = fig.add_subplot(2, 1, 1)
    ax1 = plt.plot(x, evaluation_accuracy, 'g-', linewidth=2)
    plt.xlabel('Epoch')
    plt.grid()

    ax2 = fig.add_subplot(2, 1, 2)
    ax2 = plt.plot(x, training_cost, 'r-', linewidth=2)
    plt.ylabel('training_cost')
    plt.xlabel('Epoch')
    plt.grid()
    plt.show()
Example #19
def test_load_params(batch_size=45, random_state=123, init=False):
    train_sets = test_init()
    train_set_without_negatives = train_sets['train_set']
    net = nn.Network(batch_size=batch_size, random_state=random_state)
    test_img = np.array(list(train_set_without_negatives.keys()))
    test_img.sort()
    lbl_test = np.array([
        train_set_without_negatives.get(key).get_coordinates()
        for key in test_img
    ])
    for i in range(test_img.shape[0]):
        imgs, lbls = prepare_images.prepare(
            DATASET_PATH + test_img[i].decode('utf8'), lbl_test[i])
        y_pred = net.predict_values(imgs)
        tmp = lbls - y_pred

        tp = np.sum((y_pred == 1) & (lbls == 1))
        tn = np.sum((y_pred == 0) & (lbls == 0))
        fp = np.sum(tmp == -1)
        fn = np.sum(tmp == 1)
        f1_score = 2 * tp / (2 * tp + fn + fp)
        print(
            " f1 score = {}, true positive = {}, true negative = {}, false positive = {}, false negative = {}"
            .format(f1_score, tp, tn, fp, fn))
Example #20
File: main.py Project: say4n/nn
import logging

import nn
import numpy as np
import utils

logging.basicConfig(level=logging.ERROR)

dataset = {
    "train": utils.Dataset("data/mnist_train.csv", 1000),
    "test": utils.Dataset("data/mnist_test.csv", 100)
}

INPUT_DIM = 28 * 28
OUTPUT_DIM = 10

LAYERS = [(INPUT_DIM, 256), (256, 64), (64, 10)]

net = nn.Network("MNIST_Net")
for layer in LAYERS:
    net.add_layer(*layer)

print(net)

for epoch in range(10):
    avg_loss = 0
    for idx, data in enumerate(dataset["train"]):
        x, y = data.get_training_data()

        y_hat = net.forward_propagate(x)  # predict
        net.backward_propagate(y_hat, y)  # backprop
        net.update_parameters()  # update

        loss = np.sum((y_hat - y)**2)
Example #21
#!/usr/bin/python
import nn

the_Network = nn.Network()
print the_Network.run(1)
print the_Network.run(0.5)
print the_Network.run(0)
Example #22
train_data['dis'] = (train_data['dis'] - data_stats['min']['dis']) / (
    data_stats['max']['dis'] - data_stats['min']['dis'])
train_data['rad'] = (train_data['rad'] - data_stats['min']['rad']) / (
    data_stats['max']['rad'] - data_stats['min']['rad'])
train_data['tax'] = (train_data['tax'] - data_stats['min']['tax']) / (
    data_stats['max']['tax'] - data_stats['min']['tax'])
train_data['ptratio'] = (train_data['ptratio'] - data_stats['min']['ptratio']
                         ) / (data_stats['max']['ptratio'] -
                              data_stats['min']['ptratio'])
train_data['black'] = (train_data['black'] - data_stats['min']['black']) / (
    data_stats['max']['black'] - data_stats['min']['black'])

# create neural network with two layers
neural_network = nn.Network(
    [layer.Layer(13, 8, activation=tools.Relu),
     layer.Layer(8, 1)],
    loss_function=tools.MeanSquaredError,
    optimizer=tools.Momentum)

# prepare list of nodes with data for training
train_feature = list()
train_label = list()

for x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, y in zip(
        train_data['rm'], train_data['lstat'], train_data['crim'],
        train_data['zn'], train_data['indus'], train_data['chas'],
        train_data['nox'], train_data['age'], train_data['dis'],
        train_data['rad'], train_data['tax'], train_data['ptratio'],
        train_data['black'], train_data['medv']):
    # it has to be arrays of arrays, because it's possible to have
    # more than one feature per sample
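The five normalisation statements above repeat the same min-max formula per column; an equivalent compact helper (a sketch assuming train_data and data_stats are the pandas-style mappings used above):

def min_max_scale(frame, stats, columns):
    # Rescale each named column to [0, 1] using the recorded min/max statistics.
    for col in columns:
        lo, hi = stats['min'][col], stats['max'][col]
        frame[col] = (frame[col] - lo) / (hi - lo)

min_max_scale(train_data, data_stats, ['dis', 'rad', 'tax', 'ptratio', 'black'])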
Example #23
        predicted = np.argmax(Y_hat) + 1
        y = test_y[j]
        if print_flag:
            print(Y_hat, predicted, y)
        if flag:
            if predicted == y:
                cnt += 1
        else:
            if [1 if t + 1 == predicted else 0 for t in range(3)] == y:
                cnt += 1
    return (cnt / len(test_X))


# In[5]:

net = nn.Network(len(train_data.columns.values), 5, 3, 0.1)
# with open("checkpoint/nn-20000.pkl","rb") as netfile:
#     net = pickle.load(netfile)
#     net.learning_rate = 0.01
loss_history, accuracy_history = train(net, 100000, True)

# In[8]:
"""
Plot the train loss curve
"""
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)

lns1 = ax.plot(loss_history, label="Loss")
Example #24
#!/usr/bin/python
import pygame
from pygame.locals import *

import nn

the_network = nn.Network()

pygame.init()
screen = pygame.display.set_mode((640, 480), 0, 32)
pygame.display.set_caption("NN_maze")
screen.fill((255, 255, 255))

begin_pos = [240, 220]

#screen settings
rc_wall = [0, 0, 0]
rc_space = [255, 255, 255]
rc_apple = [0, 255, 0]
rc_hero = [255, 0, 0]
rs = [10, 10]

#map settings
mazeA = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
         [1, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1],
         [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]


def screen_draw():
    global hero_pos
    rp = [begin_pos[0], begin_pos[1]]
Example #25
from random import shuffle
import numpy as np
import utils
import nn

HIDDEN_NEURONS = 100


def validation_test(output, label):
    return np.argmax(output) == np.argmax(label)


train_x, train_y, valid_x, valid_y, test_x, test_y = utils.load_mnist_data()
train_data = [(feature.reshape((784, 1)), utils.vectorize_y(label)) for feature, label in zip(train_x, train_y)]
valid_data = [(feature.reshape((784, 1)), utils.vectorize_y(label)) for feature, label in zip(valid_x, valid_y)]
test_data = [(feature.reshape(784, 1), utils.vectorize_y(label)) for feature, label in zip(test_x, test_y)]

net = nn.Network([28 * 28, HIDDEN_NEURONS, 10])
net.train(train_data, 50, 0.3, 1.0, momentum=0.5, mini_batch_size=32, cost=nn.CrossEntropyCost,
          validation_data=valid_data,
          validation_test=validation_test)

test_correct = net.evaluate(test_data, validation_test)
print("----------------------------------------")
print(
    "Test accuracy: " + str(test_correct) + "/" + str(len(test_data)) + ";" + str(test_correct / len(test_data) * 100))
Example #26
import numpy as np
import nn
import nnfs
from nnfs.datasets import spiral_data

X = np.random.randn(1, 5)

net = nn.Network([5, 5, 5, 5, 5, 3])
print(net.forward_prop(X))
Example #27
import nn
import data_loader
import parse_tsv
import numpy as np
import sys
#

# Initialize a training data object.
d = data_loader.Loader()
d.load2()
trdata = [(x, y[:2]) for x, y, z in d.data]
d.y_headers = d.y_headers[:2]

# Initialize a network object
sizes = [len(d.x_headers), 10, 10, 10, 10, 10, len(d.y_headers)]
net = nn.Network(sizes)
training_data = trdata
epochs = 30
eta = 3.
mini_batch_size = 10

# optimize the biases and weights of the net
net.SGD2(training_data, epochs, eta, mini_batch_size)

# parse input file
infile = sys.argv[1]
headers, data = parse_tsv.parse_tsv(infile)

# rank samples
indices = [i for i, item in enumerate(headers) if item in d.x_headers]
results = []
Example #28
File: py.py Project: melvinlim/nnet
OUTPUTS = len(out[0])

BATCHSIZE = 1
#raise Exception
#LAYERDIM=[2,1025,2]
#LAYERDIM=[2,500,10,2]
LAYERDIM = [INPUTS, 500, OUTPUTS]
EPOCHS = 100
GAMMA = 0.005
PRINTFREQ = BATCHSIZE

nExamples = len(inp)

np.set_printoptions(precision=4)

NN = nn.Network(LAYERDIM, GAMMA)
filename = task + '.csv'
try:
    with open(filename, 'r'):
        pass  # probe for a saved network; the handle is closed right away
    x = raw_input('found ' + filename + '.  load network?  ([y]/n)')
    if (x == '' or x == 'y'):
        print('loading ' + filename + '...')
        NN = nn.loadNetwork(filename)
except IOError:
    x = raw_input(filename + ' not found.  start?  ([y]/n)')
    if (x == 'n'):
        exit()
t0 = time.clock()
for epoch in range(EPOCHS):
    bInp = []
    bOut = []
Example #29
import nn
import datetime

start_time = datetime.datetime.now()

topology = []
topology.append(3)
topology.append(3)
topology.append(2)
net = nn.Network(topology)
nn.Neuron.eta = 0.5
nn.Neuron.alpha = 0.015
while True:
    err = 0
    inputs = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1],
              [1, 1, 0], [1, 1, 1]]
    # each target is the count of set input bits, encoded as a 2-bit binary number
    outputs = [[0, 0], [0, 1], [0, 1], [1, 0], [0, 1], [1, 0], [1, 0], [1, 1]]
    for i in range(len(inputs)):
        net.setInput(inputs[i])
        net.feedForword()
        net.backPropagate(outputs[i])
        err = err + net.getError(outputs[i])
    print("error: ", err)
    if err < 0.01:
        break

stopped_time = datetime.datetime.now()

print("time to train = ", stopped_time - start_time)

while True:
Example #30
File: test.py Project: ivenk/moreNN
import nn
import mnist_loader

train_data, valid_data, test_data = mnist_loader.load_data_wrapper()

net = nn.Network([784, 30, 10])
net.train(train_data, 30, 30, 3.0, test_data)  # train on the training set, evaluate on test_data