from pyneurgen.neuralnet import NeuralNet


def buildIrisNetwork(all_inputs, all_targets):
    net = NeuralNet()
    net.init_layers(4, [6], 3)

    net.randomize_network()
    net.set_halt_on_extremes(True)

    #   Set to constrain beginning weights to -.5 to .5
    #       Just to show we can
    #net.set_random_constraint(.5)
    net.set_learnrate(.1)

    net.set_all_inputs(all_inputs)
    net.set_all_targets(all_targets)

    length = len(all_inputs)
    learn_end_point = int(length * .5)

    net.set_learn_range(0, learn_end_point)
    net.set_test_range(learn_end_point + 1, length - 1)

    net.layers[0].set_activation_type('tanh')
    net.layers[1].set_activation_type('tanh')
    net.layers[2].set_activation_type('threshold')
    return net
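The function above only builds and configures the network. A minimal driver, sketched here under the assumption that all_inputs and all_targets already hold the 4-feature Iris vectors and their 3-way target encodings (epochs=125 is an illustrative choice, not from the original source), could look like this:

net = buildIrisNetwork(all_inputs, all_targets)
net.learn(epochs=125, show_epoch_results=True, random_testing=True)
mse = net.test()
print("test set MSE:", round(mse, 6))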
def performNN(all_extracted_features, all_targets):
    from pyneurgen.neuralnet import NeuralNet
    #from pyneurgen.nodes import BiasNode, Connection
    net = NeuralNet()
    net.init_layers(len(all_extracted_features[0]), [2], 1)
    
    net.randomize_network()
    net.set_halt_on_extremes(True)
    
    #   Set to constrain beginning weights to -.5 to .5
    #       Just to show we can
    #net.set_random_constraint(.5)
    net.set_learnrate(.001)
    
    net.set_all_inputs(all_extracted_features)
    net.set_all_targets(all_targets)
    
    length = len(all_extracted_features)
    learn_end_point = int(length * .8)
    
    net.set_learn_range(0, learn_end_point)
    net.set_test_range(learn_end_point + 1, length - 1)
    
    net.layers[1].set_activation_type('tanh')
    net.learn(epochs=150, show_epoch_results=True, random_testing=True)
    mse = net.test()
    print(mse)
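A minimal sketch of calling performNN on toy data; the sample count, feature count, and values below are illustrative assumptions only:

import random

# 100 illustrative samples, 3 features each, one binary target apiece
features = [[random.random() for _ in range(3)] for _ in range(100)]
targets = [[float(random.randint(0, 1))] for _ in range(100)]
performNN(features, targets)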
net.randomize_network()
net.set_halt_on_extremes(True)

#   Set to constrain beginning weights to -.5 to .5
#       Just to show we can
#net.set_random_constraint(.5)
net.set_learnrate(.1)

net.set_all_inputs(all_inputs)
net.set_all_targets(all_targets)

length = len(all_inputs)
learn_end_point = int(length * .5)

net.set_learn_range(0, learn_end_point)
net.set_test_range(learn_end_point + 1, length - 1)

net.layers[0].set_activation_type('tanh')
net.layers[1].set_activation_type('tanh')
net.layers[2].set_activation_type('tanh')

iterations = 500
trn_errors = []
tst_errors = []
tst_perc_errors = []
################################
#   left off: was checking whether normal GA is learning same as
#   learn epochs=1. need to check percent error validation is all
#   correct. Write tests!!!!
################################
for i in range(iterations):
Example #5
net.randomize_network()
net.set_halt_on_extremes(True)

#   Set to constrain beginning weights to -.5 to .5
#       Just to show we can
net.set_random_constraint(.5)
net.set_learnrate(.1)
net.set_all_inputs(all_inputs)
net.set_all_targets(all_targets)

net.set_learn_range(indexes_learn)
net.get_learn_range()
net.set_test_range(indexes_test)
net.get_test_range()
net.layers[0].set_activation_type('tanh')
net.layers[1].set_activation_type('tanh')
net.layers[2].set_activation_type('tanh')
net.layers[3].set_activation_type('tanh')

# train the network
net.learn(epochs=1200, show_epoch_results=True,
    random_testing=True)

mse = net.test()

# extract predicted values
all_learn = [item[1][0] for item in net.get_learn_data()]
learn_positions = [item[0][3] for item in net.get_learn_data()]
Example #6
    y_train = scaler_y.fit_transform(y_train)
    y_test = scaler_y.transform(y_test)
    x_input = np.concatenate(
        (x_train, x_test, np.zeros((1, np.shape(x_train)[1]))))
    y_input = np.concatenate((y_train, y_test, np.zeros((1, 1))))
    # build the neural network model with the defined parameters
    fit1 = NeuralNet()
    fit1.init_layers(input_nodes, [hidden_nodes], output_nodes,
                     ElmanSimpleRecurrent())
    fit1.randomize_network()
    fit1.layers[1].set_activation_type('sigmoid')
    fit1.set_learnrate(0.05)
    fit1.set_all_inputs(x_input)
    fit1.set_all_targets(y_input)
    fit1.set_learn_range(0, i)
    fit1.set_test_range(i, i + 1)
    fit1.learn(epochs=100, show_epoch_results=True, random_testing=False)
    mse = fit1.test()
    all_mse.append(mse)
    print("test set MSE = ", np.round(mse, 6))
    target = [item[0][0] for item in fit1.test_targets_activations]
    target = scaler_y.inverse_transform(
        np.array(target).reshape((len(target), 1)))
    pred = [item[1][0] for item in fit1.test_targets_activations]
    pred = scaler_y.inverse_transform(np.array(pred).reshape((len(pred), 1)))
    real_y_test.append(target[0][0])
    predicted_y_test.append(pred[0][0])
    filehandler = open('objects/elman/el_' + str(i) + '.obj', 'wb')
    pickle.dump(fit1, filehandler)
    filehandler.close()
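Each windowed model is pickled to its own file above. A sketch of loading one back later, assuming the same objects/elman/ path and the binary mode used when writing:

filehandler = open('objects/elman/el_' + str(i) + '.obj', 'rb')
fit1 = pickle.load(filehandler)
filehandler.close()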
Example #7
net.randomize_network()
net.set_halt_on_extremes(True)

#   Set to constrain beginning weights to -.5 to .5
#       Just to show we can
net.set_random_constraint(.5)
net.set_learnrate(.1)

net.set_all_inputs(all_inputs)
net.set_all_targets(all_targets)

length = len(all_inputs)
learn_end_point = int(length * .8)

net.set_learn_range(0, learn_end_point)
net.set_test_range(learn_end_point + 1, length - 1)

net.layers[1].set_activation_type('tanh')

net.learn(epochs=125, show_epoch_results=True,
    random_testing=False)

mse = net.test()

test_positions = [item[0][1] * 1000.0 for item in net.get_test_data()]

all_targets1 = [item[0][0] for item in net.test_targets_activations]
allactuals = [item[1][0] for item in net.test_targets_activations]

fig = plt.figure()
ax1 = fig.add_subplot(311)
Example #8
import numpy as np

from pyneurgen.neuralnet import NeuralNet
from pyneurgen.recurrent import NARXRecurrent

input_nodes = 1
hidden_nodes = 5
output_nodes = 1

output_order = 20
incoming_weight_from_output = .5
input_order = 20
incoming_weight_from_input = .5

net = NeuralNet()
net.init_layers(
    input_nodes, [hidden_nodes], output_nodes,
    NARXRecurrent(output_order, incoming_weight_from_output, input_order,
                  incoming_weight_from_input))

net.randomize_network()

X = np.linspace(0, 10.0, num=10001)
Y = simpleWeierstrassTimeSeries(X)
Y = Y.reshape(-1, 1)

net.set_all_inputs(Y[:-1])
net.set_all_targets(Y[1:])

net.set_learn_range(0, 8000)
net.set_test_range(8000, 9999)

print(net.test())
net.learn(epochs=5)
print(net.test())
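simpleWeierstrassTimeSeries is not defined in this snippet. A plausible stand-in, assuming a truncated Weierstrass-type sum of the form sum_n a^n cos(b^n pi x), with illustrative parameters a=0.5, b=3, and 20 terms, might be:

def simpleWeierstrassTimeSeries(x, a=0.5, b=3, n_terms=20):
    # truncated Weierstrass sum, evaluated elementwise on the numpy array x
    return sum(a ** n * np.cos(b ** n * np.pi * x) for n in range(n_terms))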
Example #9
import random

import matplotlib.pyplot as plt

from pyneurgen.neuralnet import NeuralNet
from pyneurgen.recurrent import NARXRecurrent


def serNeural(sDay, nAhead, x0, hWeek):
    nLin = sDay.shape[0] + nAhead
    nFit = sDay.shape[0] if int(x0['obs_time']) <= 14 else int(x0['obs_time'])
    predS = getHistory(sDay, nAhead, x0, hWeek)
    weekS = [x.isocalendar()[1] for x in sDay.index]
    population = [[float(i), sDay['y'][i], float(i % 7), weekS[i]]
                  for i in range(sDay.shape[0])]
    all_inputs = []
    all_targets = []
    factorY = sDay['y'].mean()
    factorT = 1.0 / float(len(population)) * factorY
    factorD = 1. / 7. * factorY
    factorW = 1. / 52. * factorY
    factorS = 4. * sDay['y'].std()
    factorH = factorY / sDay['hist'].mean()

    def population_gen(population):
        pop_sort = [item for item in population]
        # random.shuffle(pop_sort)
        for item in pop_sort:
            yield item

    for t, y, y1, y2 in population_gen(population):
        # all_inputs.append([t*factorT, (.5-random.random())*factorS+factorY, y1*factorD, y2*factorW])
        all_inputs.append([y1 * factorD, (.5 - random.random()) * factorS + factorY, y2 * factorW])
        all_targets.append([y])

    if False:
        plt.plot([x[0] for x in all_inputs],'-',label='targets0')
        plt.plot([x[1] for x in all_inputs],'-',label='targets1')
        plt.plot([x[2] for x in all_inputs],'-',label='targets2')
        # plt.plot([x[3] for x in all_inputs],'-',label='targets3')
        plt.plot([x[0] for x in all_targets],'-',label='actuals')
        plt.legend(loc='lower left', numpoints=1)
        plt.show()

    net = NeuralNet()
    net.init_layers(3, [10], 1, NARXRecurrent(3, .6, 2, .4))
    net.randomize_network()
    net.set_random_constraint(.5)
    net.set_learnrate(.1)
    net.set_all_inputs(all_inputs)
    net.set_all_targets(all_targets)
    #predS['pred'] = [item[0][0] for item in net.test_targets_activations]
    length = len(all_inputs)
    learn_end_point = int(length * .8)
    # random.sample(all_inputs,10)
    net.set_learn_range(0, learn_end_point)
    net.set_test_range(learn_end_point + 1, length - 1)
    net.layers[1].set_activation_type('tanh')

    net.learn(epochs=125, show_epoch_results=True, random_testing=False)
    mse = net.test()
    #net.save(os.environ['LAV_DIR'] + "/out/train/net.txt")

    test_positions = [item[0][0] for item in net.get_test_data()]
    all_targets1 = [item[0][0] for item in net.test_targets_activations]
    all_actuals = [item[1][0] for item in net.test_targets_activations]
    #   This is quick and dirty, but it will show the results
    plt.subplot(3, 1, 1)
    plt.plot([i for i in sDay['y']],'-')
    plt.title("Population")
    plt.grid(True)
    
    plt.subplot(3, 1, 2)
    plt.plot(test_positions, all_targets1, 'b-', label='targets')
    plt.plot(test_positions, all_actuals, 'r-', label='actuals')
    plt.grid(True)
    plt.legend(loc='lower left', numpoints=1)
    plt.title("Test Target Points vs Actual Points")

    plt.subplot(3, 1, 3)
    plt.plot(range(1, len(net.accum_mse) + 1, 1), net.accum_mse)
    plt.xlabel('epochs')
    plt.ylabel('mean squared error')
    plt.grid(True)
    plt.title("Mean Squared Error by Epoch")
    plt.show()
Example #10
net.set_random_constraint(.5)
net.set_learnrate(.1)

# net.set_all_inputs(training_data[:, 1])  # this results in [a, b, ..., z] not [[a], [b], ..., [z]]
# net.set_all_targets(training_data[:, 2])
net.set_all_inputs([[row[1]]
                    for row in selected_data])  # wanting [[a], [b], ..., [z]]
net.set_all_targets([[row[2]] for row in selected_data])

length = len(selected_data)
learn_end_point = int(
    (end_validate_idx - start_training_idx) / pick_every * .8)  # validation

net.set_learn_range(0, (end_validate_idx - start_training_idx) // pick_every)
net.set_test_range((end_validate_idx - start_training_idx) // pick_every,
                   (end_test_idx - start_training_idx) // pick_every)
net.layers[1].set_activation_type('tanh')

# train
net.learn(epochs=125, show_epoch_results=True)
mse = net.test()

# test and generate charts
all_real = [item[0] for item in net.test_targets_activations]
all_targets = [item[1] for item in net.test_targets_activations]

plot(selected_data_time[(end_validate_idx - start_training_idx) //
                        pick_every:(end_test_idx - start_training_idx) //
                        pick_every],
     all_targets,
     'bo',
Example #11
print()
print()
g = ges.population[ges.fitness_list.best_member()]
program = g.local_bnf['program']

saved_model = g.local_bnf['<saved_name>'][0]

#   We will create a brand new model
net = NeuralNet()
net.load(saved_model)

net.set_all_inputs(all_inputs)
net.set_all_targets(all_targets)

test_start_point = int(pop_len * .8) + 1
net.set_test_range(test_start_point, pop_len - 1)
mse = net.test()

print("The selected model has the following characteristics")
print("Activation Type:", net.layers[1].nodes[1].get_activation_type())
print("Hidden Nodes:", len(net.layers[1].nodes), ' + 1 bias node')
print("Learn Rate:", net.get_learnrate())
print("Epochs:", net.get_epochs())

test_positions = [item[0][0] * pop_len for item in net.get_test_data()]

all_targets1 = [item[0][0] for item in net.test_targets_activations]
allactuals = [item[1][0] for item in net.test_targets_activations]

#   This is quick and dirty, but it will show the results
fig = figure()
Example #13
fit1 = NeuralNet()
fit1.init_layers(
    input_nodes, [hidden_nodes], output_nodes,
    NARXRecurrent(output_order, incoming_weight_from_output, input_order,
                  incoming_weight_from_input))
fit1.randomize_network()
fit1.layers[1].set_activation_type('sigmoid')
fit1.set_learnrate(0.35)
fit1.set_all_inputs(x)
fit1.set_all_targets(y)

length = len(x)
learn_end_point = int(length * 0.85)
fit1.set_learn_range(0, learn_end_point)
fit1.set_test_range(learn_end_point + 1, length - 1)

fit1.learn(epochs=10, show_epoch_results=True, random_testing=False)

mse = fit1.test()
print("MSE for test set: ", round(mse, 6))

plt.figure(figsize=(15, 6))
plt.plot(np.arange(len(fit1.accum_mse)), fit1.accum_mse)
plt.xlabel('Epochs')
plt.ylabel('Mean Squared Error')
plt.savefig('../figs/fig9.png')

yhat = [i[1][0] for i in fit1.test_targets_activations]
yhat = scaler.inverse_transform(np.array(yhat).reshape((len(yhat), 1)))
yhat = yhat.flatten()
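For comparison, the test-set targets can be recovered the same way, mirroring the yhat extraction above (a sketch; scaler is assumed to be the same y-scaler used to transform the targets earlier):

# target side of each (target, activation) pair, un-scaled back to original units
y_true = [i[0][0] for i in fit1.test_targets_activations]
y_true = scaler.inverse_transform(np.array(y_true).reshape((len(y_true), 1)))
y_true = y_true.flatten()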