Example #1
    def setUp(self):

        self.net = NeuralNet()

        layer = Layer(0, 'input')
        layer.add_nodes(1, 'input')
        self.net.layers.append(layer)

        layer = Layer(1, 'hidden')
        layer.add_nodes(1, 'hidden')
        self.net.layers.append(layer)

        layer = Layer(2, 'output')
        layer.add_nodes(1, 'output')
        self.net.layers.append(layer)

        #   Specify connections
        self.net.layers[1].nodes[0].add_input_connection(
            Connection(self.net.layers[0].nodes[0],
                       self.net.layers[1].nodes[0], 1.00))

        self.net.layers[2].nodes[0].add_input_connection(
            Connection(self.net.layers[1].nodes[0],
                       self.net.layers[2].nodes[0], .75))

        self.net._epochs = 1
        self.net.copy_levels = 0
        self.net._allinputs = [[.1], [.2], [.3], [.4], [.5]]
        self.net._alltargets = [[.2], [.4], [.6], [.8], [1.0]]

        self.net.input_layer = self.net.layers[0]
        self.net.output_layer = self.net.layers[-1]
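
The fixture wires a 1-1-1 network by hand but asserts nothing on its own. A minimal companion test sketch; the input_connections attribute is an assumption about where pyneurgen's Node stores what add_input_connection receives, not something the snippet confirms:

    def test_wiring(self):
        # Hypothetical check that setUp created exactly one inbound
        # connection per non-input node; `input_connections` is an
        # assumed attribute name.
        hidden = self.net.layers[1].nodes[0]
        output = self.net.layers[2].nodes[0]
        self.assertEqual(len(hidden.input_connections), 1)
        self.assertEqual(len(output.input_connections), 1)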
Example #2
def buildIrisNetwork(all_inputs, all_targets):
    net = NeuralNet()
    net.init_layers(4, [6], 3)

    net.randomize_network()
    net.set_halt_on_extremes(True)

    #   Optionally constrain the starting weights to the range -.5 to .5
    #net.set_random_constraint(.5)
    net.set_learnrate(.1)

    net.set_all_inputs(all_inputs)
    net.set_all_targets(all_targets)

    length = len(all_inputs)
    learn_end_point = int(length * .5)

    net.set_learn_range(0, learn_end_point)
    net.set_test_range(learn_end_point + 1, length - 1)

    net.layers[0].set_activation_type('tanh')
    net.layers[1].set_activation_type('tanh')
    net.layers[2].set_activation_type('threshold')
    return net
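
A usage sketch for the builder above, borrowing neurgenData() from Example #7 to supply the iris inputs and targets; the epoch count is an arbitrary choice:

all_inputs, all_targets = neurgenData()
net = buildIrisNetwork(all_inputs, all_targets)
net.learn(epochs=10, show_epoch_results=True, random_testing=False)
print("test set MSE =", net.test())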
Example #3
    def setUp(self):

        self.net = NeuralNet()
        self.net.init_layers(2, [1], 1)

        self.rec_config = NARXRecurrent(output_order=1,
                                        incoming_weight_from_output=.9,
                                        input_order=1,
                                        incoming_weight_from_input=.7)
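
The fixture builds the NARX config but never attaches it; pyneurgen takes a recurrent config as the fourth argument of init_layers, as Examples #8 through #10 show. A sketch of the wired-in version (the same pattern applies to the Jordan, Elman, and base configs in the next three examples):

net = NeuralNet()
net.init_layers(2, [1], 1,
                NARXRecurrent(output_order=1, incoming_weight_from_output=.9,
                              input_order=1, incoming_weight_from_input=.7))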
Example #4
    def setUp(self):

        self.net = NeuralNet()
        self.net.init_layers(2, [1], 1)

        self.rec_config = JordanRecurrent(existing_weight=.8)
Example #5
    def setUp(self):

        self.net = NeuralNet()
        self.net.init_layers(2, [1], 1)

        self.rec_config = ElmanSimpleRecurrent()
Example #6
    def setUp(self):

        self.net = NeuralNet()
        self.net.init_layers(2, [1], 1)

        self.rec_config = RecurrentConfig()
Example #7
import sys
sys.path.append('/home/david/Dropbox/programming/python/ann/myangn/sem6')
sys.path.append('/home/david/Dropbox/programming/python/ann/mypybrain')

from pylab import array, ylim, where, average
from pylab import plot, legend, subplot, grid, xlabel, ylabel, show, title
from pyneurgen.neuralnet import NeuralNet
from pyneurgen.nodes import BiasNode, Connection

from iris import neurgenData
from src.utilities import percentError

#   Build the inputs
all_inputs, all_targets = neurgenData()

net = NeuralNet()
net.init_layers(4, [6], 3)

net.randomize_network()
net.set_halt_on_extremes(True)

#   Optionally constrain the starting weights to the range -.5 to .5
#net.set_random_constraint(.5)
net.set_learnrate(.1)

net.set_all_inputs(all_inputs)
net.set_all_targets(all_targets)

length = len(all_inputs)
learn_end_point = int(length * .5)
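
The listing breaks off here. Example #2 wraps this same setup in a function, so its tail suggests the continuation; a hedged sketch of the remaining steps (the epoch count is an assumption):

net.set_learn_range(0, learn_end_point)
net.set_test_range(learn_end_point + 1, length - 1)

net.layers[0].set_activation_type('tanh')
net.layers[1].set_activation_type('tanh')
net.layers[2].set_activation_type('threshold')

net.learn(epochs=10, show_epoch_results=True, random_testing=False)
print("test set MSE =", net.test())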
Example #8
    # (fragment from inside a walk-forward loop over index i; x_train and
    # x_test are presumably sliced from x the same way y is sliced below)
    y_train = y[0:i]
    y_test = y[i:i + 1]
    y_train = np.array(y_train).reshape((len(y_train), 1))
    y_test = np.array(y_test).reshape((len(y_test), 1))

    # Scale the data into the range 0 to 1
    scaler_x = MinMaxScaler()
    x_train = scaler_x.fit_transform(x_train)
    x_test = scaler_x.transform(x_test)
    scaler_y = MinMaxScaler()
    y_train = scaler_y.fit_transform(y_train)
    y_test = scaler_y.transform(y_test)
    x_input = np.concatenate(
        (x_train, x_test, np.zeros((1, np.shape(x_train)[1]))))
    y_input = np.concatenate((y_train, y_test, np.zeros((1, 1))))

    # Build the neural network model with the chosen parameters
    fit1 = NeuralNet()
    fit1.init_layers(input_nodes, [hidden_nodes], output_nodes,
                     ElmanSimpleRecurrent())
    fit1.randomize_network()
    fit1.layers[1].set_activation_type('sigmoid')
    fit1.set_learnrate(0.05)
    fit1.set_all_inputs(x_input)
    fit1.set_all_targets(y_input)
    fit1.set_learn_range(0, i)
    fit1.set_test_range(i, i + 1)
    fit1.learn(epochs=100, show_epoch_results=True, random_testing=False)
    mse = fit1.test()
    all_mse.append(mse)
    print("test set MSE = ", np.round(mse, 6))
    target = [item[0][0] for item in fit1.test_targets_activations]
    # The closing of this call is reconstructed; the reshape argument is
    # an assumption, mirroring the earlier y reshapes.
    target = scaler_y.inverse_transform(
        np.array(target).reshape(len(target), 1))
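
After the walk-forward loop, the per-fold errors collected in all_mse can be summarized; a minimal sketch:

print("mean walk-forward MSE =", np.round(np.average(all_mse), 6))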
Example #9
def serNeural(sDay, nAhead, x0, hWeek):
    nLin = sDay.shape[0] + nAhead
    nFit = sDay.shape[0] if int(x0['obs_time']) <= 14 else int(x0['obs_time'])
    predS = getHistory(sDay, nAhead, x0, hWeek)
    weekS = [x.isocalendar()[1] for x in sDay.index]
    population = [[float(i), sDay['y'][i], float(i % 7), weekS[i]]
                  for i in range(sDay.shape[0])]
    all_inputs = []
    all_targets = []
    # Scale factors that bring each input feature to the magnitude of y
    factorY = sDay['y'].mean()
    factorT = 1.0 / float(len(population)) * factorY
    factorD = 1. / 7. * factorY
    factorW = 1. / 52. * factorY
    factorS = 4. * sDay['y'].std()
    factorH = factorY / sDay['hist'].mean()

    def population_gen(population):
        pop_sort = [item for item in population]
        # random.shuffle(pop_sort)
        for item in pop_sort:
            yield item

    for t, y, y1, y2 in population_gen(population):
        # all_inputs.append([t*factorT, (.5-random.random())*factorS+factorY, y1*factorD, y2*factorW])
        all_inputs.append([y1 * factorD,
                           (.5 - random.random()) * factorS + factorY,
                           y2 * factorW])
        all_targets.append([y])

    if False:   # debug plot of the generated inputs and targets
        plt.plot([x[0] for x in all_inputs], '-', label='targets0')
        plt.plot([x[1] for x in all_inputs], '-', label='targets1')
        plt.plot([x[2] for x in all_inputs], '-', label='targets2')
        # plt.plot([x[3] for x in all_inputs], '-', label='targets3')
        plt.plot([x[0] for x in all_targets], '-', label='actuals')
        plt.legend(loc='lower left', numpoints=1)
        plt.show()

    net = NeuralNet()
    # NARXRecurrent(output_order, incoming_weight_from_output,
    #               input_order, incoming_weight_from_input); cf. Example #3
    net.init_layers(3, [10], 1, NARXRecurrent(3, .6, 2, .4))
    net.randomize_network()
    net.set_random_constraint(.5)
    net.set_learnrate(.1)
    net.set_all_inputs(all_inputs)
    net.set_all_targets(all_targets)
    # predS['pred'] = [item[0][0] for item in net.test_targets_activations]
    length = len(all_inputs)
    learn_end_point = int(length * .8)
    # random.sample(all_inputs, 10)
    net.set_learn_range(0, learn_end_point)
    net.set_test_range(learn_end_point + 1, length - 1)
    net.layers[1].set_activation_type('tanh')

    net.learn(epochs=125, show_epoch_results=True, random_testing=False)
    mse = net.test()
    #net.save(os.environ['LAV_DIR'] + "/out/train/net.txt")

    test_positions = [item[0][0] for item in net.get_test_data()]
    all_targets1 = [item[0][0] for item in net.test_targets_activations]
    all_actuals = [item[1][0] for item in net.test_targets_activations]
    #   This is quick and dirty, but it will show the results
    plt.subplot(3, 1, 1)
    plt.plot([i for i in sDay['y']], '-')
    plt.title("Population")
    plt.grid(True)
    
    plt.subplot(3, 1, 2)
    plt.plot(test_positions, all_targets1, 'b-', label='targets')
    plt.plot(test_positions, all_actuals, 'r-', label='actuals')
    plt.grid(True)
    plt.legend(loc='lower left', numpoints=1)
    plt.title("Test Target Points vs Actual Points")

    plt.subplot(3, 1, 3)
    plt.plot(range(1, len(net.accum_mse) + 1, 1), net.accum_mse)
    plt.xlabel('epochs')
    plt.ylabel('mean squared error')
    plt.grid(True)
    plt.title("Mean Squared Error by Epoch")
    plt.show()
Example #10
y = y.reshape(len(y), 1)

# Separate scalers keep y's fit available for inverse-transforming
# predictions later
scaler_x = preprocessing.MinMaxScaler(feature_range=(0, 1))
scaler_y = preprocessing.MinMaxScaler(feature_range=(0, 1))
x = scaler_x.fit_transform(x)
y = scaler_y.fit_transform(y)

random.seed(101)
input_nodes = 1
hidden_nodes = 10
output_nodes = 1
output_order = 1
input_order = 1
incoming_weight_from_output = 0.3
incoming_weight_from_input = 0.6

fit1 = NeuralNet()
fit1.init_layers(
    input_nodes, [hidden_nodes], output_nodes,
    NARXRecurrent(output_order, incoming_weight_from_output, input_order,
                  incoming_weight_from_input))
fit1.randomize_network()
fit1.layers[1].set_activation_type('sigmoid')
fit1.set_learnrate(0.35)
fit1.set_all_inputs(x)
fit1.set_all_targets(y)

length = len(x)
learn_end_point = int(length * 0.85)
fit1.set_learn_range(0, learn_end_point)
fit1.set_test_range(learn_end_point + 1, length - 1)
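
The listing stops before training. A hedged continuation following the pattern of Examples #2 and #9; the epoch count is arbitrary:

fit1.learn(epochs=100, show_epoch_results=True, random_testing=False)
mse = fit1.test()
print("test set MSE =", round(mse, 6))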