# Example #1
class NARXRecurrentTest(unittest.TestCase):
    """Unit tests for the NARXRecurrent configuration object."""

    def setUp(self):
        # A minimal 2-input / 1-hidden / 1-output network to test against.
        self.net = NeuralNet()
        self.net.init_layers(2, [1], 1)

        # Order-1 feedback from both the output (weight .9) and input (.7).
        self.rec_config = NARXRecurrent(
            output_order=1,
            incoming_weight_from_output=.9,
            input_order=1,
            incoming_weight_from_input=.7)

    def test_class_init_(self):
        # The constructor stores [order, weight] pairs and leaves the
        # node type unset until it is selected later.
        self.assertEqual(self.rec_config.existing_weight, 0)
        self.assertIsNone(self.rec_config._node_type)
        self.assertEqual(self.rec_config.output_values, [1, .9])
        self.assertEqual(self.rec_config.input_values, [1, .7])

    def test_get_source_nodes(self):
        # Output node type draws source nodes from the last layer...
        self.rec_config._node_type = NODE_OUTPUT
        expected = self.net.layers[-1].get_nodes(NODE_OUTPUT)
        self.assertEqual(expected, self.rec_config.get_source_nodes(self.net))

        # ...and input node type from the first layer.
        self.rec_config._node_type = NODE_INPUT
        expected = self.net.layers[0].get_nodes(NODE_INPUT)
        self.assertEqual(expected, self.rec_config.get_source_nodes(self.net))
class NARXRecurrentTest(unittest.TestCase):
    """Tests for NARXRecurrent construction and source-node lookup."""

    def setUp(self):
        net = NeuralNet()
        net.init_layers(2, [1], 1)
        self.net = net

        # First-order recurrence on both output and input connections.
        narx_kwargs = dict(output_order=1,
                           incoming_weight_from_output=.9,
                           input_order=1,
                           incoming_weight_from_input=.7)
        self.rec_config = NARXRecurrent(**narx_kwargs)

    def test_class_init_(self):
        cfg = self.rec_config
        # Fresh config: zero existing weight, no node type chosen yet,
        # and [order, weight] stored for each of output and input.
        self.assertEqual(0, cfg.existing_weight)
        self.assertEqual(None, cfg._node_type)
        self.assertEqual([1, .9], cfg.output_values)
        self.assertEqual([1, .7], cfg.input_values)

    def test_get_source_nodes(self):
        cfg = self.rec_config
        for node_type, layer in ((NODE_OUTPUT, self.net.layers[-1]),
                                 (NODE_INPUT, self.net.layers[0])):
            # The selected node type determines which layer supplies
            # the recurrent source nodes.
            cfg._node_type = node_type
            self.assertEqual(layer.get_nodes(node_type),
                             cfg.get_source_nodes(self.net))
# Example #3
    def setUp(self):
        """Create a small 2-1-1 network plus a NARX recurrence config."""
        self.net = NeuralNet()
        self.net.init_layers(2, [1], 1)

        # Order-1 feedback from the output (weight .9) and input (.7).
        narx_kwargs = dict(output_order=1,
                           incoming_weight_from_output=.9,
                           input_order=1,
                           incoming_weight_from_input=.7)
        self.rec_config = NARXRecurrent(**narx_kwargs)
    def setUp(self):
        """Build the fixture: a 2-input/1-hidden/1-output net and config."""
        net = NeuralNet()
        net.init_layers(2, [1], 1)
        self.net = net

        # First-order recurrence on both connections.
        self.rec_config = NARXRecurrent(
            output_order=1, incoming_weight_from_output=.9,
            input_order=1, incoming_weight_from_input=.7)
# Example #5
                        xs[14], xs[15], xs[16], xs[17], xs[18], xs[19], xs[20]])
    # Flatten the first 21 target segments into one array (mirrors the xs
    # concatenation immediately above this span).
    ys = np.concatenate([ys[0],ys[1],ys[2],ys[3], ys[4], ys[5], ys[6],
                         ys[7], ys[8], ys[9], ys[10], ys[11], ys[12], ys[13],
                        ys[14], ys[15], ys[16], ys[17], ys[18], ys[19], ys[20]])

# Create one network per (l1, l2) pair and configure it for training.
# NOTE(review): the loop body appears truncated here -- training presumably
# continues after set_all_targets; confirm against the full source.
    for l1, l2 in zip (xs, ys):
        # Pure output-feedback NARX: order-6 output memory at full weight,
        # no input memory at all.
        output_order = 6
        incoming_weight_from_output = 1.
        input_order = 0
        incoming_weight_from_input = 0.

        net = NeuralNet()
        # NOTE(review): [l1, l2] is passed where other snippets pass hidden
        # layer sizes -- assumes l1/l2 are node counts here; verify.
        net.init_layers(4, [l1,l2], 1, NARXRecurrent(
                    output_order,
                    incoming_weight_from_output,
                    input_order,
                    incoming_weight_from_input))

        net.randomize_network()
        # Halt training if weights/values go to extremes.
        net.set_halt_on_extremes(True)


        #   Set to constrain beginning weights to -.5 to .5
        #       Just to show we can

        net.set_random_constraint(.5)
        net.set_learnrate(.1)
        net.set_all_inputs(all_inputs)
        net.set_all_targets(all_targets)
        
# Example #6
    return population, all_inputs, all_targets


# Build the training data set.
population, all_inputs, all_targets = generate_data()

# Topology: 1 input node, one hidden layer of 10 nodes, 1 output node.
input_nodes = 1
hidden_nodes = 10
output_nodes = 1

# NARX recurrence: order-3 output feedback (weight .6) and
# order-2 input feedback (weight .4).
output_order = 3
incoming_weight_from_output = .6
input_order = 2
incoming_weight_from_input = .4

# Construct and initialise the network.
recurrence = NARXRecurrent(output_order, incoming_weight_from_output,
                           input_order, incoming_weight_from_input)
net = NeuralNet()
net.init_layers(input_nodes, [hidden_nodes], output_nodes, recurrence)
net.randomize_network()
net.set_halt_on_extremes(True)

# Constrain initial weights and set the learning rate.
net.set_random_constraint(.5)
net.set_learnrate(.1)

# Attach inputs and targets.
net.set_all_inputs(all_inputs)
net.set_all_targets(all_targets)

# The first 80% of the samples form the learning range.
length = len(all_inputs)
learn_end_point = int(length * .8)
# Example #7
def serNeural(sDay,nAhead,x0,hWeek):
    """Train a NARX recurrent net on the daily series ``sDay`` and plot results.

    sDay   : DataFrame-like with a datetime index and columns 'y' and 'hist'.
    nAhead : number of steps beyond the series end (used for sizing / history).
    x0     : dict-like options; only 'obs_time' is read in this function.
    hWeek  : forwarded to getHistory -- semantics defined by that helper.

    Returns: nothing visible in this span -- NOTE(review): the function may
    continue past the plotting code shown here; confirm against full source.
    """
    # NOTE(review): nLin and nFit are computed but never used in the code
    # visible here.
    nLin = sDay.shape[0] + nAhead
    nFit = sDay.shape[0] if int(x0['obs_time']) <= 14 else int(x0['obs_time'])
    predS = getHistory(sDay,nAhead,x0,hWeek)
    # ISO week number for each date in the index.
    weekS = [x.isocalendar()[1] for x in sDay.index]
    # Row per observation: [position, value, day-of-week, week-of-year].
    population = [[float(i),sDay['y'][i],float(i%7),weekS[i]] for i in range(sDay.shape[0])]
    all_inputs = []
    all_targets = []
    # Scaling factors built around the series mean; factorS scales noise by
    # 4 standard deviations, factorH normalises against the 'hist' column.
    factorY = sDay['y'].mean()
    factorT = 1.0 / float(len(population))*factorY
    factorD = 1./7.*factorY
    factorW = 1./52.*factorY
    factorS = 4.*sDay['y'].std()
    factorH = factorY/sDay['hist'].mean()

    def population_gen(population):
        # Yield rows in order; shuffling is deliberately disabled below.
        pop_sort = [item for item in population]
#        random.shuffle(pop_sort)
        for item in pop_sort:
            yield item

    # Inputs: scaled day-of-week, a noisy constant centred on the mean,
    # and scaled week-of-year. Target: the raw series value.
    for t,y,y1,y2 in population_gen(population):
        #all_inputs.append([t*factorT,(.5-random.random())*factorS+factorY,y1*factorD,y2*factorW])
        all_inputs.append([y1*factorD,(.5-random.random())*factorS+factorY,y2*factorW])
        all_targets.append([y])

    # Debug visualisation of the generated inputs/targets; disabled.
    if False:
        plt.plot([x[0] for x in all_inputs],'-',label='targets0')
        plt.plot([x[1] for x in all_inputs],'-',label='targets1')
        plt.plot([x[2] for x in all_inputs],'-',label='targets2')
        # plt.plot([x[3] for x in all_inputs],'-',label='targets3')
        plt.plot([x[0] for x in all_targets],'-',label='actuals')
        plt.legend(loc='lower left', numpoints=1)
        plt.show()

    # 3 inputs, one hidden layer of 10, 1 output; NARX recurrence with
    # order-3 output feedback (.6) and order-2 input feedback (.4).
    net = NeuralNet()
    net.init_layers(3,[10],1,NARXRecurrent(3,.6,2,.4))
    net.randomize_network()
    net.set_random_constraint(.5)
    net.set_learnrate(.1)
    net.set_all_inputs(all_inputs)
    net.set_all_targets(all_targets)
    #predS['pred'] = [item[0][0] for item in net.test_targets_activations]
    # First 80% learns, remainder tests.
    length = len(all_inputs)
    learn_end_point = int(length * .8)
    # random.sample(all_inputs,10)
    net.set_learn_range(0, learn_end_point)
    net.set_test_range(learn_end_point + 1, length - 1)
    net.layers[1].set_activation_type('tanh')

    net.learn(epochs=125,show_epoch_results=True,random_testing=False)
    mse = net.test()
    #net.save(os.environ['LAV_DIR'] + "/out/train/net.txt")

    # Unpack test positions, network outputs and actual values for plotting.
    test_positions = [item[0][0] for item in net.get_test_data()]
    all_targets1 = [item[0][0] for item in net.test_targets_activations]
    all_actuals = [item[1][0] for item in net.test_targets_activations]
    #   This is quick and dirty, but it will show the results
    plt.subplot(3, 1, 1)
    plt.plot([i for i in sDay['y']],'-')
    plt.title("Population")
    plt.grid(True)

    plt.subplot(3, 1, 2)
    plt.plot(test_positions, all_targets1, 'b-', label='targets')
    plt.plot(test_positions, all_actuals, 'r-', label='actuals')
    plt.grid(True)
    plt.legend(loc='lower left', numpoints=1)
    plt.title("Test Target Points vs Actual Points")

    plt.subplot(3, 1, 3)
    plt.plot(range(1, len(net.accum_mse) + 1, 1), net.accum_mse)
    plt.xlabel('epochs')
    plt.ylabel('mean squared error')
    plt.grid(True)
    plt.title("Mean Squared Error by Epoch")
    plt.show()