Example #1
# imports used by this snippet (paths as in Example #11)
from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork

def main():

    # create the dataset: each input sample is a vector of size 2
    # and each output is a scalar
    dataset = SupervisedDataSet(2, 1)

    criandoDataset(dataset)

    # build the neural network, passing, in order:
    # the number of inputs to the network,
    # the number of neurons in the hidden layer,
    # and the output dimension of the network,
    # with bias units enabled
    network = buildNetwork(dataset.indim, 4, dataset.outdim, bias=True)

    # create the trainer for the network, passing:
    # the network,
    # the dataset,
    # the learning rate,
    # and a momentum term that speeds up and stabilizes training
    trainer = BackpropTrainer(network,
                              dataset,
                              learningrate=0.01,
                              momentum=0.99)

    # loop that trains the network
    for epocas in range(1000):

        trainer.train()

    # running the test
    datasetTeste = SupervisedDataSet(2, 1)
    criandoDataset(datasetTeste)
    trainer.testOnData(datasetTeste, verbose=True)
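The criandoDataset helper is not shown. A minimal sketch, assuming it fills the dataset with the XOR truth table (consistent with the 2-input, 1-output shape above):

def criandoDataset(dataset):
    # hypothetical reconstruction: the four XOR samples
    dataset.addSample((0, 0), (0,))
    dataset.addSample((0, 1), (1,))
    dataset.addSample((1, 0), (1,))
    dataset.addSample((1, 1), (0,))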
Example #2
import numpy as np
from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork

def montaRede(dadosEntrada, dadosSaida):
    """
    Função na qual def

    :param dadosEntrada: parâmetros de entrada na rede neural
    :param dadosSaida:  parâmetros de saída da rede neural
    :return: retorna a rede de treinamento treinada e os dados supervisionados
    """

    entradaTreino = np.concatenate(
        (dadosEntrada[:35], dadosEntrada[50:85], dadosEntrada[100:135]))
    saidaTreino = np.concatenate(
        (dadosSaida[:35], dadosSaida[50:85], dadosSaida[100:135]))
    entradaTeste = np.concatenate(
        (dadosEntrada[35:50], dadosEntrada[85:100], dadosEntrada[135:]))
    saidaTeste = np.concatenate(
        (dadosSaida[35:50], dadosSaida[85:100], dadosSaida[135:]))

    # create the training dataset:
    # 4 input values per sample,
    # 1 output value per sample
    treinamento = treinaRede(entradaTreino, saidaTreino)

    # neural network sized to the training dataset:
    # 2 neurons in the hidden layer,
    # output dimension matching the dataset,
    # with bias units
    redeNeural = buildNetwork(treinamento.indim,
                              2,
                              treinamento.outdim,
                              bias=True)

    # create the backprop trainer for the network
    redeNeuralTreinada = BackpropTrainer(redeNeural,
                                         treinamento,
                                         learningrate=0.3,
                                         momentum=0.9)

    for epocas in range(10000):

        redeNeuralTreinada.train()

    teste = SupervisedDataSet(4, 1)

    for i in range(len(entradaTeste)):

        teste.addSample(entradaTeste[i], saidaTeste[i])

    return redeNeuralTreinada, teste
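The treinaRede helper is not shown either; despite its name ("train network"), the call site uses its result as a dataset with indim/outdim. A plausible sketch, assuming it packs the training pairs into a 4-input, 1-output SupervisedDataSet:

def treinaRede(entradaTreino, saidaTreino):
    # hypothetical reconstruction: pair inputs with targets
    treinamento = SupervisedDataSet(4, 1)
    for entrada, saida in zip(entradaTreino, saidaTreino):
        treinamento.addSample(entrada, saida)
    return treinamento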
Example #3
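This snippet begins mid-file: the imports plus inLayer, hiddenLayerOne, ds, and printNetResult are missing. A minimal sketch of that setup, assuming the same XOR dataset and helper as Example #11 (the 2-unit linear input layer and 4-unit first hidden layer are inferred from the visible connections):

from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.structure.connections.full import FullConnection
from pybrain.structure.modules.linearlayer import LinearLayer
from pybrain.structure.modules.sigmoidlayer import SigmoidLayer
from pybrain.structure.networks.feedforward import FeedForwardNetwork
from pybrain.supervised.trainers.backprop import BackpropTrainer

def printNetResult(identifier, net):
    print(identifier, net.activate((0, 0)), net.activate((0, 1)),
          net.activate((1, 0)), net.activate((1, 1)))

ds = SupervisedDataSet(2, 1)
ds.addSample((0, 0), (0,))
ds.addSample((0, 1), (1,))
ds.addSample((1, 0), (1,))
ds.addSample((1, 1), (0,))

inLayer = LinearLayer(2)
hiddenLayerOne = SigmoidLayer(4, "one")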
hiddenLayerTwo = SigmoidLayer(4, "two")
outLayer = LinearLayer(1)
inToHiddenOne = FullConnection(inLayer, hiddenLayerOne)
hiddenOneToTwo = FullConnection(hiddenLayerOne, hiddenLayerTwo)
hiddenTwoToOut = FullConnection(hiddenLayerTwo, outLayer)

#wire the layers and connections to a net
net = FeedForwardNetwork()
net.addInputModule(inLayer)
net.addModule(hiddenLayerOne)
net.addModule(hiddenLayerTwo)
net.addOutputModule(outLayer)
net.addConnection(inToHiddenOne)
net.addConnection(hiddenOneToTwo)
net.addConnection(hiddenTwoToOut)
net.sortModules()

print(net)

trainer = BackpropTrainer(net, ds)

for i in range(20):
    for j in range(1000):
        trainer.train()
    printNetResult(i, net)

print(net)
print(inToHiddenOne.params)
print(hiddenOneToTwo.params)
print(hiddenTwoToOut.params)
Example #5
    # Train the NN with backpropagation
    # (N, D, time, plot, and show are defined/imported earlier in the source file)
    T = BackpropTrainer(N, D, learningrate=0.1, momentum=0.9)

    i = 0
    error = []
    time_before = time.time()
    while i < 50:
        errordata = T.testOnData(D)  # current MSE on the dataset
        if errordata <= 0.001:       # stop early once the error is small enough
            break

        print(i, '\tMSE:', round(errordata, 6),
              '\tTime:', round(time.time() - time_before, 6))

        # Store the error in a list to plot
        error.append(errordata)

        T.train()
        i += 1

    #print('It took', time.time()-time_before, 'seconds to train the NN')

    #Display the error in a chart
    plot(error)
    show()


    #------------------------------------------------------------------
    #Test the NN
    #for counter, item in enumerate(train_data):
    #    print('Real:', answer[counter], ' NN:', N.activate(item)[0])
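The original file goes on to save the trained network to a pickle (per a trailing comment in the source); a minimal sketch of that step with a hypothetical filename:

    import pickle
    with open('trained_net.pickle', 'wb') as f:
        pickle.dump(N, f)  # serialize the trained network for later reuse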

Example #6
class NeuralNetwork(object):
    """
    The neural network class does all the heavy lifting to incorporate pybrain
    neural networks into the NowTrade ecosystem.
    """
    def __init__(self,
                 train_data,
                 prediction_data,
                 network_type=FEED_FORWARD_NETWORK,
                 network_dataset_type=SUPERVISED_DATASET,
                 trainer_type=BACKPROP_TRAINER):
        self.train_data = train_data
        self.prediction_data = prediction_data
        self.network_type = network_type
        self.network_dataset_type = network_dataset_type
        self.trainer_type = trainer_type
        self.network = None
        self.network_dataset = None
        self.dataset = None
        self.trainer = None
        self.trained_iterations = 0
        self.momentum = None
        self.learning_rate = None
        self.hidden_layers = None
        self.prediction_window = None
        self.logger = logger.Logger(self.__class__.__name__)
        self.logger.info('train_data: %s  prediction_data: %s, network_type: %s, \
                          network_dataset_type: %s, trainer_type: %s'
                         %(train_data, prediction_data, network_type, \
                           network_dataset_type, trainer_type))

    def save(self):
        """
        Returns the pickled trained/tested neural network as a string.
        """
        return cPickle.dumps(self)

    def save_to_file(self, filename):
        """
        Saves a neural network to file for later use.

        Look into pybrain.datasets.supervised.SupervisedDataSet.saveToFile()
        http://pybrain.org/docs/api/datasets/superviseddataset.html
        """
        file_handler = open(filename, 'wb')
        cPickle.dump(self, file_handler)
        file_handler.close()

    def build_network(self, dataset, new=True, **kwargs):
        """
        Builds a neural network using the dataset provided.
        Expected keyword args:
            - 'hidden_layers'
            - 'prediction_window'
            - 'learning_rate'
            - 'momentum'
        """
        self.hidden_layers = kwargs.get('hidden_layers', 3)
        self.prediction_window = kwargs.get('prediction_window', 1)
        self.learning_rate = kwargs.get('learning_rate', 0.1)
        self.momentum = kwargs.get('momentum', 0.01)
        if not new:
            self.network.sorted = False
            self.network.sortModules()
            if self.network_dataset_type == SUPERVISED_DATASET:
                self.ready_supervised_dataset(dataset)
            else:
                raise InvalidNetworkDatasetType()
        else:
            if self.network_type == FEED_FORWARD_NETWORK:
                self.network = buildNetwork(len(self.train_data),
                                            self.hidden_layers, 1)
            else:
                raise InvalidNetworkType()
            if self.network_dataset_type == SUPERVISED_DATASET:
                self.ready_supervised_dataset(dataset)
            else:
                raise InvalidNetworkDatasetType()
            if self.trainer_type == BACKPROP_TRAINER:
                self.trainer = BackpropTrainer(self.network,
                                               learningrate=self.learning_rate,
                                               momentum=self.momentum,
                                               verbose=True)
                self.trainer.setData(self.network_dataset)
            else:
                raise InvalidTrainerType()

    def ready_supervised_dataset(self, dataset):
        """
        Ready the supervised dataset for training.

        @TODO: Need to randomize the data being fed to the network.
        See randomBatches() here: http://pybrain.org/docs/api/datasets/superviseddataset.html
        """
        self.network_dataset = SupervisedDataSet(len(self.train_data), 1)
        # Currently only supports log function for normalizing data
        training_values = np.log(dataset.data_frame[self.train_data])
        results = np.log(dataset.data_frame[self.prediction_data].shift(
            -self.prediction_window))
        training_values['PREDICTION_%s' % self.prediction_data[0]] = results
        training_values = training_values.dropna()
        for _, row_data in enumerate(training_values.iterrows()):
            _, data = row_data
            sample = list(data[:-1])
            result = [data[-1]]
            self.network_dataset.addSample(sample, result)

    def train(self, cycles=1):
        """
        Trains the network for the number of iterations specified by the cycles parameter.
        """
        for _ in range(cycles):
            res = self.trainer.train()
            self.trained_iterations += 1
        return res

    def train_until_convergence(self,
                                max_cycles=1000,
                                continue_cycles=10,
                                validation_proportion=0.25):
        """
        Wrapper around the pybrain BackpropTrainer trainUntilConvergence method.

        @see: http://pybrain.org/docs/api/supervised/trainers.html
        """
        self.trainer = \
            self.trainer.trainUntilConvergence(maxEpochs=max_cycles,
                                               continueEpochs=continue_cycles,
                                               validationProportion=validation_proportion)

    def _activate(self, data):
        """
        Activates the network using the data specified.
        Returns the network's prediction.
        """
        return self.network.activate(data)[0]

    def activate_all(self, data_frame):
        """
        Activates the network for all values in the dataframe specified.
        """
        dataframe = np.log(data_frame[self.train_data])
        res = []
        for _, row_data in enumerate(dataframe.iterrows()):
            _, data = row_data
            sample = list(data)
            res.append(self._activate(sample))
        return np.exp(res)
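A hedged usage sketch of this class (the instrument columns and dataset object are hypothetical; the keyword values mirror build_network's defaults above):

nn = NeuralNetwork(train_data=['MSFT_Close', 'MSFT_Volume'],
                   prediction_data=['MSFT_Close'])
nn.build_network(dataset, hidden_layers=3, prediction_window=1,
                 learning_rate=0.1, momentum=0.01)
error = nn.train(cycles=10)  # returns the training error of the last cycle
predictions = nn.activate_all(dataset.data_frame)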
Example #7
class StrategyANN(Strategy):

    def __init__(self, features_num, hidden_neurons_num):
        super().__init__()
        self.is_learning = True

        self.features_num = features_num
#         self.net = buildNetwork(features_num, hidden_neurons_num, 1, bias = True)
#         self.net = buildNetwork(features_num, hidden_neurons_num, hidden_neurons_num, 1, bias = True)
#         self.net = ConvolutionalBoardNetwork(Board.BOARD_SIZE, 5, 3)
#         self.trainer = BackpropTrainer(self.net)
        
        self.net_attack = buildNetwork(features_num, hidden_neurons_num, hidden_neurons_num, 1, bias=True)
        self.net_defence = buildNetwork(features_num, hidden_neurons_num, hidden_neurons_num, 1, bias=True)
        self.trainer_attack = BackpropTrainer(self.net_attack)
        self.trainer_defence = BackpropTrainer(self.net_defence)
                
        self.gamma = 0.9
        self.errors = []
        self.buf = np.zeros(200)
        self.buf_index = 0
        self.setup()        
        
        
    def update_at_end(self, old, new):
        if not self.needs_update():
            return
                
        if new.winner == Board.STONE_EMPTY:
            reward = 0
        else:
            reward = 2 if self.stand_for == new.winner else -2
        
        if old is None:
            if self.prev_state is not None:
                self._update_impl(self.prev_state, new, reward)
        else:    
            self._update_impl(old, new, reward)
    

    def update(self, old, new):
        if not self.needs_update():
            return
        
        if self.prev_state is None:
            self.prev_state = old
            return       
        
        if new is None:
            self._update_impl(self.prev_state, old, 0)
        
        self.prev_state = old
 
          
    def _update_impl(self, old, new, reward):
        old_input = self.get_input_values(old)

        v1_a = self.net_attack.activate(self.get_input_values(new))
        target = self.gamma * v1_a
        
        ds_a = SupervisedDataSet(self.features_num, 1)
        ds_a.addSample(old_input, target + max(0, reward))
        ds_d = SupervisedDataSet(self.features_num, 1)
        ds_d.addSample(old_input, target + min(0, reward))
#         self.trainer.setData(ds)
#         err = self.trainer.train()
        self.trainer_attack.setData(ds_a)
        self.trainer_attack.train()
        self.trainer_defence.setData(ds_d)
        self.trainer_defence.train()
        
#         self.buf[self.buf_index] = err
#         self.buf_index += 1
#         if self.buf_index >= self.buf.size:
#             if len(self.errors) < 2000:
#                 self.errors.append(np.average(self.buf))
#             self.buf.fill(0)
#             self.buf_index = 0
            

    def board_value(self, board, context):
        iv = self.get_input_values(board)
#         return self.net.activate(iv)
        return self.net_attack.activate(iv), self.net_defence.activate(iv)
    
    def _decide_move(self, moves):
        best_move_a, best_av = None, None
        best_move_d, best_dv = None, None
        for m in moves:
            iv = self.get_input_values(m)
            av, dv = self.net_attack.activate(iv), self.net_defence.activate(iv)
            if best_av is None or best_av < av:
                best_move_a, best_av = m, av
            if best_dv is None or best_dv < dv:
                best_move_d, best_dv = m, dv
        return best_move_a if best_av >= best_dv else best_move_d
            

    def preferred_board(self, old, moves, context):
        if not moves:
            return old
        if len(moves) == 1:
            return moves[0]

        if np.random.rand() < self.epsilon:  # exploration
            the_board = random.choice(moves)
            the_board.exploration = True
            return the_board
        else:
#             board_most_value = max(moves, key=lambda m: self.board_value(m, context))            
#             return board_most_value
            return self._decide_move(moves)
        

    def get_input_values(self, board):
        '''
        Returns:
        -----------
        vector: numpy.1darray
            the input vector
        '''
#         print('boar.stone shape: ' + str(board.stones.shape))
        v = board.stones
#         print('vectorized board shape: ' + str(v.shape))

#         print('b[%d], w[%d]' % (black, white))
        iv = np.zeros(v.shape[0] * 2 + 2)
  
        iv[0:v.shape[0]] = (v == Board.STONE_BLACK).astype(int)
        iv[v.shape[0]:v.shape[0] * 2] = (v == Board.STONE_WHITE).astype(int)
        who = board.whose_turn_now()
        iv[-2] = 1 if who == Board.STONE_BLACK else 0  # turn to black move
        iv[-1] = 1 if who == Board.STONE_WHITE else 0  # turn to white move
#         print(iv.shape)
#         print(iv)
        return iv
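
    # Illustrative encoding example (hypothetical 3-cell board, black to move):
    #   stones = [STONE_BLACK, STONE_EMPTY, STONE_WHITE]
    #   iv     = [1, 0, 0,   0, 0, 1,   1, 0]
    #            black mask  white mask turn flags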

    def save(self, file):
        pass

    def load(self, file):
        pass

    def setup(self):
        self.prev_state = None
    
    def mind_clone(self):
        pass
Example #8
    def train(self, x_data, y_data):
        trainer = BackpropTrainer(self.net, self._prepare_dataset(x_data, y_data))
        trainer.train()
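The _prepare_dataset helper is not shown; a minimal sketch, assuming x_data and y_data are parallel sequences of input samples and targets:

    def _prepare_dataset(self, x_data, y_data):
        # hypothetical reconstruction: pack samples and targets into a dataset
        ds = SupervisedDataSet(self.net.indim, self.net.outdim)
        for x, y in zip(x_data, y_data):
            ds.addSample(x, y)
        return ds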
Example #10
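print_net, net, and the IRIS_* sets are not shown. A sketch of plausible definitions (the 4-6-3 topology is an assumption; the rows below are the first samples of each Iris species):

from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork

net = buildNetwork(4, 6, 3, bias=True)

def print_net():
    print(net.params)  # dump the current weights

# each datum: sepal length, sepal width, petal length, petal width, species (1-3)
IRIS_TRAIN_SET = [(5.1, 3.5, 1.4, 0.2, 1),
                  (7.0, 3.2, 4.7, 1.4, 2),
                  (6.3, 3.3, 6.0, 2.5, 3)]
IRIS_TEST_SET = [(4.9, 3.0, 1.4, 0.2, 1)]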
print_net()

print("TRAINING")

def squashIrisSpecies(species):
    return 1/float(species) - 0.167

dataset = SupervisedDataSet(4, 3)
for datum in IRIS_TRAIN_SET:
    species = datum[-1]
    if species == 1:
        ideals = [1,0,0]
    elif species == 2:
        ideals = [0,1,0]
    elif species == 3:
        ideals = [0,0,1]
    dataset.addSample(datum[:-1], ideals)

trainer = BackpropTrainer(net, dataset=dataset, learningrate=0.01, verbose=True)

for i in range(1000):
    error = trainer.train()
    if i % 20 == 0:
        print("Iteration {0} error {1}".format(i, error))

print("POST TRAINING")
print_net()
for test in IRIS_TEST_SET:
    inputs = test[:-1]
    species = test[-1]
    print(test, net.activate(inputs))
Example #11
from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules.tanhlayer import TanhLayer
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.supervised.trainers.mixturedensity import BackpropTrainerMix

def printNetResult(identifier, net):
    print(identifier, net.activate((0, 0)), net.activate((0, 1)),
          net.activate((1, 0)), net.activate((1, 1)))

ds = SupervisedDataSet(2,1)

ds.addSample((0, 0), (0,))
ds.addSample((0, 1), (1,))
ds.addSample((1, 0), (1,))
ds.addSample((1, 1), (0,))

for input, target in ds:
    print(input, target)
    
#net = buildNetwork(2, 3, 1, bias=True, hiddenclass=TanhLayer)  # ~1000 epochs
# net = buildNetwork(2, 6, 1, bias=True)  # ~3000 epochs
net = buildNetwork(2, 3, 1, bias=True)

trainer = BackpropTrainer(net, ds)

for i in range(20):
    for j in range(1000):               
        trainer.train()
    printNetResult(i, net)
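With the 2-3-1 topology and bias, the printed activations typically approach the XOR targets (0, 1, 1, 0) well before the final round, though convergence from a random initialization is not guaranteed.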
Example #12
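This snippet also begins mid-file, inside the loop that fills the dataset (the loop head defining inl, r, and i stays elided). A sketch of the assumed surrounding setup; the topology and dataset shape are guesses, while n, ds, epochs, totaltime, and start are the names the visible code uses:

import pickle
from time import time
from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.shortcuts import buildNetwork

n = buildNetwork(2, 4, 1, bias=True)  # hypothetical topology
ds = SupervisedDataSet(2, 1)          # hypothetical shape
epochs = 0
totaltime = 0
start = time()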
		out = [r[i]]
		ds.appendLinked(inl, out)

#trainer = RPropMinusTrainer(n, dataset = ds)
trainer = BackpropTrainer(n, dataset = ds)

print("Generating dataset took", time()-start)

lastlen = 0

start = time()

try:
	while True:
		epochstart = time()
		error = trainer.train()
		tpe = time()-epochstart
		epochs += 1
		out = str(error) + " error " + str(epochs) + " epochs " + str(tpe) + " time per epoch"
		clearspaces = " "*(lastlen-len(out))
		lastlen = len(out)
		if epochs%100 == 0:
			thisrun = time()-start
			totaltime += thisrun
			start = time()
			try:
				NetworkWriter.writeToFile(n, 'net.xml')
				pickle.dump({"epochs":epochs, "time":totaltime}, open( "stats.p", "wb" ) )
				print("Autosaved network and stats after 100 epochs and ",thisrun,"seconds. Total time",round(totaltime/36)/100, "hours")
			except Exception:
				print("Error autosaving network")
Example #13
from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.structure.connections.full import FullConnection
from pybrain.structure.modules.sigmoidlayer import SigmoidLayer
from pybrain.structure.networks.feedforward import FeedForwardNetwork
from pybrain.supervised.trainers.backprop import BackpropTrainer

network = FeedForwardNetwork()  # create network
inputLayer = SigmoidLayer(1)  # maybe LinearLayer ?
hiddenLayer = SigmoidLayer(4)
outputLayer = SigmoidLayer(1)  # maybe LinearLayer ?

network.addInputModule(inputLayer)
network.addModule(hiddenLayer)
network.addOutputModule(outputLayer)
# Connection
network.addConnection(FullConnection(inputLayer, hiddenLayer))
network.addConnection(FullConnection(hiddenLayer, outputLayer))

network.sortModules()

dataTrain = SupervisedDataSet(1, 1)  # input dim, target dim
dataTrain.addSample(1, 0.76)  # one sample: x = 1, y ≈ sin(1)*sin(2*1) ≈ 0.76

trainer = BackpropTrainer(network, dataTrain)  # backprop over our network and data
print(trainer.train())  # one epoch of training; prints the training error

print(network.params)  # the network weights
# print(network)

# print(network.activate([-1]))
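One sample cannot pin down the function; a sketch extending the training set over a range of x, keeping the snippet's assumption that targets come from sin(x)*sin(2*x):

import math

dataTrain = SupervisedDataSet(1, 1)
for i in range(100):
    x = i * (math.pi / 2) / 99  # 100 points over [0, pi/2], where targets stay in [0, 1]
    dataTrain.addSample(x, math.sin(x) * math.sin(2 * x))

trainer = BackpropTrainer(network, dataTrain)
for _ in range(100):
    trainer.train()  # one epoch per call on the denser dataset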