Example no. 1
    def __init__(self, features_num, hidden_neurons_num):
        super().__init__()
        self.is_learning = True

        self.features_num = features_num
        #         self.net = buildNetwork(features_num, hidden_neurons_num, 1, bias = True)
        #         self.net = buildNetwork(features_num, hidden_neurons_num, hidden_neurons_num, 1, bias = True)
        #         self.net = ConvolutionalBoardNetwork(Board.BOARD_SIZE, 5, 3)
        #         self.trainer = BackpropTrainer(self.net)

        self.net_attack = buildNetwork(features_num,
                                       hidden_neurons_num,
                                       hidden_neurons_num,
                                       1,
                                       bias=True)
        self.net_defence = buildNetwork(features_num,
                                        hidden_neurons_num,
                                        hidden_neurons_num,
                                        1,
                                        bias=True)
        self.trainer_attack = BackpropTrainer(self.net_attack)
        self.trainer_defence = BackpropTrainer(self.net_defence)

        self.gamma = 0.9
        self.errors = []
        self.buf = np.zeros(200)
        self.buf_index = 0
        self.setup()
Example no. 2
def begin1():

    cbf = readFromCsv("cbf2")
    numdataset = np.array(cbf, dtype=np.float64)
    # training data, validation data, today's data
    tgdataset, vadataset, tydata = dataSplit(numdataset)
    # normalization parameters
    gydata, dmean, dstd = gyData(tgdataset)

    # validation and today's data
    gyvadata = calFeature(vadataset, dmean, dstd)
    gytydata = calFeature(tydata, dmean, dstd)

    # neural network
    trainingset = buildTrainingSet(gydata)

    for i in range(1000):
        net = buildNetwork(15,
                           8,
                           1,
                           bias=True,
                           hiddenclass=TanhLayer,
                           outclass=TanhLayer)
        trainer = BackpropTrainer(net, trainingset)
        trainer.trainEpochs(epochs=100)
        rate = va.calRightRate(gyvadata, net)
        if rate > 0.6:
            NetworkWriter.writeToFile(
                net, '../netv3/zxtx_8l_100t_6_' + str(rate) + ".xml")
            print(va.calRightRate(gyvadata, net))
            print(va.calRightRate(gytydata, net))
        print(str(i) + " times " + str(rate))


# begin1();
Example no. 3
 def __init__(self,
              module,
              dataset=None,
              learningrate=0.01,
              lrdecay=1.0,
              momentum=0.,
              verbose=False,
              batchlearning=False,
              weightdecay=0.):
     BackpropTrainer.__init__(self, module, dataset, learningrate, lrdecay,
                              momentum, verbose, batchlearning)
Example no. 4
def montaRede(dadosEntrada, dadosSaida):
    """
    Função na qual def

    :param dadosEntrada: parâmetros de entrada na rede neural
    :param dadosSaida:  parâmetros de saída da rede neural
    :return: retorna a rede de treinamento treinada e os dados supervisionados
    """

    entradaTreino = np.concatenate(
        (dadosEntrada[:35], dadosEntrada[50:85], dadosEntrada[100:135]))
    saidaTreino = np.concatenate(
        (dadosSaida[:35], dadosSaida[50:85], dadosSaida[100:135]))
    entradaTeste = np.concatenate(
        (dadosEntrada[35:50], dadosEntrada[85:100], dadosEntrada[135:]))
    saidaTeste = np.concatenate(
        (dadosSaida[35:50], dadosSaida[85:100], dadosSaida[135:]))

    # building the training dataset:
    # 4 input values per sample,
    # 1 output value per sample
    treinamento = treinaRede(entradaTreino, saidaTreino)

    # neural network sized to the training data,
    # with 2 neurons in the hidden layer,
    # the output matching the dataset's output dimension,
    # and a bias unit
    redeNeural = buildNetwork(treinamento.indim,
                              2,
                              treinamento.outdim,
                              bias=True)

    # creating the trainer for the neural network
    redeNeuralTreinada = BackpropTrainer(redeNeural,
                                         treinamento,
                                         learningrate=0.3,
                                         momentum=0.9)

    for epocas in range(0, 10000):

        redeNeuralTreinada.train()

    teste = SupervisedDataSet(4, 1)

    for i in range(len(entradaTeste)):

        teste.addSample(entradaTeste[i], saidaTeste[i])

    return redeNeuralTreinada, teste
Example no. 5
def train(input, hidden, output, dataset):
    """
    Method to build and train a neural network with backpropagation until it converges
    :param input: input nodes for network (1)
    :param hidden: hidden nodes for network
    :param output: output nodes for network (1)
    :param dataset: SupervisedDataSet, maps input->output: 1->1, 2->2 ... 8->8
    :return: the trained neural network
    """
    net = buildNetwork(input, hidden, output, hiddenclass=TanhLayer)
    trainer = BackpropTrainer(net, dataset, learningrate=0.01)
    trainer.trainUntilConvergence(verbose=False, validationProportion=0.15, maxEpochs=1000, continueEpochs=10)

    return net
Example no. 6
 def build_network(self, dataset, new=True, **kwargs):
     """
     Builds a neural network using the dataset provided.
     Expected keyword args:
         - 'hidden_layers'
         - 'prediction_window'
         - 'learning_rate'
         - 'momentum'
     """
     self.hidden_layers = kwargs.get('hidden_layers', 3)
     self.prediction_window = kwargs.get('prediction_window', 1)
     self.learning_rate = kwargs.get('learning_rate', 0.1)
     self.momentum = kwargs.get('momentum', 0.01)
     if not new:
         self.network.sorted = False
         self.network.sortModules()
         if self.network_dataset_type == SUPERVISED_DATASET:
             self.ready_supervised_dataset(dataset)
         else: raise InvalidNetworkDatasetType()
     else:
         if self.network_type == FEED_FORWARD_NETWORK:
             self.network = buildNetwork(len(self.train_data), self.hidden_layers, 1)
         else: raise InvalidNetworkType()
         if self.network_dataset_type == SUPERVISED_DATASET:
             self.ready_supervised_dataset(dataset)
         else: raise InvalidNetworkDatasetType()
         if self.trainer_type == BACKPROP_TRAINER:
             self.trainer = BackpropTrainer(self.network,
                                            learningrate=self.learning_rate,
                                            momentum=self.momentum,
                                            verbose=True)
             self.trainer.setData(self.network_dataset)
         else: raise InvalidTrainerType()
Example no. 7
def get_trained_ann(dataset, ann=None, test_train_prop=0.25, max_epochs=50):
    tstdata, trndata = dataset.splitWithProportion(test_train_prop)
    trndata._convertToOneOfMany()
    tstdata._convertToOneOfMany()
    if not ann:
        ann = build_ann(trndata.indim, trndata.outdim)
#        ann = build_exp_ann(trndata.indim, trndata.outdim)
#    trainer = RPropMinusTrainer(ann)
    trainer = BackpropTrainer(ann, dataset=trndata, learningrate=0.01, momentum=0.5, verbose=True)
    trnresult = tstresult = 0
#    for i in range(10):
    trainer.trainUntilConvergence(maxEpochs=max_epochs, verbose=True)
    trnresult = percentError(trainer.testOnClassData(), trndata['class'])
    tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])
#        print trnresult, tstresult
    return ann, trnresult, tstresult
Example no. 8
 def init(self):
     self.networks = []
     self.trainers = []
     self.starting_weights = []
     for i in range(self.size):  #@UnusedVariable
         if self.num_hid2 == 0:
             network = buildNetwork(self.num_inp,
                                    self.num_hid1,
                                    self.num_out,
                                    hiddenclass=SigmoidLayer,
                                    bias=True)
         else:
             network = buildNetwork(self.num_inp,
                                    self.num_hid1,
                                    self.num_hid2,
                                    self.num_out,
                                    hiddenclass=SigmoidLayer,
                                    bias=True)
         starting_weights = network.params.copy()
         trainer = BackpropTrainer(network,
                                   learningrate=LEARNING_RATE,
                                   momentum=MOMENTUM_LOW,
                                   verbose=False)
         self.networks.append(network)
         self.trainers.append(trainer)
         self.starting_weights.append(starting_weights)
Example no. 9
def NetworkTrain(trainDataSet, mnetwork=None, file='NetworkDump.pkl', maxEpochs=100):
    if mnetwork is None:
        mnetwork = NetworkBuild(new=True)
    assert len(mnetwork[0].inmodules) == len(mnetwork[1].keys())
    print('DEBUG')
    #print(trainDataSet)
    print("lens " + str(len(trainDataSet[0][0])) + " " + str(len(mnetwork[0].inmodules)))
    # define the dataset format
    DS = SupervisedDataSet(len(trainDataSet[0][0]), len(trainDataSet[0][1]))

    for itrainDataSet in trainDataSet:
        indata = itrainDataSet[0]
        outdata = itrainDataSet[1]

        DS.addSample(indata, outdata)

    # to read back the inputs/outputs stored inside, you can use:
    # to split the dataset into a training set and a test set (train:test = 8:2), the statement below can be used
    # to make later calls easier, the inputs and outputs can be pulled out separately




    # the trainer uses the BP (backpropagation) algorithm
    # verbose=True prints the total error while training; the library's default train/validation ratio is 4:1 and can be changed in the call
    mnetwork[0].sortModules()
    trainer = BackpropTrainer(mnetwork[0], DS, verbose=True, learningrate=0.01)
    # 0.0575
    # maxEpochs is the maximum number of iterations allowed for convergence; training runs until convergence here, and I usually set it to 1000
    trainer.trainUntilConvergence(maxEpochs=maxEpochs)
    '''
    for mod in mnetwork[0].modules:
        print "Module:", mod.name
        if mod.paramdim > 0:
            print "--parameters:", mod.params
        for conn in mnetwork[0].connections[mod]:
            print "-connection to", conn.outmod.name
            if conn.paramdim > 0:
                print "- parameters", conn.params
        if hasattr(mnetwork[0], "recurrentConns"):
            print "Recurrent connections"
            for conn in mnetwork[0].recurrentConns:
                print "-", conn.inmod.name, " to", conn.outmod.name
                if conn.paramdim > 0:
                    print "- parameters", conn.params
        '''
    pickle.dump(mnetwork, open(file, 'wb'))
    return mnetwork
Example no. 10
 def train(self,
           dataset,
           maxEpochs=10,
           learningrate=0.01,
           momentum=0.99,
           continueEpochs=10,
           validationProportion=0.25):
     '''trains a network with the given dataset
     
     :param SupervisedDataSet dataset: the training dataset
     :param int maxEpochs: max number of iterations to train the network
      :param float learningrate: controls the size of each weight update
     :param float momentum: helps out of local minima while training, to get better results
     '''
     self.trainer = BackpropTrainer(self.net,
                                    learningrate=learningrate,
                                    momentum=momentum)
     self.trainer.trainOnDataset(dataset, maxEpochs)
Example no. 11
    def train(self, x, y, class_number=-1):
        self.__class_num = max(np.unique(y).size, class_number)
        if max(y) == self.__class_num:
            self.__class_zero_indexing = False
            y = np.array([i - 1 for i in y])

        DS = ClassificationDataSet(x.shape[1], nb_classes=self.__class_num)
        DS.setField('input', x)
        DS.setField('target', y.reshape(y.size, 1))
        DS._convertToOneOfMany()

        hidden_num = (DS.indim + DS.outdim) // 2

        self.__pybrain_bpnn = buildNetwork(DS.indim, hidden_num, DS.outdim, bias=True, hiddenclass=SigmoidLayer, outclass=SoftmaxLayer)

        trainer = BackpropTrainer(self.__pybrain_bpnn, dataset=DS, learningrate=0.07, lrdecay=1.0, momentum=0.6)

        trainer.trainUntilConvergence(DS, maxEpochs=30)
Example no. 12
 def trainConvergence(self, dataset, maxEpochs = 10, learningrate = 0.01, momentum = 0.99, continueEpochs=10, validationProportion=0.25):
      '''trains a network with the given dataset until it converges
     
     :param SupervisedDataSet dataset: the training dataset
     :param int maxEpochs: max number of iterations to train the network
      :param float learningrate: controls the size of each weight update
     :param float momentum: helps out of local minima while training, to get better results
     '''
     self.trainer = BackpropTrainer(self.net, learningrate = learningrate, momentum = momentum)
     self.trainer.trainUntilConvergence(dataset, maxEpochs, False, continueEpochs, validationProportion)
Example no. 13
    def __init__(self, features_num, hidden_neurons_num):
        super().__init__()
        self.is_learning = True

        self.features_num = features_num
#         self.net = buildNetwork(features_num, hidden_neurons_num, 1, bias = True)
#         self.net = buildNetwork(features_num, hidden_neurons_num, hidden_neurons_num, 1, bias = True)
#         self.net = ConvolutionalBoardNetwork(Board.BOARD_SIZE, 5, 3)
#         self.trainer = BackpropTrainer(self.net)
        
        self.net_attack = buildNetwork(features_num, hidden_neurons_num, hidden_neurons_num, 1, bias = True)
        self.net_defence = buildNetwork(features_num, hidden_neurons_num, hidden_neurons_num, 1, bias = True)
        self.trainer_attack = BackpropTrainer(self.net_attack)
        self.trainer_defence = BackpropTrainer(self.net_defence)
                
        self.gamma = 0.9
        self.errors = []
        self.buf = np.zeros(200)
        self.buf_index = 0
        self.setup()        
Example no. 14
    def train(self, players=2, games=300, epochs=50, print_fitness=False):
        """
        nn = Coup_NN()
        nn.train(2, print_fitness=True)
        """
        from pybrain.tools.shortcuts import buildNetwork
        from pybrain import SigmoidLayer
        from pybrain.supervised.trainers.backprop import BackpropTrainer
        from pybrain.datasets import SupervisedDataSet
        from simulations import simulations
        from collections import Counter
        
        INPUT_NEURONS_PER_PLAYER = 5
        OUTPUT_NEURONS = 5
        HIDDEN_NEURONS = 10
    
        ds = SupervisedDataSet(players * INPUT_NEURONS_PER_PLAYER, OUTPUT_NEURONS)
        self.NETS[players] = buildNetwork(players * INPUT_NEURONS_PER_PLAYER, HIDDEN_NEURONS, OUTPUT_NEURONS, bias=True, outputbias=True, hiddenclass=SigmoidLayer)
        trainer = BackpropTrainer(self.NETS[players], ds, learningrate=0.1)
        WINS = []
        POSITIONS = []
    
        for _ in range(games):
            game_result = simulations.duel(Play_Coup(2))
            WINS.append(game_result.winner.alpha)
            POSITIONS.append(game_result.influence_binary)
            ds.addSample(game_result.influence_binary, game_result.winner.influence_binary)        
    
        trainer.trainEpochs(epochs)

        if print_fitness:
            norm_results = dict(Counter(WINS).most_common())
            nn_results = dict(Counter(self.game_winner(self.NETS[players].activate(p)) for p in POSITIONS).most_common())
    
            print(''.ljust(25), 'normal', 'nn')
            for pair in set(nn_results) | set(norm_results):
                print(pair.ljust(25), str(norm_results.get(pair,0)).ljust(6), str(nn_results.get(pair,0)).ljust(6))
            
        with open('coup_nn-{0}'.format(players), 'wb') as neunet:
            pickle.dump(self.NETS[players], neunet)
Example no. 15
def main():

    # creating the training data
    datasetTreino = montaDados()

    # creating the test data
    datasetTeste = montaDados()

    # defining the neural network structure:
    # the input is the dataset's input dimension (3),
    # with 12 neurons in the first hidden layer,
    # 6 neurons in the second hidden layer,
    # and the dataset's output dimension (1) as output,
    # using a bias unit so the network adapts better
    network = buildNetwork(datasetTreino.indim, 12, 6, datasetTreino.outdim, bias=True)

    # creating the trainer with the network structure
    # defined in the network object, trained on the training dataset
    neuralNetwork = BackpropTrainer(network, datasetTreino, learningrate=0.01, momentum=0.9)

    # training the network
    neuralNetwork.trainEpochs(1500)

    # validating the network
    neuralNetwork.testOnData(datasetTeste, verbose=True)
Example no. 16
def main():

    # creating the dataset, where the input is a
    # vector of size 2 and the output is a scalar
    dataset = SupervisedDataSet(2, 1)

    criandoDataset(dataset)

    # building the neural network with, respectively,
    # the network's input size,
    # the number of neurons in the hidden layer,
    # the network's output size,
    # and a bias unit that lets the network adapt over time
    network = buildNetwork(dataset.indim, 4, dataset.outdim, bias=True)

    # creating the network's training method,
    # passing the network,
    # the dataset,
    # the learning rate,
    # and a momentum term that speeds up training
    trainer = BackpropTrainer(network,
                              dataset,
                              learningrate=0.01,
                              momentum=0.99)

    # loop that trains the network
    for epocas in range(0, 1000):

        trainer.train()

    # running the test
    datasetTeste = SupervisedDataSet(2, 1)
    criandoDataset(datasetTeste)
    trainer.testOnData(datasetTeste, verbose=True)
Example no. 17
    def train(network_file, input_length, output_length, training_data_file,
              learning_rate, momentum, stop_on_convergence, epochs, classify):
        n = get_network(network_file)
        if classify:
            ds = ClassificationDataSet(int(input_length),
                                       int(output_length) * 2)
            ds._convertToOneOfMany()
        else:
            ds = SupervisedDataSet(int(input_length), int(output_length))
        training_data = get_training_data(training_data_file)

        NetworkManager.last_training_set_length = 0
        for line in training_data:
            data = [float(x) for x in line.strip().split(',') if x != '']
            input_data = tuple(data[:(int(input_length))])
            output_data = tuple(data[(int(input_length)):])
            ds.addSample(input_data, output_data)
            NetworkManager.last_training_set_length += 1

        t = BackpropTrainer(n,
                            learningrate=learning_rate,
                            momentum=momentum,
                            verbose=True)
        print "training network " + network_storage_path + network_file

        if stop_on_convergence:
            t.trainUntilConvergence(ds, epochs)
        else:
            if classify:
                t.trainOnDataset(ds['class'], epochs)
            else:
                t.trainOnDataset(ds, epochs)

        error = t.testOnData()
        print("training done")
        if not math.isnan(error):
            save_network(n, network_file)
            print("network saved")
        else:
            print("error occurred, network not saved")

        return error
Example no. 18
 def build_network(self, dataset, new=True, **kwargs):
     """
     Builds a neural network using the dataset provided.
     Expected keyword args:
         - 'hidden_layers'
         - 'prediction_window'
         - 'learning_rate'
         - 'momentum'
     """
     self.hidden_layers = kwargs.get('hidden_layers', 3)
     self.prediction_window = kwargs.get('prediction_window', 1)
     self.learning_rate = kwargs.get('learning_rate', 0.1)
     self.momentum = kwargs.get('momentum', 0.01)
     if not new:
         self.network.sorted = False
         self.network.sortModules()
         if self.network_dataset_type == SUPERVISED_DATASET:
             self.ready_supervised_dataset(dataset)
         else:
             raise InvalidNetworkDatasetType()
     else:
         if self.network_type == FEED_FORWARD_NETWORK:
             self.network = buildNetwork(len(self.train_data),
                                         self.hidden_layers, 1)
         else:
             raise InvalidNetworkType()
         if self.network_dataset_type == SUPERVISED_DATASET:
             self.ready_supervised_dataset(dataset)
         else:
             raise InvalidNetworkDatasetType()
         if self.trainer_type == BACKPROP_TRAINER:
             self.trainer = BackpropTrainer(self.network,
                                            learningrate=self.learning_rate,
                                            momentum=self.momentum,
                                            verbose=True)
             self.trainer.setData(self.network_dataset)
         else:
             raise InvalidTrainerType()
Example no. 19
    def __init__(self, **kwargs):

        self.max_depth = 0
        self.stats = {}

        self.calculation_time = float(kwargs.get('time', 1))
        self.max_moves = int(kwargs.get('max_moves', Board.BOARD_SIZE_SQ))

        # Exploration constant, increase for more exploratory moves,
        # decrease to prefer moves with known higher win rates.
        self.C = float(kwargs.get('C', 1.4))

        self.features_num = Board.BOARD_SIZE_SQ * 3 + 2
        self.hidden_neurons_num = self.features_num * 2
        self.net = buildNetwork(self.features_num,
                                self.hidden_neurons_num,
                                2,
                                bias=True,
                                outclass=SigmoidLayer)
        self.trainer = BackpropTrainer(self.net)

        self.total_sim = 0
        self.observation = []
Example no. 20
def begin2():

    cbf = readFromCsv("cbf2")
    numdataset = np.array(cbf, dtype=np.float64)
    # training data, validation data, today's data
    tgdataset, vadataset, tydata = dataSplit(numdataset)
    # normalization parameters
    gydata, dmean, dstd = gyData(tgdataset)

    # validation and today's data
    gyvadata = calFeature(vadataset, dmean, dstd)
    gytydata = calFeature(tydata, dmean, dstd)

    tset = buildTrainingSet(gyvadata)

    net = NetworkReader.readFrom("../netv3/zxtx_8l_100t_6_0.785714285714.xml")
    trainer = BackpropTrainer(net, tset)
    trainer.trainEpochs(epochs=100)

    li = []
    for ele in gytydata[0]:
        li.append(ele)

    print(dec2int(net.activate(li[:-1])))
Example no. 21
def main():
    print "Calculating mfcc...."
    mfcc_coeff_vectors_dict = {}
    for i in range(1, 201):
        extractor = FeatureExtractor(
            '/home/venkatesh/Venki/FINAL_SEM/Project/Datasets/Happiness/HappinessAudios/' + str(i) + '.wav')
        mfcc_coeff_vectors = extractor.calculate_mfcc()
        mfcc_coeff_vectors_dict.update({str(i): (mfcc_coeff_vectors, mfcc_coeff_vectors.shape[0])})

    for i in range(201, 401):
        extractor = FeatureExtractor(
            '/home/venkatesh/Venki/FINAL_SEM/Project/Datasets/Sadness/SadnessAudios/' + str(i - 200) + '.wav')
        mfcc_coeff_vectors = extractor.calculate_mfcc()
        mfcc_coeff_vectors_dict.update({str(i): (mfcc_coeff_vectors, mfcc_coeff_vectors.shape[0])})

    audio_with_min_frames, min_frames = get_min_frames_audio(
        mfcc_coeff_vectors_dict)
    processed_mfcc_coeff = preprocess_input_vectors(
        mfcc_coeff_vectors_dict, min_frames)
    # frames = min_frames
    # print frames
    # print len(processed_mfcc_coeff['1'])
    # for each_vector in processed_mfcc_coeff['1']:
    #     print len(each_vector)
    print "mffcc found..."
    classes = ["happiness", "sadness"]

    training_data = ClassificationDataSet(
        26, target=1, nb_classes=2, class_labels=classes)
    # training_data = SupervisedDataSet(13, 1)
    try:
        network = NetworkReader.readFrom(
            'network_state_frame_level_new2_no_pp1.xml')
    except Exception:
        for i in range(1, 51):
            mfcc_coeff_vectors = processed_mfcc_coeff[str(i)]
            for each_vector in mfcc_coeff_vectors:
                training_data.appendLinked(each_vector, [1])

        for i in range(201, 251):
            mfcc_coeff_vectors = processed_mfcc_coeff[str(i)]
            for each_vector in mfcc_coeff_vectors:
                training_data.appendLinked(each_vector, [0])

        training_data._convertToOneOfMany()
        print "prepared training data.."
        print training_data.indim, training_data.outdim
        network = buildNetwork(
            training_data.indim, 5, training_data.outdim, fast=True)
        trainer = BackpropTrainer(network, learningrate=0.01, momentum=0.99)
        print "Before training...", trainer.testOnData(training_data)
        trainer.trainOnDataset(training_data, 1000)
        print "After training...", trainer.testOnData(training_data)
        NetworkWriter.writeToFile(
            network, "network_state_frame_level_new2_no_pp.xml")
Example no. 22
def main():
    print("Calculating mfcc....")
    mfcc_coeff_vectors_dict = {}
    for i in range(1, 201):
        extractor = FeatureExtractor('/home/venkatesh/Venki/FINAL_SEM/Project/Datasets/Happiness/HappinessAudios/' + str(i) + '.wav')
        mfcc_coeff_vectors = extractor.calculate_mfcc()
        mfcc_coeff_vectors_dict.update({str(i): (mfcc_coeff_vectors, mfcc_coeff_vectors.shape[0])})

    for i in range(201, 401):
        extractor = FeatureExtractor('/home/venkatesh/Venki/FINAL_SEM/Project/Datasets/Sadness/SadnessAudios/' + str(i - 200) + '.wav')
        mfcc_coeff_vectors = extractor.calculate_mfcc()
        mfcc_coeff_vectors_dict.update({str(i): (mfcc_coeff_vectors, mfcc_coeff_vectors.shape[0])})

    audio_with_min_frames, min_frames = get_min_frames_audio(mfcc_coeff_vectors_dict)
    processed_mfcc_coeff = preprocess_input_vectors(mfcc_coeff_vectors_dict, min_frames)
    frames = min_frames
    print "mfcc found...."
    classes = ["happiness", "sadness"]
    try:
        network = NetworkReader.readFrom('network_state_new_.xml')
    except Exception:
        # Create new network and start Training
        training_data = ClassificationDataSet(frames * 26, target=1, nb_classes=2, class_labels=classes)
        # training_data = SupervisedDataSet(frames * 39, 1)
        for i in range(1, 151):
            mfcc_coeff_vectors = processed_mfcc_coeff[str(i)]
            training_data.appendLinked(mfcc_coeff_vectors.ravel(), [1])
            # training_data.addSample(mfcc_coeff_vectors.ravel(), [1])

        for i in range(201, 351):
            mfcc_coeff_vectors = processed_mfcc_coeff[str(i)]
            training_data.appendLinked(mfcc_coeff_vectors.ravel(), [0])
            # training_data.addSample(mfcc_coeff_vectors.ravel(), [0])

        training_data._convertToOneOfMany()
        network = buildNetwork(training_data.indim, 5, training_data.outdim)
        trainer = BackpropTrainer(network, learningrate=0.01, momentum=0.99)
        print "Before training...", trainer.testOnData(training_data)
        trainer.trainOnDataset(training_data, 1000)
        print "After training...", trainer.testOnData(training_data)
        NetworkWriter.writeToFile(network, "network_state_new_.xml")

    print "*" * 30 , "Happiness Detection", "*" * 30
    for i in range(151, 201):
        output = network.activate(processed_mfcc_coeff[str(i)].ravel())
        # print output,
        # if output > 0.7:
        #     print "happiness"
        class_index = max(xrange(len(output)), key=output.__getitem__)
        class_name = classes[class_index]
        print class_name
Example no. 23
    def __init__(self, **kwargs):

        self.max_depth = 0
        self.stats = {}

        self.calculation_time = float(kwargs.get('time', 1))
        self.max_moves = int(kwargs.get('max_moves', Board.BOARD_SIZE_SQ))

        # Exploration constant, increase for more exploratory moves,
        # decrease to prefer moves with known higher win rates.
        self.C = float(kwargs.get('C', 1.4))

        self.features_num = Board.BOARD_SIZE_SQ * 3 + 2
        self.hidden_neurons_num = self.features_num * 2
        self.net = buildNetwork(self.features_num, self.hidden_neurons_num, 2, bias=True, outclass=SigmoidLayer)
        self.trainer = BackpropTrainer(self.net)

        self.total_sim = 0
        self.observation = []
Example no. 24
    def fit(self, inp, y, sample_weight=None):
        self.classes_, y = numpy.unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)

        n_features = inp.shape[1]
        random_state = check_random_state(self.random_state)

        # We need to build an ImportanceDataSet from inp, y and sample_weight
        dataset = ImportanceDataSet(n_features, self.n_classes_)
        if sample_weight is None:
            sample_weight = numpy.ones(len(y))
        for x, label_pos, weight in izip(inp, y, sample_weight):
            target = numpy.zeros(self.n_classes_)
            target[label_pos] = 1
            weight = weight * numpy.ones(self.n_classes_)
            dataset.newSequence()
            dataset.addSample(x, target, weight)

        if self.hidden_neurons is None:
            hidden_neurons = (n_features + self.n_classes_) // 2
        else:
            hidden_neurons = self.hidden_neurons
        self.network_ = buildNetwork(
            n_features, hidden_neurons, self.n_classes_,
            outclass=self._get_output_class()
        )

        # Set the initial parameters in a repeatable way
        net_params = random_state.random_sample(self.network_.paramdim)
        self.network_.params[:] = net_params

        self.trainer_ = BackpropTrainer(
            self.network_, dataset=dataset, learningrate=self.learning_rate,
            lrdecay=self.lr_decay, momentum=self.momentum,
            weightdecay=self.weight_decay
        )
        self.trainer_.trainUntilConvergence(
            maxEpochs=self.max_epochs,
            continueEpochs=self.continue_epochs,
            validationProportion=self.validation_percent
        )
        return self
Example no. 25
def get_trained_ann(dataset, ann=None, test_train_prop=0.25, max_epochs=50):
    tstdata, trndata = dataset.splitWithProportion(test_train_prop)
    trndata._convertToOneOfMany()
    tstdata._convertToOneOfMany()
    if not ann:
        ann = build_ann(trndata.indim, trndata.outdim)


#        ann = build_exp_ann(trndata.indim, trndata.outdim)
#    trainer = RPropMinusTrainer(ann)
    trainer = BackpropTrainer(ann,
                              dataset=trndata,
                              learningrate=0.01,
                              momentum=0.5,
                              verbose=True)
    trnresult = tstresult = 0
    #    for i in range(10):
    trainer.trainUntilConvergence(maxEpochs=max_epochs, verbose=True)
    trnresult = percentError(trainer.testOnClassData(), trndata['class'])
    tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                             tstdata['class'])
    #        print trnresult, tstresult
    return ann, trnresult, tstresult
Example no. 26
testdata, traindata = alldata.splitWithProportion(0.25)
testdata._convertToOneOfMany()
traindata._convertToOneOfMany()

print("data dimensions ", len(traindata), traindata.indim, testdata.indim)
print("sample test data input", traindata['input'][0])
print("sample test data target", traindata['target'][0])
print("sample test data class",  traindata["class"][0])

#for key in traindata.data:
#    print(key)

fnn = buildNetwork(traindata.indim, 5, traindata.outdim, outclass=SoftmaxLayer)

trainer = BackpropTrainer(fnn, dataset=traindata, momentum=0.1, verbose=True, weightdecay=0.01)

ticks = arange(-3., 6., 0.2)
X, Y = meshgrid(ticks, ticks)

griddata = ClassificationDataSet(2, 1, nb_classes=3)
for i in range(X.size):
    griddata.addSample([X.ravel()[i],Y.ravel()[i]], [0])

griddata._convertToOneOfMany()

for i in range(20):
    trainer.trainEpochs(1) # usually 5
    
trainresult = percentError(trainer.testOnClassData(), traindata["class"])
testresult = percentError(trainer.testOnClassData(dataset=testdata), testdata["class"])
Example no. 27
from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules.tanhlayer import TanhLayer
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.supervised.trainers.mixturedensity import BackpropTrainerMix

def printNetResult(identifier, net):
    print(identifier, net.activate((0, 0)), net.activate((0, 1)), net.activate((1, 0)), net.activate((1, 1)))    

ds = SupervisedDataSet(2,1)

ds.addSample((0, 0), (0,))
ds.addSample((0, 1), (1,))
ds.addSample((1, 0), (1,))
ds.addSample((1, 1), (0,))

for input, target in ds:
    print(input, target)
    
#net = buildNetwork(2, 3, 1, bias=True, hiddenclass=TanhLayer)#1000
# net = buildNetwork(2, 6, 1, bias=True) # 3000
net = buildNetwork(2, 3, 1, bias=True)

trainer = BackpropTrainer(net, ds)

for i in range(20):
    for j in range(1000):               
        trainer.train()
    printNetResult(i, net)
Example no. 28
net.addModule(hiddenLayer0)
net.addModule(hiddenLayer1)
net.addOutputModule(outLayer)

in_to_hidden0 = FullConnection(inLayer, hiddenLayer0)
hidden0_to_hidden1 = FullConnection(hiddenLayer0, hiddenLayer1)
hidden1_to_out = FullConnection(hiddenLayer1, outLayer)

net.addConnection(in_to_hidden0)
net.addConnection(hidden0_to_hidden1)
net.addConnection(hidden1_to_out)

net.sortModules()

#net = buildNetwork(2, 4, 3, 1, bias=True, hiddenclass=TanhLayer)
trainer = BackpropTrainer(net, ds, verbose = True, learningrate=0.01)
#trainer = BackpropTrainer(net, ds)

net_output_file = open("hw1data.net",'w')

print(net)
print(net, file=net_output_file)

for mod in net.modules:
    print("Module:", mod.name)
    print("Module:", mod.name, file=net_output_file)
    if mod.paramdim > 0:
        print("--parameters:", mod.params)
        print("--parameters:", mod.params, file=net_output_file)
    for conn in net.connections[mod]:
        print("-connection to", conn.outmod.name, file=net_output_file)
Example no. 29
    denorm_output = []
#    prediction = [[],[],[]]
    prediction = []
   
    training_normalization(args.f1, args.min, args.max)
    #initialize dataset for the neural network with 5 inputs + bias and 1 target
    DS = SupervisedDataSet(5, 1)

    #adding datasets to the network
    for i in range(0, len(normal_array[0])):
        #       DS.addSample([normal_array[0][i],normal_array[1][i],normal_array[2][i],normal_array[3][i],normal_array[4][i]],[normal_array[5][i],normal_array[6][i],normal_array[7][i]])
        DS.addSample([
            normal_array[0][i], normal_array[1][i], normal_array[2][i],
            normal_array[3][i], normal_array[4][i]
        ], [normal_array[5][i]])

#    NN = buildNetwork(5,4,3,bias =True,hiddenclass=TanhLayer)
    NN = buildNetwork(DS.indim, 5, DS.outdim, bias=True, hiddenclass=TanhLayer)
    TRAINER = BackpropTrainer(NN, dataset=DS, learningrate=0.01, momentum=0.99)

    print('MSE before', TRAINER.testOnData(DS))
    TRAINER.trainOnDataset(DS, 500)
    print('MSE after', TRAINER.testOnData(DS))

    # testing
    # clearing arrays
    normal_array = [[], [], [], [], [], [], [], []]
    normalized_input = [[], [], [], [], []]
    max_array = [[], [], [], [], [], [], [], []]
    min_array = [[], [], [], [], [], [], [], []]
    range_array = [[], [], [], [], [], [], [], []]
    x_axis = []
    pred_arr = []
    act_arr = []
Example no. 30
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers.backprop import BackpropTrainer
from odyseus_model import OdyseusRecursiveTwoThrusters
import numpy as np
import optparse
from tools.common_run import add_common_options

if __name__ == "__main__":
    usage = "python run_pretrain.py resulting_file.neu \n this script pretrain net of model described in|" \
            " OdyseusRecursiveTwoThrusters, for 5 input nodes. Resulting network is saved to desired file"
    parser = optparse.OptionParser(usage=usage)
    opts, args = parser.parse_args()
    ds = SupervisedDataSet(5, 2)
    ds.addSample((50,50,50,0,0),(0,1))
    ds.addSample((0,50,50,50,0),(1,1))
    ds.addSample((0,0,50,0,0), (1,1))
    ds.addSample((0,0,50,50,50),(1,0))
    net = OdyseusRecursiveTwoThrusters.random_net()
    trainer = BackpropTrainer(net, ds)
    trainer.trainEpochs(3600)
    np.savetxt(args[0], trainer.module.params, delimiter=',')
Example no. 31
class NeuralNetwork(object):
    """
    The neural network class does all the heavy lifting to incorporate pybrain
    neural networks into the NowTrade ecosystem.
    """
    def __init__(self, train_data, prediction_data, network_type=FEED_FORWARD_NETWORK,
                 network_dataset_type=SUPERVISED_DATASET,
                 trainer_type=BACKPROP_TRAINER):
        self.train_data = train_data
        self.prediction_data = prediction_data
        self.network_type = network_type
        self.network_dataset_type = network_dataset_type
        self.trainer_type = trainer_type
        self.network = None
        self.network_dataset = None
        self.dataset = None
        self.trainer = None
        self.trained_iterations = 0
        self.momentum = None
        self.learning_rate = None
        self.hidden_layers = None
        self.prediction_window = None
        self.logger = logger.Logger(self.__class__.__name__)
        self.logger.info('train_data: %s  prediction_data: %s, network_type: %s, \
                          network_dataset_type: %s, trainer_type: %s'
                         %(train_data, prediction_data, network_type, \
                           network_dataset_type, trainer_type))

    def save(self):
        """
        Returns the pickled trained/tested neural network as a string.
        """
        return cPickle.dumps(self)

    def save_to_file(self, filename):
        """
        Saves a neural network to file for later use.

        Look into pybrain.datasets.supervised.SupervisedDataSet.saveToFile()
        http://pybrain.org/docs/api/datasets/superviseddataset.html
        """
        file_handler = open(filename, 'wb')
        cPickle.dump(self, file_handler)
        file_handler.close()

    def build_network(self, dataset, new=True, **kwargs):
        """
        Builds a neural network using the dataset provided.
        Expected keyword args:
            - 'hidden_layers'
            - 'prediction_window'
            - 'learning_rate'
            - 'momentum'
        """
        self.hidden_layers = kwargs.get('hidden_layers', 3)
        self.prediction_window = kwargs.get('prediction_window', 1)
        self.learning_rate = kwargs.get('learning_rate', 0.1)
        self.momentum = kwargs.get('momentum', 0.01)
        if not new:
            self.network.sorted = False
            self.network.sortModules()
            if self.network_dataset_type == SUPERVISED_DATASET:
                self.ready_supervised_dataset(dataset)
            else: raise InvalidNetworkDatasetType()
        else:
            if self.network_type == FEED_FORWARD_NETWORK:
                self.network = buildNetwork(len(self.train_data), self.hidden_layers, 1)
            else: raise InvalidNetworkType()
            if self.network_dataset_type == SUPERVISED_DATASET:
                self.ready_supervised_dataset(dataset)
            else: raise InvalidNetworkDatasetType()
            if self.trainer_type == BACKPROP_TRAINER:
                self.trainer = BackpropTrainer(self.network,
                                               learningrate=self.learning_rate,
                                               momentum=self.momentum,
                                               verbose=True)
                self.trainer.setData(self.network_dataset)
            else: raise InvalidTrainerType()

    def ready_supervised_dataset(self, dataset):
        """
        Ready the supervised dataset for training.

        @TODO: Need to randomize the data being fed to the network.
        See randomBatches() here: http://pybrain.org/docs/api/datasets/superviseddataset.html
        """
        self.network_dataset = SupervisedDataSet(len(self.train_data), 1)
        # Currently only supports log function for normalizing data
        training_values = np.log(dataset.data_frame[self.train_data])
        results = np.log(dataset.data_frame[self.prediction_data].shift(-self.prediction_window))
        training_values['PREDICTION_%s' %self.prediction_data[0]] = results
        training_values = training_values.dropna()
        for _, row_data in enumerate(training_values.iterrows()):
            _, data = row_data
            sample = list(data[:-1])
            result = [data[-1]]
            self.network_dataset.addSample(sample, result)

    def train(self, cycles=1):
        """
        Trains the network the number of iteration specified in the cycles parameter.
        """
        res = None
        for _ in range(cycles):
            res = self.trainer.train()
            self.trained_iterations += 1
        return res

    def train_until_convergence(self, max_cycles=1000, continue_cycles=10,
                                validation_proportion=0.25):
        """
        Wrapper around the pybrain BackpropTrainer trainUntilConvergence method.

        @see: http://pybrain.org/docs/api/supervised/trainers.html
        """
        self.trainer.trainUntilConvergence(maxEpochs=max_cycles,
                                           continueEpochs=continue_cycles,
                                           validationProportion=validation_proportion)

    def _activate(self, data):
        """
        Activates the network using the data specified.
        Returns the network's prediction.
        """
        return self.network.activate(data)[0]

    def activate_all(self, data_frame):
        """
        Activates the network for all values in the dataframe specified.
        """
        dataframe = np.log(data_frame[self.train_data])
        res = []
        for _, row_data in enumerate(dataframe.iterrows()):
            _, data = row_data
            sample = list(data)
            res.append(self._activate(sample))
        return np.exp(res)
Example no. 32
    min_max_scaler = preprocessing.MinMaxScaler()
    train_data = min_max_scaler.fit_transform(train_data)

    #Create the training data
    D = SupervisedDataSet(len(train_data[0]), 1)  #input, target

    for counter, item in enumerate(train_data):
        D.addSample(train_data[counter], answer[counter])

    #print D['target']

    #Create the NN
    N = buildNetwork(len(train_data[0]), 200, 1, bias=True)  #152 76=(152+1)/2

    #Train the NN with backpropagation
    T = BackpropTrainer(N, D, learningrate=0.1, momentum=0.9)

    i = 0
    error = []
    time_before = time.time()
    while i < 50 and T.testOnData(D) > 0.001:
        errordata = T.testOnData(D)

        if i % 1 == 0:
            print(i, '\tMSE:', round(errordata, 6), '\tTime:',
                  round(time.time() - time_before, 6))

        #Store the error in a list to plot
        error.append(errordata)

        T.train()
        i += 1
Example no. 33
    #Create the training data
    D = SupervisedDataSet(len(train_data[0]),1) #input, target

    for counter,item in enumerate(train_data):
        D.addSample(train_data[counter], answer[counter])

    #print D['target']


    #Create the NN
    N = buildNetwork(len(train_data[0]),200,1, bias=True) #152 76=(152+1)/2


    #Train the NN with backpropagation
    T = BackpropTrainer(N, D, learningrate = 0.1, momentum = 0.9)

    i=0
    error = []
    time_before = time.time()
    while i < 50 and T.testOnData(D) > 0.001:
        errordata = T.testOnData(D)

        if i % 1 == 0:
            print(i, '\tMSE:', round(errordata, 6), '\tTime:', round(time.time() - time_before, 6))

        #Store the error in a list to plot
        error.append(errordata)

        T.train()
        i += 1
Example no. 34
    training_normalization(args.f1, args.min, args.max)
    #initialize dataset for the neural network with 5 inputs + bias and 1 target
    DS = SupervisedDataSet(5, 1)

    #adding datasets to the network
    for i in range(0, len(normal_array[0])):
        #       DS.addSample([normal_array[0][i],normal_array[1][i],normal_array[2][i],normal_array[3][i],normal_array[4][i]],[normal_array[5][i],normal_array[6][i],normal_array[7][i]])
        DS.addSample([
            normal_array[0][i], normal_array[1][i], normal_array[2][i],
            normal_array[3][i], normal_array[4][i]
        ], [normal_array[5][i]])

#    NN = buildNetwork(5,4,3,bias =True,hiddenclass=TanhLayer)
    NN = buildNetwork(DS.indim, 5, DS.outdim, bias=True, hiddenclass=TanhLayer)
    TRAINER = BackpropTrainer(NN, dataset=DS, learningrate=0.01, momentum=0.99)

    print('MSE before', TRAINER.testOnData(DS))
    TRAINER.trainOnDataset(DS, 500)
    print('MSE after', TRAINER.testOnData(DS))

    # testing
    #clearing arrays
    normal_array = [[], [], [], [], [], [], [], []]
    normalized_input = [[], [], [], [], []]
    max_array = [[], [], [], [], [], [], [], []]
    min_array = [[], [], [], [], [], [], [], []]
    range_array = [[], [], [], [], [], [], [], []]
    x_axis = []
    pred_arr = []
    act_arr = []
Example no. 35
net.addModule(hiddenLayer0)
net.addModule(hiddenLayer1)
net.addOutputModule(outLayer)

in_to_hidden0 = FullConnection(inLayer, hiddenLayer0)
hidden0_to_hidden1 = FullConnection(hiddenLayer0, hiddenLayer1)
hidden1_to_out = FullConnection(hiddenLayer1, outLayer)

net.addConnection(in_to_hidden0)
net.addConnection(hidden0_to_hidden1)
net.addConnection(hidden1_to_out)

net.sortModules()

#net = buildNetwork(2, 4, 3, 1, bias=True, hiddenclass=TanhLayer)
trainer = BackpropTrainer(net, ds, verbose=True, learningrate=0.01)
#trainer = BackpropTrainer(net, ds)

net_output_file = open("hw1data.net", 'w')

print(net)
print(net, file=net_output_file)

for mod in net.modules:
    print("Module:", mod.name)
    print("Module:", mod.name, file=net_output_file)
    if mod.paramdim > 0:
        print("--parameters:", mod.params)
        print("--parameters:", mod.params, file=net_output_file)
    for conn in net.connections[mod]:
        print("-connection to", conn.outmod.name, file=net_output_file)
Example no. 36
def ANN(
    trainFeature, trainLabel, testFeature, testLabel, netStructure, para_rate, para_momentum
):  # netStructure is a list [in, hidden1, hidden2, out]; momentum is a parameter of SGD
    sampleNum = trainFeature.shape[0]
    featureNum = trainFeature.shape[1]
    Dataset = SupervisedDataSet(featureNum, 1)
    i = 0
    while i < sampleNum:
        print(i)
        Dataset.addSample(list(trainFeature[i]), [trainLabel[i]])
        i += 1
    Network = buildNetwork(
        netStructure[0],
        netStructure[1],
        netStructure[2],
        netStructure[3],
        hiddenclass=SigmoidLayer,
        outclass=SigmoidLayer,
    )
    T = BackpropTrainer(Network, Dataset, learningrate=para_rate, momentum=para_momentum, verbose=True)
    # print(Dataset['input'])
    errorList = []
    errorList.append(T.testOnData(Dataset))
    T.trainOnDataset(Dataset)
    errorList.append(T.testOnData(Dataset))
    T.trainOnDataset(Dataset)
    while abs(T.testOnData(Dataset) - errorList[-1]) > 0.0001:
        T.trainOnDataset(Dataset)
        errorList.append(T.testOnData(Dataset))
    pass  # this step is for the output of predictedLabel
    print(np.array([Network.activate(x) for x in trainFeature]))
    # print(testLabel)
    print(Network.activate([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
    return errorList
Example no. 37
	while len(b) < len(r):
		b = [0] + b

	a = a[::-1]
	b = b[::-1]
	r = r[::-1]

	ds.newSequence()
	for i in range(len(a)):
		inl = [a[i], b[i]]
		out = [r[i]]
		ds.appendLinked(inl, out)

#trainer = RPropMinusTrainer(n, dataset = ds)
trainer = BackpropTrainer(n, dataset = ds)

print("Generating dataset took", time()-start)

lastlen = 0

start = time()

try:
	while True:
		epochstart = time()
		error = trainer.train()
		tpe = time()-epochstart
		epochs += 1
		out = str(error) + " error " + str(epochs) + " epochs " + str(tpe) + " time per epoch"
		clearspaces = " "*(lastlen-len(out))
Example no. 38
    return n

n = buildNetwork(mx_lag, mx_lag // 2, 1, hiddenclass=LSTMLayer, bias=True, recurrent=True)
#n.randomize()

ts = get_sysprice_list('2011-01-01 00:00:00', '2013-05-31 23:00:00', frequency='daily')
ds = NN_data(ts, mx_lag)[0]
forecast_actual_data = get_sysprice_list('2013-06-01 00:00:00', '2013-06-18 23:00:00', frequency='daily')
'''min_by_mae = [1000, 1000]
min_by_mse = [1000,1000]
best_net_by_mse = None
best_net_by_mae = None'''

#n=pickle.load(open("/home/martin/dev/python_pickle/min_by_mae_nn.p", "rb"))
#n = mk_nn(10)
trainer = BackpropTrainer(n, ds, verbose=True)
trainer.trainUntilConvergence(maxEpochs=5)
forecast = NN_forecast("2013-05-01", "2013-05-31", max_lag=mx_lag, trained_nnet = n, forecast_period_in_days=31)
print_errors(forecast.values, forecast_actual_data.values)
#start timing
'''limit = 120 # seconds
start = time.clock()
while(True):
    duration = time.clock()- start
    if duration >= limit:
        print "execution time: " + str(duration) + "seconds"
        break
    n = mk_nn(10)
    trainer = BackpropTrainer(n, ds, verbose=True, weightdecay=0.01)

#trainer.trainUntilConvergence()
Example no. 39
class PyBrainNetwork(ANNWrapper):

    def __init__(self, hidden_neurons=None, output_class="softmax",
                 learning_rate=0.01, lr_decay=1.0, momentum=0.0,
                 weight_decay=0.0, max_epochs=None, continue_epochs=10,
                 validation_percent=0.25, random_state=None):
        super(PyBrainNetwork, self).__init__()
        self.hidden_neurons = hidden_neurons
        self.output_class = output_class
        self.learning_rate = learning_rate
        self.lr_decay = lr_decay
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.max_epochs = max_epochs
        self.continue_epochs = continue_epochs
        self.validation_percent = validation_percent
        self.random_state = random_state
        self.network_ = None
        self.trainer_ = None

    def fit(self, inp, y, sample_weight=None):
        self.classes_, y = numpy.unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)

        n_features = inp.shape[1]
        random_state = check_random_state(self.random_state)

        # We need to build an ImportanceDataSet from inp, y and sample_weight
        dataset = ImportanceDataSet(n_features, self.n_classes_)
        if sample_weight is None:
            sample_weight = numpy.ones(len(y))
        for x, label_pos, weight in izip(inp, y, sample_weight):
            target = numpy.zeros(self.n_classes_)
            target[label_pos] = 1
            weight = weight * numpy.ones(self.n_classes_)
            dataset.newSequence()
            dataset.addSample(x, target, weight)

        if self.hidden_neurons is None:
            hidden_neurons = (n_features + self.n_classes_) // 2
        else:
            hidden_neurons = self.hidden_neurons
        self.network_ = buildNetwork(
            n_features, hidden_neurons, self.n_classes_,
            outclass=self._get_output_class()
        )

        # Set the initial parameters in a repeatable way
        net_params = random_state.random_sample(self.network_.paramdim)
        self.network_.params[:] = net_params

        self.trainer_ = BackpropTrainer(
            self.network_, dataset=dataset, learningrate=self.learning_rate,
            lrdecay=self.lr_decay, momentum=self.momentum,
            weightdecay=self.weight_decay
        )
        self.trainer_.trainUntilConvergence(
            maxEpochs=self.max_epochs,
            continueEpochs=self.continue_epochs,
            validationProportion=self.validation_percent
        )
        return self

    def activate(self, x):
        return self.network_.activate(x)

    def _get_output_class(self):
        if self.output_class == "softmax":
            return SoftmaxLayer
        elif self.output_class == "tanh":
            return TanhLayer
        elif self.output_class == "sigmoid":
            return SigmoidLayer
        elif self.output_class == "linear":
            return LinearLayer
        else:
            raise ValueError(
                "output_class can be: softmax, tanh, sigmoid, linear"
            )
Example no. 40
# flatten the data
X = datasets.reshape((datasets.shape[0], datasets.shape[1] * datasets.shape[2]))
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.9)

# add the samples to the dataset format
training = SupervisedDataSet(X.shape[1], y.shape[1])
for i in range(X_train.shape[0]):
    training.addSample(X_train[i], y_train[i])
testing = SupervisedDataSet(X.shape[1], y.shape[1])
for i in range(X_test.shape[0]):
    testing.addSample(X_test[i], y_test[i])

# build a three-layer network
net = buildNetwork(X.shape[1], 150, y.shape[1], bias=True)
# use the BP (backpropagation) algorithm
trainer = BackpropTrainer(net, training, weightdecay=0.01)
# number of training epochs
trainer.trainEpochs(epochs=50)
# save the model
# model_filename = open('CAPTCHA_predictor.model','wb')
# pickle.dump(trainer,model_filename,0)
# model_filename.close()

predictions = trainer.testOnClassData(dataset=testing)

from sklearn.metrics import f1_score, classification_report
print(classification_report(y_test.argmax(axis=1), predictions))


Example no. 41
        f_input_cost.write("%f %f\n" % (x, z_cost))
    f_input.close()
    f_input_cost.close()

    eval_dataset = SupervisedDataSet(1, 1)
    eval_costset = SupervisedDataSet(1, 1)
    for i in range(RANDOM_TRAINING_SAMPLES):
        x = random.uniform(-3, 3)
        z = fn(x)
        z_cost = cost_fn(x)
        eval_dataset.addSample([x], [z])
        eval_costset.addSample([x], [z_cost])

    value_network = buildNetwork(1, 20, 1, hiddenclass=SigmoidLayer, bias=True)
    value_trainer = BackpropTrainer(value_network,
                                    learningrate=0.01,
                                    momentum=0.90,
                                    verbose=True)

    print(value_network)

    cost_network = buildNetwork(1,
                                40,
                                20,
                                1,
                                hiddenclass=SigmoidLayer,
                                bias=True)
    cost_trainer = BackpropTrainer(cost_network,
                                   learningrate=0.01,
                                   momentum=0.00,
                                   verbose=True)
Example no. 42
'''
Created on Nov 21, 2011

@author: reza
'''
from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers.backprop import BackpropTrainer

if __name__ == '__main__':
    dataset = SupervisedDataSet(2, 1)
    dataset.addSample([0, 0], [0])
    dataset.addSample([0, 1], [1])
    dataset.addSample([1, 0], [1])
    dataset.addSample([1, 1], [0])
    
    network = buildNetwork(2, 4, 1)
    trainer = BackpropTrainer(network, learningrate = 0.01, momentum = 0.2,
                              verbose = False)
    
    print('MSE before', trainer.testOnData(dataset))
    trainer.trainOnDataset(dataset, 1000)
    print('MSE after', trainer.testOnData(dataset))

    z = network.activate([0, 0])
    print(z)

    print('Final Weights: ', network.params)
    
    
Example no. 43
hiddenLayerTwo = SigmoidLayer(4, "two")
outLayer = LinearLayer(1)
inToHiddenOne = FullConnection(inLayer, hiddenLayerOne)
hiddenOneToTwo = FullConnection(hiddenLayerOne, hiddenLayerTwo)
hiddenTwoToOut = FullConnection(hiddenLayerTwo, outLayer)

#wire the layers and connections to a net
net = FeedForwardNetwork()
net.addInputModule(inLayer)
net.addModule(hiddenLayerOne)
net.addModule(hiddenLayerTwo)
net.addOutputModule(outLayer)
net.addConnection(inToHiddenOne)
net.addConnection(hiddenOneToTwo)
net.addConnection(hiddenTwoToOut)
net.sortModules()

print(net)

trainer = BackpropTrainer(net, ds)

for i in range(20):
    for j in range(1000):
        trainer.train()
    printNetResult(i, net)

print(net)
print(inToHiddenOne.params)
print(hiddenOneToTwo.params)
print(hiddenTwoToOut.params)
Example no. 44
def main():

    # loading the iris dataset
    dataset = montaDataset()

    # splitting off temporary datasets:
    # 60% of the data for training,
    # the remaining 40% to be split further
    datasetTreinoTemporario, dadosRepartidosTemporario = dataset.splitWithProportion(
        0.6)

    # splitting the remaining data
    # 50% for testing and
    # 50% for validation
    datasetTesteTemporario, datasetValidacaoTemporario = dadosRepartidosTemporario.splitWithProportion(
        0.5)

    # assemble the final datasets
    datasetTreino = montaDatasetConvertido(datasetTreinoTemporario)
    datasetTeste = montaDatasetConvertido(datasetTesteTemporario)
    datasetValidacao = montaDatasetConvertido(datasetValidacaoTemporario)

    # network structure adapted to a SoftmaxLayer output:
    # 4 input dimensions,
    # two hidden layers (20 and 2 neurons),
    # 3 output dimensions,
    # produced by a SoftmaxLayer
    network = buildNetwork(4, 20, 2, 3, outclass=SoftmaxLayer)

    # convert the class targets to one-of-many (one-hot) encoding
    datasetTreino._convertToOneOfMany()
    datasetTeste._convertToOneOfMany()
    datasetValidacao._convertToOneOfMany()

    # build the backprop trainer from
    # the network structure and
    # the training dataset
    neuralNetwork = BackpropTrainer(network,
                                    dataset=datasetTreino,
                                    learningrate=0.02,
                                    momentum=0.14,
                                    verbose=True)

    # train the network until it converges,
    # collecting the training errors and the
    # validation errors in separate arrays
    errosTreino, validacaoErros = neuralNetwork.trainUntilConvergence(
        dataset=datasetTreino, maxEpochs=1000)

    # network outputs on the test set
    outTest = network.activateOnDataset(datasetTeste).argmax(axis=1)
    print(5 * "-" + " Test set accuracy " + 5 * "-" + "\n")
    print("Accuracy on the test set: {} %".format(
        100 - percentError(outTest, datasetTeste["class"])))

    # network outputs on the validation set
    outVal = network.activateOnDataset(datasetValidacao).argmax(axis=1)
    print("\n" + 5 * "-" + " Validation set accuracy " + 5 * "-" +
          "\n")
    print("Accuracy on the validation set: {} %".format(
        100 - percentError(outVal, datasetValidacao["class"])))

    # plot the training errors (blue) and the validation errors (red)
    plt.plot(errosTreino, "b", validacaoErros, "r")
    plt.show()
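
montaDatasetConvertido is not shown in this excerpt. A plausible sketch, based on the usual PyBrain pattern: splitWithProportion returns plain SupervisedDataSets, so a ClassificationDataSet has to be rebuilt before _convertToOneOfMany() can be called:

from pybrain.datasets import ClassificationDataSet

def montaDatasetConvertido(tmp):
    # rebuild a ClassificationDataSet from a split-off SupervisedDataSet
    ds = ClassificationDataSet(4, 1, nb_classes=3)
    for n in range(tmp.getLength()):
        ds.addSample(tmp.getSample(n)[0], tmp.getSample(n)[1])
    return ds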
Esempio n. 45
0
n.sortModules()


print 'build set'

alldata = ClassificationDataSet(dim, 1, nb_classes=2)

(data,label,items) = BinReader.readData(ur'F:\AliRecommendHomeworkData\1212新版\train15_17.expand.samp.norm.bin') 
#(train,label,data) = BinReader.readData(r'C:\data\small\norm\train1217.bin')
for i in range(len(data)):
    alldata.addSample(data[i],label[i])

tstdata, trndata = alldata.splitWithProportion(0.25)

trainer = BackpropTrainer(n, trndata, momentum=0.1, verbose=True, weightdecay=0.01)

print 'start'
#trainer.trainEpochs(1)
trainer.trainUntilConvergence(maxEpochs=2)
trnresult = percentError(trainer.testOnClassData(), trndata['class'])

tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])

print "epoch: %4d" % trainer.totalepochs, \
        "  train error: %5.2f%%" % trnresult, \
        "  test error: %5.2f%%" % tstresult

print 'get result'

#trainer.trainUntilConvergence()
Esempio n. 46
0
from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers.backprop import BackpropTrainer


dataset = SupervisedDataSet(2, 1)

dataset.addSample([1, 1], [0])
dataset.addSample([1, 0], [1])
dataset.addSample([0, 1], [1])
dataset.addSample([0, 0], [0])

network = buildNetwork(dataset.indim, 2, dataset.outdim, bias=True)

trainer = BackpropTrainer(network, dataset, learningrate=0.01, momentum=0.99)

for epoch in range(1000):
    trainer.train()

test_data = SupervisedDataSet(2, 1)
test_data.addSample([1, 1], [0])
test_data.addSample([1, 0], [1])
test_data.addSample([0, 1], [1])
test_data.addSample([0, 0], [0])

trainer.testOnData(test_data, verbose=True)
Esempio n. 47
0
print

#PYBRAIN
from pybrain.tools.shortcuts import buildNetwork
from pybrain import LinearLayer, SigmoidLayer, FeedForwardNetwork, FullConnection, BiasUnit, SoftmaxLayer
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.structure.modules.tanhlayer import TanhLayer
from pybrain.datasets import SupervisedDataSet
from time import time  # used by the timing below


ds = SupervisedDataSet(2, 1)

ds.addSample((0, 0), (0,))
ds.addSample((0, 1), (1,))
ds.addSample((1, 0), (1,))
ds.addSample((1, 1), (0,))


net = buildNetwork(2, 2, 1, bias=True, outputbias=True, hiddenclass=SigmoidLayer)
trainer = BackpropTrainer(net, ds, learningrate=0.1)

t1 = time()
trainer.trainEpochs(2000)
print "Time PyBrain {}".format(time()-t1)

#PRINT RESULTS
X = [[0, 0], [0, 1], [1, 0], [1, 1]]  # assumed: X is the XOR input list (not defined in this excerpt)
for x in X:
    print "{} ==> {}".format( x, net.activate(x) )

Esempio n. 48
0
# Run:
# python2 feed_forward_neural_network.py X,
# where X is the desired amount of hidden nodes
from sys import argv
# imports assumed by this excerpt
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import TanhLayer
from pybrain.supervised.trainers import BackpropTrainer
hidden = int(argv[1])  # X

# Expects one dimensional input and target output
ds = SupervisedDataSet(1, 1)
for x in range(1, 9):
    ds.addSample(x, x)

# 1 input, X hidden, 1 output
network = buildNetwork(1, hidden, 1, hiddenclass=TanhLayer)

# Init BackpropTrainer
trainer = BackpropTrainer(network, dataset=ds)

# Train until convergence
trainer.trainUntilConvergence(verbose=False,
                              validationProportion=0.15,
                              maxEpochs=1000,
                              continueEpochs=10)

# Activating the network on different integers such as the inputs in the data-set
print("// Hidden nodes: {}".format(hidden))
for x in range(1, 9):
    print("{} --> {}".format(x, network.activate([x])[0]))

# Activating with decimal inputs outside of dataset
print("\nDecimal input outside dataset:")
for x in range(100, 109):
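    # body truncated in the original; a plausible completion, assuming the
    # integers are scaled down to decimal inputs near the training range:
    print("{} --> {}".format(x / 100.0, network.activate([x / 100.0])[0]))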
Esempio n. 49
0
    def train(self, x_data, y_data):
        # note: BackpropTrainer.train() runs a single backprop epoch
        trainer = BackpropTrainer(self.net, self._prepare_dataset(x_data, y_data))
        trainer.train()
Esempio n. 50
0
def base_experiment():
    (eval_dataset, eval_costset) = DomainFnApprox.make_evaluation_datasets()

    random_train_dataset = SupervisedDataSet(2, 1)
    random_train_costset = SupervisedDataSet(2, 1)
    for i in range(RANDOM_TRAINING_SAMPLES):
        x = random.uniform(X_MIN, X_MAX)
        y = random.uniform(Y_MIN, Y_MAX)
        z = FN(x, y)
        z_cost = COST_FN(x, y)
        random_train_dataset.addSample([x, y], [z])
        random_train_costset.addSample([x, y], [z_cost])

    value_network = buildNetwork(2,
                                 80,
                                 20,
                                 1,
                                 hiddenclass=SigmoidLayer,
                                 bias=True)
    value_trainer = BackpropTrainer(value_network,
                                    learningrate=LEARNING_RATE,
                                    momentum=MOMENTUM,
                                    verbose=True)

    print 'Value Network Topology:'
    print value_network

    cost_network = buildNetwork(2,
                                80,
                                20,
                                1,
                                hiddenclass=SigmoidLayer,
                                bias=True)
    cost_trainer = BackpropTrainer(cost_network,
                                   learningrate=LEARNING_RATE,
                                   momentum=MOMENTUM,
                                   verbose=True)

    #    test_derivatives(value_network, [1, 1])
    #    test_derivatives(cost_network, [1, 1])

    print 'Value MSE before: %.4f' % value_trainer.testOnData(eval_dataset)
    value_trainer.trainUntilConvergence(random_train_dataset,
                                        continueEpochs=6,
                                        maxEpochs=MAX_EPOCHS)
    #    value_trainer.trainOnDataset(random_train_dataset, 1000)
    print 'Value MSE after: %.4f' % value_trainer.testOnData(eval_dataset)

    print 'Cost MSE before: %.4f' % cost_trainer.testOnData(eval_costset)
    cost_trainer.trainUntilConvergence(random_train_costset,
                                       continueEpochs=6,
                                       maxEpochs=MAX_EPOCHS)
    #    cost_trainer.trainOnDataset(random_train_costset, 1000)
    print 'Cost MSE after: %.4f' % cost_trainer.testOnData(eval_costset)

    #    test_derivatives(value_network, [1, 1])
    #    test_derivatives(cost_network, [1, 1])

    f_value = open('../data/learnedvalue.txt', 'w')
    f_cost = open('../data/learnedcost.txt', 'w')
    unit = (X_MAX - X_MIN) / (EVAL_SAMPLES_AXIS - 1)
    for i in range(EVAL_SAMPLES_AXIS):
        for j in range(EVAL_SAMPLES_AXIS):
            x = X_MIN + i * unit
            y = Y_MIN + j * unit
            z = value_network.activate([x, y])
            z_cost = cost_network.activate([x, y])
            f_value.write('%f %f %f\n' % (x, y, z[0]))
            f_cost.write('%f %f %f\n' % (x, y, z_cost[0]))
    f_value.close()
    f_cost.close()
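
The two files written above hold "x y z" triples on a regular grid. A minimal visualization sketch (matplotlib is an assumption here; the original plots the files externally):

import numpy as np
import matplotlib.pyplot as plt

# load the x y z triples dumped by base_experiment()
data = np.loadtxt('../data/learnedvalue.txt')
x, y, z = data[:, 0], data[:, 1], data[:, 2]

# colour map of the learned value function over the sampled grid
plt.tricontourf(x, y, z, levels=30)
plt.colorbar(label='learned value')
plt.xlabel('x')
plt.ylabel('y')
plt.show()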
Esempio n. 51
0
class StrategyANN(Strategy):

    def __init__(self, features_num, hidden_neurons_num):
        super().__init__()
        self.is_learning = True

        self.features_num = features_num
#         self.net = buildNetwork(features_num, hidden_neurons_num, 1, bias = True)
#         self.net = buildNetwork(features_num, hidden_neurons_num, hidden_neurons_num, 1, bias = True)
#         self.net = ConvolutionalBoardNetwork(Board.BOARD_SIZE, 5, 3)
#         self.trainer = BackpropTrainer(self.net)
        
        self.net_attack = buildNetwork(features_num, hidden_neurons_num, hidden_neurons_num, 1, bias = True)
        self.net_defence = buildNetwork(features_num, hidden_neurons_num, hidden_neurons_num, 1, bias = True)
        self.trainer_attack = BackpropTrainer(self.net_attack)
        self.trainer_defence = BackpropTrainer(self.net_defence)
                
        self.gamma = 0.9
        self.errors = []
        self.buf = np.zeros(200)
        self.buf_index = 0
        self.setup()        
        
        
    def update_at_end(self, old, new):
        if not self.needs_update():
            return
                
        if new.winner == Board.STONE_EMPTY:
            reward = 0
        else:
            reward = 2 if self.stand_for == new.winner else -2
        
        if old is None:
            if self.prev_state is not None:
                self._update_impl(self.prev_state, new, reward)
        else:    
            self._update_impl(old, new, reward)
    

    def update(self, old, new):
        if not self.needs_update():
            return
        
        if self.prev_state is None:
            self.prev_state = old
            return       
        
        if new is None:
            self._update_impl(self.prev_state, old, 0)
        
        self.prev_state = old
 
          
    def _update_impl(self, old, new, reward):
        old_input = self.get_input_values(old)

        # TD(0)-style bootstrap: the training target is the discounted value
        # of the successor state; the reward is folded in per network below
        # (the attack net sees only positive reward, the defence net only negative)
        v1_a = self.net_attack.activate(self.get_input_values(new))
        target = self.gamma * v1_a
        
        ds_a = SupervisedDataSet(self.features_num, 1)
        ds_a.addSample(old_input, target + max(0, reward))
        ds_d = SupervisedDataSet(self.features_num, 1)
        ds_d.addSample(old_input, target + min(0, reward))
#         self.trainer.setData(ds)
#         err = self.trainer.train()
        self.trainer_attack.setData(ds_a)
        self.trainer_attack.train()
        self.trainer_defence.setData(ds_d)
        self.trainer_defence.train()
        
#         self.buf[self.buf_index] = err
#         self.buf_index += 1
#         if self.buf_index >= self.buf.size:
#             if len(self.errors) < 2000:
#                 self.errors.append(np.average(self.buf))
#             self.buf.fill(0)
#             self.buf_index = 0
            

    def board_value(self, board, context):
        iv = self.get_input_values(board)
#         return self.net.activate(iv)
        return self.net_attack.activate(iv), self.net_defence.activate(iv)
    
    def _decide_move(self, moves):
        best_move_a, best_av = None, None
        best_move_d, best_dv = None, None
        for m in moves:
            iv = self.get_input_values(m)
            av, dv = self.net_attack.activate(iv), self.net_defence.activate(iv)
            if best_av is None or best_av < av:
                best_move_a, best_av = m, av
            if best_dv is None or best_dv < dv:
                best_move_d, best_dv = m, dv
        return best_move_a if best_av >= best_dv else best_move_d
            

    def preferred_board(self, old, moves, context):
        if not moves:
            return old
        if len(moves) == 1:
            return moves[0]

        if np.random.rand() < self.epsilon:  # exploration
            the_board = random.choice(moves)
            the_board.exploration = True
            return the_board
        else:
#             board_most_value = max(moves, key=lambda m: self.board_value(m, context))            
#             return board_most_value
            return self._decide_move(moves)
        

    def get_input_values(self, board):
        '''
        Returns:
        -----------
        vector: numpy.1darray
            the input vector
        '''
#         print('boar.stone shape: ' + str(board.stones.shape))
        v = board.stones
#         print('vectorized board shape: ' + str(v.shape))

#         print('b[%d], w[%d]' % (black, white))
        iv = np.zeros(v.shape[0] * 2 + 2)
  
        iv[0:v.shape[0]] = (v == Board.STONE_BLACK).astype(int)
        iv[v.shape[0]:v.shape[0] * 2] = (v == Board.STONE_WHITE).astype(int)
        who = board.whose_turn_now()
        iv[-2] = 1 if who == Board.STONE_BLACK else 0  # turn to black move
        iv[-1] = 1 if who == Board.STONE_WHITE else 0  # turn to white move
#         print(iv.shape)
#         print(iv)
        return iv

    def save(self, file):
        pass

    def load(self, file):
        pass

    def setup(self):
        self.prev_state = None
    
    def mind_clone(self):
        pass
Esempio n. 52
0
#PYBRAIN
from pybrain.tools.shortcuts import buildNetwork
from pybrain import LinearLayer, SigmoidLayer, FeedForwardNetwork, FullConnection, BiasUnit, SoftmaxLayer
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.structure.modules.tanhlayer import TanhLayer
from pybrain.datasets import SupervisedDataSet
from time import time  # used by the timing below

ds = SupervisedDataSet(2, 1)

ds.addSample((0, 0), (0, ))
ds.addSample((0, 1), (1, ))
ds.addSample((1, 0), (1, ))
ds.addSample((1, 1), (0, ))

net = buildNetwork(2,
                   2,
                   1,
                   bias=True,
                   outputbias=True,
                   hiddenclass=SigmoidLayer)
trainer = BackpropTrainer(net, ds, learningrate=0.1)

t1 = time()
trainer.trainEpochs(2000)
print "Time PyBrain {}".format(time() - t1)

#PRINT RESULTS
X = [[0, 0], [0, 1], [1, 0], [1, 1]]  # assumed: X is the XOR input list (not defined in this excerpt)
for x in X:
    print "{} ==> {}".format(x, net.activate(x))
Esempio n. 53
0
class MonteCarlo(object):
    def __init__(self, **kwargs):

        self.max_depth = 0
        self.stats = {}

        self.calculation_time = float(kwargs.get('time', 1))
        self.max_moves = int(kwargs.get('max_moves', Board.BOARD_SIZE_SQ))

        # Exploration constant, increase for more exploratory moves,
        # decrease to prefer moves with known higher win rates.
        self.C = float(kwargs.get('C', 1.4))

        self.features_num = Board.BOARD_SIZE_SQ * 3 + 2
        self.hidden_neurons_num = self.features_num * 2
        self.net = buildNetwork(self.features_num, self.hidden_neurons_num, 2, bias=True, outclass=SigmoidLayer)
        self.trainer = BackpropTrainer(self.net)

        self.total_sim = 0
        self.observation = []


    def select(self, board, moves, who, **kwargs):
        # Bail out early if there is no real choice to be made.
        if not moves:
            return
        if len(moves) == 1:
            return moves[0]

        if Game.on_training:
            self.calculation_time = 60
        else:
            self.calculation_time = 1

        self.max_depth = 0
        self.stats = {}
        games = 0
        begin = time.time()
        while time.time() - begin < self.calculation_time:
            self.sim(board)
            games += 1
            if games > 10:
                break

        self.stats.update(games=games, max_depth=self.max_depth, time=str(time.time() - begin))
        print(self.stats['games'], self.stats['time'])

        move, _ = self.get_best(board, moves, who)
        return move


    def sim(self, board):
        visited_path = []
        state = board
        winner = Board.STONE_EMPTY
        for _ in range(1, self.max_moves + 1):
            moves, player, _ = Game.possible_moves(state)
            state_new, state_new_val = self.get_best(state, moves, player)
            visited_path.append((player, state, state_new, state_new_val))
            over, winner, _ = state_new.is_over(state)
            if over:
                break
            state = state_new

        self.total_sim += 1

        ds = SupervisedDataSet(self.features_num, 2)
        for player, state, new, val in visited_path:
            plays = val[1] * self.total_sim + 1
            wins = val[0] * self.total_sim
            if player == winner:
                wins += 1
            ds.addSample(self.get_input_values(state, new, player), (wins, plays))
        self.trainer.trainOnDataset(ds)


    def get_best(self, state, moves, who):
        outputs = []
        for s in moves:
            out = self.net.activate(self.get_input_values(state, s, who))
            outputs.append(out)
        a = np.array(outputs)
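        # UCB1-flavoured selection: predicted win rate plus an exploration
        # bonus scaled by self.C (unlike textbook UCB1 there is no square
        # root); a[:, 0] ~ predicted wins, a[:, 1] ~ predicted plays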
        b = a[:, 0] / a[:, 1] + self.C * np.log(np.sum(a[:, 1])) / a[:, 1]
        i = np.argmax(b)
        return moves[i], a[i]


    def get_input_values(self, board, new_board, who):
        v = board.stones
        sz = v.shape[0]
        iv = np.zeros(self.features_num)
        iv[0:sz] = (v == Board.STONE_BLACK).astype(int)
        iv[sz:sz * 2] = (v == Board.STONE_WHITE).astype(int)
        iv[sz * 2:sz * 3] = (new_board.stones != v).astype(int)
        iv[-2] = 1 if who == Board.STONE_BLACK else 0  # turn to black move
        iv[-1] = 1 if who == Board.STONE_WHITE else 0  # turn to white move
        return iv


    def swallow(self, who, st0, st1, **kwargs):
        self.observation.append((who, st0, st1))

    def absorb(self, winner, **kwargs):
        self.total_sim += 1

        ds = SupervisedDataSet(self.features_num, 2)
        for who, s0, s1 in self.observation:
            if who != Board.STONE_BLACK:
                continue
            input_vec = self.get_input_values(s0, s1, who)
            val = self.net.activate(input_vec)
            plays = val[1] * self.total_sim + 1
            wins = val[0] * self.total_sim
            if who == winner:
                wins += 1
            ds.addSample(input_vec, (wins, plays))
        self.trainer.trainOnDataset(ds)

    def void(self):
        self.observation = []
Esempio n. 54
0
    return ls



# read the raw strings from CSV
dataset = readFromCsv("cbf")
# convert to float
numdataset = np.array(dataset, dtype=np.float64)
# initial split into two groups
trainingset, vdataset = dataSplit(numdataset)
# print(len(trainingset), len(vdataset))
# normalize each group separately
gytdataset = gyData(trainingset)
gyvdataset = gyData(vdataset)




# train the neural network below

# final training set, built from the normalized data
bts = buildTrainingSet(gytdataset)
# ll = [3382.9879,3384.0262,3358.7953,3373.3446,179423841,2.31148615058,4.4,4.4,4.35,4.36,0.4556,4518585,19794038.0,4363744000.0,4363744000.0]
# print(calTodayFeature(ll, trainingset))
net = buildNetwork(15, 4, 2, bias=True, hiddenclass=SigmoidLayer, outclass=SigmoidLayer)
trainer = BackpropTrainer(net, bts)
trainer.trainEpochs(epochs=100)
NetworkWriter.writeToFile(net, '../net/jxkj_4l_100t.xml')

print(ve.calRightRate(gyvdataset, net))
Esempio n. 55
0
class NeuralNetwork(object):
    '''Neural network wrapper for the pybrain implementation
    '''

    def __init__(self):
        self.path = os.path.dirname(os.path.abspath(__file__)) + "/../../data/"
        self.net = None
        self.trainer = None

    def createNew(self, nInputs, nHiddenLayers, nOutput, bias):
        '''builds a new neural network
        
        :param int nInputs: the number of input nodes
        :param int nHiddenLayers: the number of hidden layers
        :param int nOutputs: the number of output nodes
        :param bool bias: if True a bias node will be added
        
        :return: instance of a new neural network
        :rtype: NeuralNetwork
        '''
        self.net = buildNetwork(nInputs, nHiddenLayers, nOutput, bias=bias, hiddenclass=TanhLayer)
        return self

    def train(self, dataset, maxEpochs = 10, learningrate = 0.01, momentum = 0.99, continueEpochs=10, validationProportion=0.25):
        '''trains a network with the given dataset
        
        :param SupervisedDataSet dataset: the training dataset
        :param int maxEpochs: max number of iterations to train the network
        :param float learningrate: step size of each weight update
        :param float momentum: helps out of local minima while training, to get better results
        '''
        self.trainer = BackpropTrainer(self.net, learningrate = learningrate, momentum = momentum)
        self.trainer.trainOnDataset(dataset, maxEpochs)

    def trainConvergence(self, dataset, maxEpochs = 10, learningrate = 0.01, momentum = 0.99, continueEpochs=10, validationProportion=0.25):
        '''trains a network with the given dataset until it converges
        
        :param SupervisedDataSet dataset: the training dataset
        :param int maxEpochs: max number of iterations to train the network
        :param float learningrate: step size of each weight update
        :param float momentum: helps out of local minima while training, to get better results
        '''
        '''
        self.trainer = BackpropTrainer(self.net, learningrate = learningrate, momentum = momentum)
        self.trainer.trainUntilConvergence(dataset, maxEpochs, False, continueEpochs, validationProportion)

    def test(self, data=None, verbose=False):
        if not self.trainer:
            raise ValueError("call train() first, to create a valid trainer object") 
        return self.trainer.testOnData(data, verbose)

    def activate(self, value, rnd=False):
        inpt = self.net.activate(value)[0]
        if rnd:
            return self._clazz(inpt)
        return inpt

    def _clazz(self, inpt):
        clazz = round(inpt)
        if (clazz < 0):
            return 0
        if (clazz > 1):
            return 1
        return int(clazz)

    def save(self, name):
        '''saves the neural network
        
        :param string name: filename under which the network is saved
        '''
        f = open(self.path + name + FILE_EXTENSION, 'w')
        pickle.dump(self.net, f)
        f.close()

    def load(self, name):
        '''loads the neural network
        
        :param string name: filename of the network to load
        
        :return: instance of a saved neural network
        :rtype: NeuralNetwork
        '''
        f = open(self.path + name + FILE_EXTENSION, 'r')
        self.net = pickle.load(f)
        f.close()
        return self

    def __repr__(self):
        return "%s\n%s" % (self.__class__.__name__, str(self.net))
Esempio n. 56
0
        net.reset()
        for i, t in seq:
            res = net.activate(i)
            if verbose:
                print(t, res)
            r += sum((t-res)**2)
            samples += 1
        if verbose:
            print('-'*20)
    r /= samples
    if not silent:
        print('MSE:', r)
    return r

if __name__ == "__main__":
    N = buildParityNet()
    DS = ParityDataSet()
    evalRnnOnSeqDataset(N, DS, verbose = True)
    print('(preset weights)')
    N.randomize()
    evalRnnOnSeqDataset(N, DS)
    print('(random weights)')


    # Backprop improves the network performance, and sometimes even finds the global optimum.
    N.reset()
    bp = BackpropTrainer(N, DS, verbose = True)
    bp.trainEpochs(5000)
    evalRnnOnSeqDataset(N, DS)
    print('(backprop-trained weights)')
Esempio n. 57
0
__author__ = 'Stubborn'


from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers.backprop import BackpropTrainer

D = SupervisedDataSet(2, 1)
# 2 inputs --> 1 output

D.addSample([0,0], [0])
D.addSample([0,1], [1])
D.addSample([1,0], [1])
D.addSample([1,1], [0])
# the 4 input/output combinations of the XOR function

N = buildNetwork(2, 4, 1)
# multilayer perceptron with 1 hidden layer

T = BackpropTrainer(N, learningrate = 0.01, momentum = 0.99)
# momentum carries over a fraction of the previous weight update (it is not a reduced learning rate)

print (('MSE before'), T.testOnData(D))
T.trainOnDataset(D, 1000)
T.trainUntilConvergence()
print (('MSE after'), T.testOnData(D))
print D
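
For reference, momentum reuses part of the previous weight change instead of shrinking the learning rate. A schematic of the standard rule (a sketch, not PyBrain internals):

def momentum_step(w, grad, prev_delta, learningrate=0.01, momentum=0.99):
    # classic gradient descent with momentum
    delta = -learningrate * grad + momentum * prev_delta
    return w + delta, delta  # updated weight and the delta to reuse next step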