def test_train(self, epochs=1):
	"""Train a 300-input network on 70% of self.samples and report accuracy.

	Builds a 300-300-1 network (300 values = 10x10 pixels x RGB, assumed —
	confirm against the sample loader), trains it with backprop, then
	activates it on up to 100 held-out test samples.

	:param epochs: number of backprop epochs to run (default 1)
	:return: number of misclassified test samples
	"""
	print("Training...")

	# 70/30 train/test split of the stored samples and labels
	split = int(len(self.samples) * 0.7)
	train_samples = self.samples[:split]
	train_labels = self.labels[:split]

	test_samples = self.samples[split:]
	test_labels = self.labels[split:]

	net = buildNetwork(300, 300, 1)
	ds = SupervisedDataSet(300, 1)
	for sample, label in zip(train_samples, train_labels):
		ds.addSample(tuple(np.array(sample, dtype='float64')), (label,))

	trainer = BackpropTrainer(net, ds, verbose=True)
	trainer.trainEpochs(epochs)
	self.totalEpochs = epochs

	# BUGFIX: the original iterated a hard-coded range(0, 100), raising
	# IndexError whenever fewer than 100 test samples were available.
	# The duplicated print in both branches is also collapsed.
	error = 0
	counter = 0
	for i in range(min(100, len(test_samples))):
		output = net.activate(tuple(np.array(test_samples[i], dtype='float64')))
		counter += 1
		print(counter, " : output : ", output[0], " real answer : ", test_labels[i])
		if round(output[0]) != test_labels[i]:
			error += 1

	print("Trained with " + str(epochs) + " epochs; Total: " + str(self.totalEpochs) + ";")
	return error
Exemplo n.º 2
0
def anntrain(xdata, ydata):
    """Build and train a minimal feed-forward network on (xdata, ydata).

    Architecture: a linear input layer sized to the feature count, a single
    sigmoid hidden unit, and one linear output unit, fully connected.
    Trains with plain backprop for 40 passes over the data.

    :param xdata: sequence of feature vectors (all the same length)
    :param ydata: sequence of target values, aligned with xdata
    :return: the trained FeedForwardNetwork
    """
    feature_count = len(xdata[0])

    dataset = SupervisedDataSet(feature_count, 1)
    for row_index, row in enumerate(xdata):
        dataset.addSample(row, ydata[row_index])

    # assemble the net module by module instead of using buildNetwork
    network = FeedForwardNetwork()
    in_layer = LinearLayer(feature_count)
    hidden_layer = SigmoidLayer(1)
    out_layer = LinearLayer(1)
    network.addInputModule(in_layer)
    network.addModule(hidden_layer)
    network.addOutputModule(out_layer)
    network.addConnection(FullConnection(in_layer, hidden_layer))
    network.addConnection(FullConnection(hidden_layer, out_layer))
    network.sortModules()

    trainer = BackpropTrainer(network, dataset)
    trainer.trainOnDataset(dataset, 40)
    trainer.testOnData()
    return network
Exemplo n.º 3
0
def readFromExcel(inCols,targetCols, numRows, fileName, offset=0, sheet=0, dataSet=None, conversionFun=None):
    """Populate a dataset (or create a new SupervisedDataSet) from an Excel file.

       inCols = column numbers holding the input data, indexed from 0
       targetCols = column numbers holding the target data, indexed from 0
       numRows = the number of rows of data to read
       fileName = the name of the Excel workbook
       offset = the row index where valid data starts
       sheet = the workbook sheet the data is on, indexed from 0 as in xlrd
       dataSet = the dataset to populate; a SupervisedDataSet is created if None
       conversionFun = optional callable applied to every input/target cell value
    """
    book = open_workbook(fileName)
    sheet=book.sheet_by_index(sheet)
    
    if dataSet is None:
        dataSet=SupervisedDataSet(len(inCols),len(targetCols))
    for r in range(offset,(offset+numRows)):
        # collect this row's input and target cell values column by column
        input=[]
        target=[]
        for inC in inCols:
            input.append(sheet.cell_value(r,inC))

        for tC in targetCols:
            target.append(sheet.cell_value(r,tC))
        try:
            if conversionFun:
                input=[conversionFun(i) for i in input]
                target=[conversionFun(t) for t in target]
                print input,target
        
            # rows whose conversion or insertion raises are skipped, not fatal
            dataSet.addSample(input, target)
        except Exception:
            print 'rejected row {}'.format(r)
    return dataSet
Exemplo n.º 4
0
def create_dataset():
    """Sample sin(x) on [0, 4*pi) at pi/30 steps into a 1-in / 1-out dataset."""
    samples = SupervisedDataSet(1, 1)
    grid = arange(0, 4 * pi, pi / 30)
    for point in grid:
        samples.addSample(point, sin(point))
    return samples
Exemplo n.º 5
0
 def createDataSet(self, trainInput, trainOut):
     """Pack every (trainInput[i], trainOut[i]) pair into a SupervisedDataSet.

     trainInput is a 2-D array; the dataset input width is its column count,
     with a single output value per sample.
     """
     ds = SupervisedDataSet(trainInput.shape[1], 1)
     for row_index, sample in enumerate(trainInput):
         ds.addSample(sample, trainOut[row_index])
     return ds
Exemplo n.º 6
0
def ANN(
    trainFeature, trainLabel, testFeature, testLabel, netStructure, para_rate, para_momentum
):  # netStructure lists four layer sizes; momentum is a parameter in SGD
    """Train a sigmoid feed-forward network on (trainFeature, trainLabel).

    Trains one full pass at a time until the dataset error changes by less
    than 1e-4, recording the error after each pass.

    :param netStructure: four layer sizes [in, hidden1, hidden2, out]
        (NOTE(review): an earlier comment said three entries, but the code
        reads netStructure[0..3] — confirmed from the buildNetwork call)
    :param para_rate: backprop learning rate
    :param para_momentum: SGD momentum
    :return: list of dataset errors recorded during training
    """
    sampleNum = trainFeature.shape[0]
    featureNum = trainFeature.shape[1]
    Dataset = SupervisedDataSet(featureNum, 1)
    i = 0
    # add every training row as a (features, [label]) sample
    while i < sampleNum:
        print(i)
        Dataset.addSample(list(trainFeature[i]), [trainLabel[i]])
        i += 1
    Network = buildNetwork(
        netStructure[0],
        netStructure[1],
        netStructure[2],
        netStructure[3],
        hiddenclass=SigmoidLayer,
        outclass=SigmoidLayer,
    )
    T = BackpropTrainer(Network, Dataset, learningrate=para_rate, momentum=para_momentum, verbose=True)
    # print(Dataset['input'])
    errorList = []
    errorList.append(T.testOnData(Dataset))
    T.trainOnDataset(Dataset)
    errorList.append(T.testOnData(Dataset))
    T.trainOnDataset(Dataset)
    # keep training until the error plateaus (change smaller than 1e-4)
    while abs(T.testOnData(Dataset) - errorList[-1]) > 0.0001:
        T.trainOnDataset(Dataset)
        errorList.append(T.testOnData(Dataset))
    pass  # this step is for the output of predictedLabel
    print(np.array([Network.activate(x) for x in trainFeature]))
    # print(testLabel)
    print(Network.activate([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
    return errorList
Exemplo n.º 7
0
    def make_evaluation_datasets(self):
        """Load simdata/evalset.txt into two evaluation datasets.

        Each line holds: x y dist angle. A negative dist maps to
        self.COST_HIGH, otherwise self.COST_LOW. Also mirrors the data to
        ../data/funcvalue.txt (x, y, dist) and ../data/funccost.txt
        (x, y, cost).

        :return: (eval_dataset, eval_costset) tuple of SupervisedDataSets
        """
        eval_dataset = SupervisedDataSet(self.inputdim, self.outputdim)
        eval_costset = SupervisedDataSet(self.inputdim, self.outputdim)

        # BUGFIX: the original opened three file handles and never closed
        # them; the with-statement guarantees closure even on error.
        with open('simdata/evalset.txt') as f_sim, \
                open('../data/funcvalue.txt', 'w') as f_input, \
                open('../data/funccost.txt', 'w') as f_input_cost:
            for line in f_sim:
                line_segs = line.split()
                # x, y and angle stay as strings, matching the original
                x = line_segs[0]
                y = line_segs[1]
                dist = float(line_segs[2])
                angle = line_segs[3]

                if dist < 0:
                    cost = self.COST_HIGH
                else:
                    cost = self.COST_LOW

                eval_dataset.addSample([x, y], [dist, angle])
                eval_costset.addSample([x, y], [cost])

                f_input.write('%s %s %f\n' % (x, y, dist))
                f_input_cost.write('%s %s %f\n' % (x, y, cost))

        return (eval_dataset, eval_costset)
Exemplo n.º 8
0
	def Predict(self, ticker, day):
		"""Predict ticker's closing price from the preceding trainingPeriod days.

		Downloads Yahoo price data ending the day before *day*, trains a
		fresh network on sliding windows of length self.windowLength, and
		returns the activation for the most recent window. Returns [0]
		when the download fails.
		"""
		endDay = day-datetime.timedelta(1)
		startDay = endDay - datetime.timedelta(self.trainingPeriod)
		try:
			stockData = data.DataReader(ticker, 'yahoo', startDay, endDay)
		except:
			# NOTE(review): bare except hides the real failure — any error
			# (bad ticker, network down) becomes the sentinel [0]
			return [0]

		# one feature vector per sliding window; the response is the closing
		# price of the day that follows the window
		rawTrainFeatures = []
		rawTrainResponses = []
		for currentDay in range(self.windowLength, len(stockData)):
			window = stockData[currentDay-self.windowLength:currentDay]
			currentPrice = stockData.iloc[currentDay]['Open']
			response = stockData.iloc[currentDay]['Close']
			rawTrainFeatures.append(self.GetFeature(window))
			rawTrainResponses.append(response)

		# the most recent windowLength days become the prediction input
		rawTestFeatures = self.GetFeature(stockData[len(stockData)-self.windowLength:len(stockData)])

		# normalTrainFeatures, normalTestFeatures = self.NormalizeFeatures(rawTrainFeatures, rawTestFeatures)
		alldata = SupervisedDataSet(len(rawTrainFeatures[0]), 1)
		for index in range(0, len(rawTrainFeatures)):
			alldata.addSample(rawTrainFeatures[index],[rawTrainResponses[index]])

		# hidden layer sized halfway between input and output dimensions.
		# NOTE(review): (indim+outdim)/2 is a float under Python 3 — this
		# snippet appears to target Python 2 integer division; confirm.
		self.network = buildNetwork(alldata.indim, (alldata.indim+alldata.outdim)/2, alldata.outdim, hiddenclass=SigmoidLayer, outclass=LinearLayer)
		trainer = BackpropTrainer(self.network, dataset=alldata)
		activations = []
		# 50 x 5 = 250 total backprop passes
		for i in range(50):
			for x in range(5):
				trainer.train()
		return float(self.network.activate(rawTestFeatures))
Exemplo n.º 9
0
def retrain(N, dataset, net):
    """Run N additional backprop training epochs of *net* over *dataset*.

    :param N: number of training passes
    :param dataset: iterable of (input, target) pairs, both 20-dimensional
    :param net: an existing network, trained in place
    :return: the (mutated) network
    """
    samples = SupervisedDataSet(20, 20)
    for pair in dataset:
        samples.addSample(pair[0], pair[1])
    trainer = BackpropTrainer(net, samples)
    for _ in range(N):
        trainer.train()
    return net
Exemplo n.º 10
0
 def make_ds_with_samples(sample_subset):
     """Build a dataset from (features, target, label) triples.

     Dataset dimensions come from the module-level feature/class lists.
     Returns (dataset, labels) with labels kept in sample order.
     """
     ds = SupervisedDataSet(len(features.word_list),
                            len(features.class_list))
     collected_labels = []
     for feature_vec, target_vec, label in sample_subset:
         ds.addSample(feature_vec, target_vec)
         collected_labels.append(label)
     return (ds, collected_labels)
Exemplo n.º 11
0
def main():
    """Resume training a 3-input / 2-output network from data/train.csv.

    Loads space-delimited rows (columns 0-2 = inputs, 3-4 = targets),
    restores weights from model.xml, trains for up to 500 epochs —
    stopping early if ../stopfile.txt appears — then writes the network
    back to model.xml.
    """
    train_file = 'data/train.csv'
    # validation_file = 'data/validation.csv'
    output_model_file = 'model.xml'

    # hidden_size = 4
    epochs = 500

    # load data
    # def loadData():
    train = np.loadtxt(train_file, delimiter=' ')
    Input = train[0:,0:3]
    Output = train[0:,3:5]

    # validation = np.loadtxt(validation_file, delimiter=',')
    # train = np.vstack((train, validation))

    # x_train = train[:, 0:-1]
    # y_train = train[:, -1]
    # y_train = y_train.reshape(-1, 1)

    # input_size = x_train.shape[1]
    # target_size = y_train.shape[1]

    # prepare dataset
    # def prepare dataset(input_size, target_size):
    ds = SDS(Input,Output)
    # ds.addSample(input_size)
    # ds.setField('input', x_train)
    # ds.setField('target', y_train)

    # init and train
    # def initTrain(input_size, hidden_size, input, output):
    # net = buildNetwork(input_size, hidden_size, target_size, bias=True)
    # NOTE(review): this freshly built network is dead code — the next
    # assignment overwrites it with the one loaded from model.xml.
    net = buildNetwork(3,  # input layer
                                 4,  # hidden0
                                 2,  # output
                                 hiddenclass=SigmoidLayer,
                                 outclass=SigmoidLayer,
                                 bias=True
                                 )
    net = NetworkReader.readFrom('model.xml')
    for i,o in zip(Input,Output):
        ds.addSample(i,o)
        print i, o

    trainer = BackpropTrainer(net, ds)
        
    print "training for {} epochs...".format(epochs)

    for i in range(epochs):
        mse = trainer.train()
        rmse = sqrt(mse)
        print "training RMSE, epoch {}: {}".format(i + 1, rmse)
        # external kill-switch: drop a stopfile to end training early
        if os.path.isfile("../stopfile.txt") == True:
            break
    
    NetworkWriter.writeToFile(net, output_model_file)
Exemplo n.º 12
0
def buildTrainingSet(gydataset):
    """Build the final training set: 15 input columns, 1 target column.

    Each row of gydataset has 16 entries — the first 15 are features,
    entry 15 is the target value.
    """
    trainingset = SupervisedDataSet(15, 1)
    for row in gydataset:
        trainingset.addSample(tuple(row[:15]), row[15])
    return trainingset
Exemplo n.º 13
0
def train(N, dataset):
    """Build a 20-20-20 tanh-hidden network and train it for N epochs.

    :param N: number of training passes (progress is echoed to stdout)
    :param dataset: iterable of (input, target) pairs, both 20-dimensional
    :return: the trained network
    """
    samples = SupervisedDataSet(20, 20)
    for pair in dataset:
        samples.addSample(pair[0], pair[1])
    net = buildNetwork(20, 20, 20, bias=True, hiddenclass=TanhLayer)
    trainer = BackpropTrainer(net, samples)
    for epoch in range(N):
        # carriage return keeps the progress counter on one console line
        sys.stdout.write("Progress: %d/%d \r" % (epoch, N))
        sys.stdout.flush()
        trainer.train()
    return net
Exemplo n.º 14
0
 def update_neural_network(self, old_state, old_value, new_state,action, reward):
    """One Q-learning-style update: train the net one step toward the TD
    target for the (state, action) pair just taken."""
    # TD target: old value nudged toward reward + discounted best next value
    desired_value = old_value + self.learning_rate * (reward + self.discount_factor * self.get_best_action(new_state)[1] - old_value)
    # a one-sample dataset over the concatenated state+action vector
    ds = SupervisedDataSet(self.states_and_actions_num,1)
    ds.addSample(old_state + action, desired_value)
    trainer = BackpropTrainer(self.neural_network,ds)
    trainer.train()
    
     
     
     
     
Exemplo n.º 15
0
    def __init__(self, domain, iters, trial_number):
        """Set up the active-learning experiment: a cost ensemble seeded
        with COST_LOW everywhere and a performance ensemble seeded with
        random inputs mapping to a constant output of 2."""
        super(ActNetExperiment,
              self).__init__(domain, Experiment.EXP_ACTNET, iters,
                             ACTIVE_ENSEMBLE_SIZE, trial_number)
        # inputs: x, y, point's ambiguity, current average cost, current average value variance
        # output: ratio of next error * avg_cost to current
        self.cost_ensemble = Ensemble(self.ensemble_size, domain.inputdim,
                                      self.NUM_HIDDEN1, self.NUM_HIDDEN2, 1)
        self.train_costset = SupervisedDataSet(domain.inputdim, 1)

        # train cost network to reset costs: every grid point starts at COST_LOW
        points = self.domain.generate_grid_points(INIT_COST_SAMPLES_AXIS)
        init_costset = SupervisedDataSet(2, 1)
        for point in points:
            z_cost = self.domain.COST_LOW
            init_costset.addSample(point, [z_cost])

        print 'Initializing Cost Ensemble...'
        self.cost_ensemble.train(init_costset)
        self.cost_ensemble.save_starting_weights()

        # perf net input: [x_y_value_var, avg_value_var, x_y_cost, x_y_cost_var]
        self.perf_input_dim = 4
        self.perf_ensemble = Ensemble(self.ensemble_size, self.perf_input_dim,
                                      self.NUM_HIDDEN1, self.NUM_HIDDEN2, 1)
        self.train_inputs = []
        self.train_outputs = []
        #self.train_perfset = ImportanceDataSet(self.perf_input_dim, 1)
        self.train_perfset = SupervisedDataSet(self.perf_input_dim, 1)

        # bookkeeping for the previous iteration's observations
        self.last_avg_value_var = None
        self.last_x_y_value_var = None
        self.last_x_y_cost = None
        self.last_x_y_actual_cost = None
        self.last_x_y_cost_var = None
        self.last_error_times_avg_cost = None
        self.last_predicted_drop = -1

        # train perf network to reset predictions: random inputs spanning the
        # expected variance/cost ranges, all mapped to a constant target of 2
        #init_perfset = SupervisedDataSet(self.perf_input_dim, 1)
        init_perfset = SupervisedDataSet(self.perf_input_dim, 1)
        for i in range(INIT_PERF_SAMPLES):  #@UnusedVariable
            x_y_value_var = random.uniform(-.1, 5)
            avg_value_var = random.uniform(-.1, 5)
            # allow cost samples slightly beyond [COST_LOW, COST_HIGH]
            cost_gutter = float(self.domain.COST_HIGH -
                                self.domain.COST_LOW) / 10
            x_y_cost = random.uniform(self.domain.COST_LOW - cost_gutter,
                                      self.domain.COST_HIGH + cost_gutter)
            x_y_cost_var = random.uniform(-.1, 20)
            inp = [x_y_value_var, avg_value_var, x_y_cost, x_y_cost_var]
            out = [2]
            init_perfset.addSample(inp, out)
        print 'Initializing Perf Ensemble...'
        self.perf_ensemble.train(init_perfset)
        self.perf_ensemble.save_starting_weights()
Exemplo n.º 16
0
def neuralNetwork(X, Y):
    """Train a feed-forward network on rows X with scalar targets Y.

    The hidden layer has the same width as the input layer; training runs
    until convergence, capped at 10 epochs. Returns the trained network.
    """
    print "Creating dataset..."
    ds = SupervisedDataSet(len(X[0]), 1)

    for x, y in zip(X, Y):
        ds.addSample(x, y)

    print "Creating neural network..."
    # hidden layer as wide as the input layer
    n = buildNetwork(ds.indim, int(ds.indim), ds.outdim)
    print "Training neural network..."
    t = BackpropTrainer(n, ds, verbose=True)
    errors = t.trainUntilConvergence(maxEpochs=10)
    return n
Exemplo n.º 17
0
def montaRede(dadosEntrada, dadosSaida):
    """Build and train a neural network on a three-class dataset.

    The data is assumed to hold three blocks of 50 samples per class
    (Iris-style layout — confirm against the caller): the first 35 of each
    block become the training set, the remaining 15 the test set.

    :param dadosEntrada: network input rows (4 values each)
    :param dadosSaida: network target rows (1 value each)
    :return: (trained BackpropTrainer, held-out SupervisedDataSet)
    """
    # 35 training / 15 test samples from each 50-sample class block
    entradaTreino = np.concatenate(
        (dadosEntrada[:35], dadosEntrada[50:85], dadosEntrada[100:135]))
    saidaTreino = np.concatenate(
        (dadosSaida[:35], dadosSaida[50:85], dadosSaida[100:135]))
    entradaTeste = np.concatenate(
        (dadosEntrada[35:50], dadosEntrada[85:100], dadosEntrada[135:]))
    saidaTeste = np.concatenate(
        (dadosSaida[35:50], dadosSaida[85:100], dadosSaida[135:]))

    # BUGFIX: the original called treinaRede() twice and discarded the
    # first result; build the supervised training set exactly once.
    treinamento = treinaRede(entradaTreino, saidaTreino)

    # network sized from the dataset, 2 hidden neurons, bias enabled
    redeNeural = buildNetwork(treinamento.indim,
                              2,
                              treinamento.outdim,
                              bias=True)

    redeNeuralTreinada = BackpropTrainer(redeNeural,
                                         treinamento,
                                         learningrate=0.3,
                                         momentum=0.9)

    # fixed training budget of 10000 epochs
    for _ in range(10000):
        redeNeuralTreinada.train()

    # held-out evaluation dataset: 4 inputs, 1 output per sample
    teste = SupervisedDataSet(4, 1)
    for indice in range(len(entradaTeste)):
        teste.addSample(entradaTeste[indice], saidaTeste[indice])

    return redeNeuralTreinada, teste
Exemplo n.º 18
0
    def absorb(self, winner, **kwargs):
        """Fold one finished game into the net's (wins, plays) targets.

        Only Black's transitions from self.observation are used; each
        sample's target is the net's current scaled estimate, bumped when
        Black turns out to be the winner.
        """
        self.total_sim += 1

        ds = SupervisedDataSet(self.features_num, 2)
        for who, s0, s1 in self.observation:
            # learn only from Black's transitions
            if who == Board.STONE_BLACK:
                feats = self.get_input_values(s0, s1, who)
                estimate = self.net.activate(feats)
                # rescale the net's normalized outputs back to counts
                play_count = estimate[1] * self.total_sim + 1
                win_count = estimate[0] * self.total_sim
                if winner == who:
                    win_count += 1
                ds.addSample(feats, (win_count, play_count))
        self.trainer.trainOnDataset(ds)
Exemplo n.º 19
0
    def absorb(self, winner, **kwargs):
        """Fold one finished game into the net's (wins, plays) targets,
        using only Black's transitions from self.observation."""
        self.total_sim += 1

        ds = SupervisedDataSet(self.features_num, 2)
        for who, s0, s1 in self.observation:
            # learn only from Black's transitions
            if who != Board.STONE_BLACK:
                continue
            input_vec = self.get_input_values(s0, s1, who)
            val = self.net.activate(input_vec)
            # rescale the net's normalized outputs back to simulation counts
            plays = val[1] * self.total_sim + 1
            wins = val[0] * self.total_sim
            if who == winner:
                wins += 1
            ds.addSample(input_vec, (wins, plays))
        self.trainer.trainOnDataset(ds)
Exemplo n.º 20
0
    def __init__(self, domain, iters, trial_number):
        """Set up the active-variance experiment: a variance ensemble
        seeded with 1.0 at every grid point."""
        super(ActVarExperiment,
              self).__init__(domain, Experiment.EXP_ACTVAR, iters,
                             ACTIVE_ENSEMBLE_SIZE, trial_number)
        self.var_ensemble = Ensemble(self.ensemble_size, domain.inputdim,
                                     self.NUM_HIDDEN1, self.NUM_HIDDEN2, 1)
        self.train_varset = SupervisedDataSet(domain.inputdim, 1)

        # seed dataset: constant variance 1.0 at every grid point
        points = self.domain.generate_grid_points(INIT_COST_SAMPLES_AXIS)
        init_varset = SupervisedDataSet(domain.inputdim, 1)
        for point in points:
            z_var = 1.0
            init_varset.addSample(point, [z_var])
        # NOTE(review): init_varset is built but never passed to a train()
        # call here — confirm whether training was dropped by the source
        print 'Initializing Variance Ensemble...'
Exemplo n.º 21
0
def buildDataset(filenames,
                 history=2, # how many snapshots into the past?
                 ):
    """Build a SupervisedDataSet from saved run files.

    Each sample's input is the run settings plus *history* consecutive
    snapshots; the target compares the following snapshot's value with
    the previous one.
    """
    D = SupervisedDataSet(set_feats + history * snap_feats, num_targ)
    for fname in filenames:
        rundata = quickload(fname)
        snapshots = rundata['snapshots']
        settings = rundata['setting']
        for i in range(len(snapshots) - history - 1):
            inp = parseFeatures(settings, snapshots[i:i + history])
            prevtarget = parseTarget(snapshots[i + history-1])
            nexttarget = parseTarget(snapshots[i + history])
            # target = (prev - next) / (next + prev) / 2
            # NOTE(review): described as "percentage gain", but the sign
            # convention (prev minus next) looks inverted — confirm against
            # the consumer of this dataset
            target = (-nexttarget+prevtarget)/(nexttarget+prevtarget)/2.
            D.addSample(inp, [target])        
    return D
Exemplo n.º 22
0
    def _update_impl(self, old, new, reward):
        """TD-style update of the attack and defence nets for one transition.

        The shared bootstrap target is gamma times the attack net's value of
        the new state; the attack net additionally sees only non-negative
        reward, the defence net only non-positive reward.
        """
        state_features = self.get_input_values(old)

        # bootstrap from the attack net's valuation of the successor state
        next_value = self.net_attack.activate(self.get_input_values(new))
        target = self.gamma * next_value

        attack_set = SupervisedDataSet(self.features_num, 1)
        attack_set.addSample(state_features, target + max(0, reward))
        defence_set = SupervisedDataSet(self.features_num, 1)
        defence_set.addSample(state_features, target + min(0, reward))

        self.trainer_attack.setData(attack_set)
        self.trainer_attack.train()
        self.trainer_defence.setData(defence_set)
        self.trainer_defence.train()
Exemplo n.º 23
0
    def _update_impl(self, old, new, reward):
        """TD-style update of the attack and defence nets for one transition:
        both share the bootstrap target gamma * V_attack(new); the attack
        net sees only positive reward, the defence net only negative."""
        old_input = self.get_input_values(old)

        # bootstrap from the attack net's valuation of the successor state
        v1_a = self.net_attack.activate(self.get_input_values(new))
        target = self.gamma * v1_a

        ds_a = SupervisedDataSet(self.features_num, 1)
        ds_a.addSample(old_input, target + max(0, reward))
        ds_d = SupervisedDataSet(self.features_num, 1)
        ds_d.addSample(old_input, target + min(0, reward))
        #         self.trainer.setData(ds)
        #         err = self.trainer.train()
        self.trainer_attack.setData(ds_a)
        self.trainer_attack.train()
        self.trainer_defence.setData(ds_d)
        self.trainer_defence.train()
Exemplo n.º 24
0
def NetworkTrain(trainDataSet, mnetwork=None, file='NetworkDump.pkl', maxEpochs=100):
    """Train a freshly built network on trainDataSet and pickle it to *file*.

    :param trainDataSet: sequence of (input_vector, output_vector) pairs;
        the dataset dimensions come from the first pair's widths
    :param mnetwork: ignored — a new network is always built (kept for
        backward compatibility). BUGFIX: the original default eagerly
        called NetworkBuild() at function-definition time, a side effect
        this None default avoids.
    :param file: path the (network, mapping) tuple is pickled to
    :param maxEpochs: cap on trainUntilConvergence iterations
    :return: the tuple produced by NetworkBuild
    """
    mnetwork = NetworkBuild(new=True)
    # the network's input modules must match the mapping built alongside it
    assert len(mnetwork[0].inmodules) == len(mnetwork[1].keys())
    print('DEBUG')
    print("lens " + str(len(trainDataSet[0][0])) + " " + str(len(mnetwork[0].inmodules)))

    # dataset format: input width / output width from the first sample
    DS = SupervisedDataSet(len(trainDataSet[0][0]), len(trainDataSet[0][1]))
    for itrainDataSet in trainDataSet:
        DS.addSample(itrainDataSet[0], itrainDataSet[1])

    # backprop training until convergence (bounded by maxEpochs);
    # verbose=True prints the total error as training proceeds
    mnetwork[0].sortModules()
    trainer = BackpropTrainer(mnetwork[0], DS, verbose=True, learningrate=0.01)
    trainer.trainUntilConvergence(maxEpochs=maxEpochs)

    # BUGFIX: the original leaked the file handle passed to pickle.dump
    with open(file, 'wb') as dump_file:
        pickle.dump(mnetwork, dump_file)
    return mnetwork
Exemplo n.º 25
0
def create_datasets():
    """Load the train/test CSVs into two 13-input, 1-output datasets.

    Each CSV row is 13 float features followed by an integer class label
    in the last column.

    :return: (train_ds, test_ds) tuple of SupervisedDataSets
    """
    def _load_csv(path, ds):
        # Shared loader: last column is the target, the rest are features.
        # (The original duplicated this loop and shadowed the builtin
        # `input` with a local.)
        with open(path, 'r') as fn:
            for row in csv.reader(fn):
                features = [float(x) for x in row[:-1]]
                target = [int(row[-1])]
                ds.addSample(features, target)
        return ds

    train_ds = _load_csv(TRAIN_FN, SupervisedDataSet(13, 1))
    test_ds = _load_csv(TEST_FN, SupervisedDataSet(13, 1))
    return train_ds, test_ds
Exemplo n.º 26
0
def treinaRede(entradaTreino, saidaTreino):
    """Build the supervised training set for the network.

    :param entradaTreino: training input rows (4 values each)
    :param saidaTreino: training target rows (1 value each)
    :return: a SupervisedDataSet holding every (input, target) pair
    """
    # 4 input values, 1 output value per sample
    treinamento = SupervisedDataSet(4, 1)
    for indice, entrada in enumerate(entradaTreino):
        treinamento.addSample(entrada, saidaTreino[indice])
    return treinamento
Exemplo n.º 27
0
    def __init__(self, domain, iters, trial_number):
        """Set up the active-cost experiment: a cost ensemble seeded with
        COST_LOW at every grid point."""
        super(ActCostExperiment,
              self).__init__(domain, Experiment.EXP_ACTCOST, iters,
                             ACTIVE_ENSEMBLE_SIZE, trial_number)
        self.cost_ensemble = Ensemble(self.ensemble_size, domain.inputdim,
                                      self.NUM_HIDDEN1, self.NUM_HIDDEN2, 1)
        self.train_costset = SupervisedDataSet(domain.inputdim, 1)

        # train cost network to reset costs: constant COST_LOW everywhere
        points = self.domain.generate_grid_points(INIT_COST_SAMPLES_AXIS)
        init_costset = SupervisedDataSet(domain.inputdim, 1)
        for point in points:
            z_cost = self.domain.COST_LOW
            init_costset.addSample(point, [z_cost])
        print 'Initializing Cost Ensemble...'
        self.cost_ensemble.train(init_costset)
        self.cost_ensemble.save_starting_weights()
Exemplo n.º 28
0
 def apply_updates(self):
     """Apply every queued (state, action) value delta to the network.

     Builds one dataset mapping each pair's cached network input to its
     current network value plus the queued delta, then trains for
     NTD_TRAIN_EPOCHS epochs.
     """
     dataset = SupervisedDataSet(self.inputdim, self.outputdim)
     for (si, ai) in self.updates.iterkeys():
         # cached network input for this state-action pair
         si_ai = '%s-%s' % (si, ai)
         network_in = self.network_inputs[si_ai]
         current_value = self.get_network_value(None, None, si_ai)
         # element-wise: new target = current value + queued delta
         new_value = [
             a + b for a, b in zip(current_value, self.updates[(si, ai)])
         ]
         dataset.addSample(network_in, new_value)
         if PRINT_GAME_RESULTS:
             print 'updating (%s, %s) from %s to %s' % (
                 si, ai, map(PrettyFloat,
                             current_value), map(PrettyFloat, new_value))
     # import pdb; pdb.set_trace()
     # skip training entirely when nothing was queued
     if dataset:  # len(dataset) > 0:
         self.trainer.setData(dataset)
         self.trainer.trainEpochs(NTD_TRAIN_EPOCHS)
Exemplo n.º 29
0
    def test_train(self, epochs=1):
        """Train a 300-input network on 70% of self.samples and return the
        number of misclassified samples among the first 100 test samples.

        :param epochs: number of backprop epochs to run (default 1)
        :return: misclassification count
        """
        print("Training...")

        # split the array in a way that the net will be
        # trained with 70% of the images and
        # tested with the rest
        split = int(len(self.samples) * 0.7)
        train_samples = self.samples[0:split]
        train_labels = self.labels[0:split]

        test_samples = self.samples[split:]
        test_labels = self.labels[split:]

        # build the net with 300 input values representing
        # each pixel of the 10x10 image (100 values)
        # and its Red,Green,Blue values (3 values)
        net = buildNetwork(300, 300, 1)
        ds = SupervisedDataSet(300, 1)
        for i in range(len(train_samples)):
            ds.addSample(tuple(np.array(train_samples[i], dtype='float64')),
                         (train_labels[i], ))

        trainer = BackpropTrainer(net, ds, verbose=True)
        trainer.trainEpochs(epochs)
        self.totalEpochs = epochs

        # evaluate on a fixed 100 test samples
        # NOTE(review): raises IndexError if fewer than 100 test samples exist
        error = 0
        counter = 0
        for i in range(0, 100):
            output = net.activate(
                tuple(np.array(test_samples[i], dtype='float64')))
            if round(output[0]) != test_labels[i]:
                counter += 1
                print(counter, " : output : ", output[0], " real answer : ",
                      test_labels[i])
                error += 1
            else:
                counter += 1
                print(counter, " : output : ", output[0], " real answer : ",
                      test_labels[i])

        print("Trained with " + str(epochs) + " epochs; Total: " +
              str(self.totalEpochs) + ";")
        return error
Exemplo n.º 30
0
def train_net():
    """Generator that trains the module-global image net, yielding one
    progress message per training pass.

    Builds a net sized to the example image, adds a sample per .jpg found
    in each class folder under INIT_FOLDER, then trains until the error
    drops below 0.1.
    """
    t = load_image_arr('example.jpg')  # example of image used by network
    #print('Resized t length:', len(t))
    global net
    net = buildNetwork(len(t), len(t), 1)
    ds = SupervisedDataSet(len(t), 1)
    
    # one sample per image; the target is the containing folder's name
    # NOTE(review): img_class is a string used as a numeric target —
    # confirm the class folders are named with numeric labels
    for test_dir in get_folders_in(INIT_FOLDER, full=True):
        img_class = os.path.basename(test_dir)
        for test_pic in get_list_of_files(test_dir, '.jpg'):
            #print('Adding {0} with class {1}'.format(test_pic, img_class))
            ds.addSample(load_image_arr(test_pic), (img_class, ))  # <- class
    
    trainer = BackpropTrainer(net, ds)
    error = 10
    iteration = 0
    # keep training until the epoch error falls below 0.1
    while error > 0.1:
        error = trainer.train()
        iteration += 1
        yield 'Iteration: {0}. Error: {1}'.format(iteration, error)
Exemplo n.º 31
0
def ANN(
    trainFeature, trainLabel, testFeature, testLabel, netStructure, para_rate,
    para_momentum
):  # netStructure lists four layer sizes; momentum is a parameter in SGD
    """Train a sigmoid feed-forward network on (trainFeature, trainLabel).

    Trains one full pass at a time until the dataset error changes by less
    than 1e-4, recording the error after each pass.

    :param netStructure: four layer sizes [in, hidden1, hidden2, out]
        (NOTE(review): an earlier comment said three entries, but the code
        reads netStructure[0..3])
    :return: list of dataset errors recorded during training
    """
    sampleNum = trainFeature.shape[0]
    featureNum = trainFeature.shape[1]
    Dataset = SupervisedDataSet(featureNum, 1)
    i = 0
    # add every training row as a (features, [label]) sample
    while (i < sampleNum):
        print(i)
        Dataset.addSample(list(trainFeature[i]), [trainLabel[i]])
        i += 1
    Network = buildNetwork(netStructure[0],
                           netStructure[1],
                           netStructure[2],
                           netStructure[3],
                           hiddenclass=SigmoidLayer,
                           outclass=SigmoidLayer)
    T = BackpropTrainer(Network,
                        Dataset,
                        learningrate=para_rate,
                        momentum=para_momentum,
                        verbose=True)
    #print(Dataset['input'])
    errorList = []
    errorList.append(T.testOnData(Dataset))
    T.trainOnDataset(Dataset)
    errorList.append(T.testOnData(Dataset))
    T.trainOnDataset(Dataset)
    # keep training until the error plateaus (change smaller than 1e-4)
    while (abs(T.testOnData(Dataset) - errorList[-1]) > 0.0001):
        T.trainOnDataset(Dataset)
        errorList.append(T.testOnData(Dataset))
    pass  #this step is for the output of predictedLabel
    print(np.array([Network.activate(x) for x in trainFeature]))
    #print(testLabel)
    print(
        Network.activate([
            0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0
        ]))
    return (errorList)
Exemplo n.º 32
0
def test_derivatives(net, inp):
    """Compare finite-difference derivatives of the net's output at *inp*
    against the backprop-chained derivatives from calc_chained_derivs,
    printing both (value-based and error-based) for manual inspection."""
    print 'Computing derivatives:'
    derivatives = []
    point_x = inp[0]
    point_y = inp[1]
    xs = [point_x, point_y]
    zs = net.activate(xs)
    zstarget = [FN(point_x, point_y)]
    delta = 0.0001
    # finite differences: perturb each parameter by delta, re-activate,
    # then restore the parameter
    for i in range(len(net.params)):
        net.params[i] += delta
        newzs = net.activate(xs)
        d = (newzs[0] - zs[0]) / delta
        derivatives.append(d)
        net.params[i] -= delta

    print '[',
    for i in range(len(derivatives)):
        print '%+4.2f,' % derivatives[i],
    print ']'

    # one-sample dataset for the backprop derivative computation
    dset = SupervisedDataSet(2, 1)
    dset.addSample(xs, zstarget)

    calc_chained_derivs(net, dset)
    print 'Bprop derivatives (value):'
    print '[',
    for i in range(len(net.derivs)):
        print '%+4.2f,' % net.derivs[i],
    print ']'
    net.resetDerivatives()

    # repeat, propagating the error signal instead of the raw value
    calc_chained_derivs(net, dset, use_error=True)
    print 'Bprop derivatives (error):'
    print '[',
    for i in range(len(net.derivs)):
        print '%+4.2f,' % net.derivs[i],
    print ']'
    net.resetDerivatives()
    print
	def test_train(self, epochs=1):
		"""Train a 300-input network on 70% of self.samples and return the
		number of misclassified samples among the first 100 test samples.

		:param epochs: number of backprop epochs to run (default 1)
		:return: misclassification count
		"""
		print("Training...")

		# split the array in a way that the net will be
		# trained with 70% of the images and
		# tested with the rest
		split = int(len(self.samples) * 0.7)
		train_samples = self.samples[0:split]
		train_labels  = self.labels[0:split]

		test_samples = self.samples[split:]
		test_labels  = self.labels[split:]

		# build the net with 300 input values representing
		# each pixel of the 10x10 image (100 values)
		# and its Red,Green,Blue values (3 values)
		net = buildNetwork(300, 300, 1)	
		ds = SupervisedDataSet(300, 1)
		for i in range(len(train_samples)):  
			ds.addSample(tuple(np.array(train_samples[i], dtype='float64')), (train_labels[i],))
		
		trainer = BackpropTrainer(net, ds, verbose=True)
		trainer.trainEpochs(epochs)
		self.totalEpochs = epochs
		
		# evaluate on a fixed 100 test samples
		# NOTE(review): raises IndexError if fewer than 100 test samples exist
		error = 0
		counter = 0
		for i in range(0, 100):
			output = net.activate(tuple(np.array(test_samples[i], dtype='float64')))
			if round(output[0]) != test_labels[i]:
				counter += 1
				print(counter, " : output : ", output[0], " real answer : ", test_labels[i])
				error += 1
			else:
				counter += 1
				print(counter, " : output : ", output[0], " real answer : ", test_labels[i])
		
		print("Trained with " + str(epochs) + " epochs; Total: " + str(self.totalEpochs) + ";")
		return error
Exemplo n.º 34
0
class NeuralNet:
    """Thin wrapper around a PyBrain feed-forward network.

    Bundles network construction, dataset handling, backprop training and
    pickle-based persistence behind one object.
    """

    def __init__(self):
        self.net = None        # the pybrain network (or any picklable stand-in)
        self.data_set = None   # SupervisedDataSet holding the training samples
        self.trainer = None    # BackpropTrainer bound to net/data_set
        self.inputs = None     # input layer width
        self.targets = None    # output layer width

    def build(self, inputs, hidden, output):
        """Create the network topology (with bias units)."""
        self.inputs = inputs
        self.targets = output
        self.net = buildNetwork(inputs, hidden, output, bias=True)

    def create_data_set(self):
        """Allocate an empty supervised data set matching the topology."""
        self.data_set = SupervisedDataSet(self.inputs, self.targets)

    def add_list_of_data(self, list_of_data, data_class):
        """Add every sample in list_of_data with the same target class."""
        for sample in list_of_data:
            self.data_set.addSample(sample, data_class)

    def train(self):
        """Run backprop epochs until the epoch error drops below 1e-3."""
        self.trainer = BackpropTrainer(self.net, self.data_set, learningrate=0.01)
        error = 10000
        iteration = 0
        while error > 0.001:
            error = self.trainer.train()
            # print(...) with a single argument behaves identically on
            # Python 2 and 3.
            print("Iteration: {0} Error {1}".format(iteration, error))
            iteration += 1

    def save_to_file(self, filename):
        """Pickle the network to *filename*.

        Binary mode is required: pickle streams are bytes, and text mode
        ('w') corrupts them / raises on Python 3.
        """
        with open(filename, 'wb') as f:
            pickle.dump(self.net, f)

    def load_from_file(self, filename):
        """Load a previously pickled network from *filename*."""
        with open(filename, 'rb') as f:
            self.net = pickle.load(f)

    def apply_over_data(self, data):
        """Activate the network on one input vector and return its output."""
        return self.net.activate(data)
Exemplo n.º 35
0
    def sim(self, board):
        """Play one simulated game from *board*, then train on the path.

        Greedily advances up to ``self.max_moves`` plies via ``get_best``,
        recording every (player, state, successor, value) step, and converts
        the visited path into (wins, plays) targets for the trainer.
        """
        path = []
        current = board
        winner = Board.STONE_EMPTY
        # range(1, max+1) in the original == max_moves iterations here.
        for _ in range(self.max_moves):
            moves, player, _ = Game.possible_moves(current)
            successor, successor_val = self.get_best(current, moves, player)
            path.append((player, current, successor, successor_val))
            over, winner, _ = successor.is_over(current)
            if over:
                break
            current = successor

        self.total_sim += 1

        ds = SupervisedDataSet(self.features_num, 2)
        for mover, prev_state, next_state, val in path:
            plays = val[1] * self.total_sim + 1
            wins = val[0] * self.total_sim + (1 if mover == winner else 0)
            ds.addSample(self.get_input_values(prev_state, next_state, mover),
                         (wins, plays))
        self.trainer.trainOnDataset(ds)
Exemplo n.º 36
0
    def make_evaluation_datasets(self):
        """Build evaluation data sets and dump plotting files.

        Writes (point, value) and (point, cost) rows over a dense plotting
        grid to ../data/funcvalue.txt and ../data/funccost.txt, then fills
        two SupervisedDataSets over a (typically coarser) evaluation grid.

        Returns:
            (eval_dataset, eval_costset) tuple of SupervisedDataSets.
        """
        eval_dataset = SupervisedDataSet(self.inputdim, self.outputdim)
        eval_costset = SupervisedDataSet(self.inputdim, self.outputdim)

        # Dump the true function/cost surfaces for plotting; `with`
        # guarantees the files are closed even if fn_base/cost_fn raises
        # (the original leaked the handles on error).
        with open('../data/funcvalue.txt', 'w') as f_input, \
                open('../data/funccost.txt', 'w') as f_input_cost:
            points = self.generate_grid_points(PLOT_SAMPLES_AXIS)
            for point in points:
                z = self.fn_base(point)
                z_cost = self.cost_fn(point)
                # e.g. "[1, 2]" -> "1 2"
                point_str = str(point).strip('[]').replace(',', '')
                f_input.write('%s %f\n' % (point_str, z[0]))
                f_input_cost.write('%s %f\n' % (point_str, z_cost))

        # Evaluation grids used for MSE reporting.
        points = self.generate_grid_points(self.EVAL_SAMPLES_AXIS)
        for point in points:
            z = self.fn_base(point)
            z_cost = self.cost_fn(point)
            eval_dataset.addSample(point, z)
            eval_costset.addSample(point, [z_cost])

        return (eval_dataset, eval_costset)
Exemplo n.º 37
0
def NeuralNetworks(data_set, label_set):
    """Train a feed-forward network on (data_set, label_set).

    Args:
        data_set: sequence of feature vectors (all the same length).
        label_set: sequence of target vectors, parallel to data_set.

    Returns:
        The trained network.
    """
    # Network sized from the data: one input per feature,
    # one output per target component.
    net = buildNet(len(data_set[0]), len(label_set[0]))

    # Wrap the samples in a pybrain supervised data set.
    ds = SupervisedDataSet(len(data_set[0]), len(label_set[0]))
    for sample, label in zip(data_set, label_set):
        ds.addSample(sample, label)

    # Plain backprop -- no momentum, no weight decay -- stopping on
    # validation-error convergence or after at most 5 epochs.
    trainer = BackpropTrainer(net,
                              ds,
                              momentum=0.0,
                              verbose=True,
                              weightdecay=0.0)
    trainer.trainUntilConvergence(maxEpochs=5)
    return net
Exemplo n.º 38
0
    def sim(self, board):
        """Run one simulated playout from `board`, then train the network.

        Greedily plays up to `self.max_moves` moves using `get_best`,
        recording each (player, state, new_state, value) step, then turns
        the visited path into (wins, plays) training targets.

        NOTE(review): unlike the other `sim` variant in this file,
        `Game.possible_moves` here returns a 2-tuple (moves, player).
        """
        visited_path = []
        state = board
        winner = Board.STONE_EMPTY
        for _ in range(1, self.max_moves + 1):
            moves, player = Game.possible_moves(state)
            state_new, state_new_val = self.get_best(state, moves, player)
            visited_path.append((player, state, state_new, state_new_val))
            # is_over is given the previous state; `winner` is refreshed
            # every iteration so it holds the final verdict after the loop.
            over, winner, _ = state_new.is_over(state)
            if over:
                break
            state = state_new

        self.total_sim += 1

        ds = SupervisedDataSet(self.features_num, 2)
        # val appears to hold normalized (win, play) statistics; they are
        # scaled back by total_sim and the winner's moves get one extra
        # win -- TODO confirm against get_best's contract.
        for player, state, new, val in visited_path:
            plays = val[1] * self.total_sim + 1
            wins = val[0] * self.total_sim
            if player == winner:
                wins += 1
            ds.addSample(self.get_input_values(state, new, player),
                         (wins, plays))
        self.trainer.trainOnDataset(ds)
Exemplo n.º 39
0
def buildTrainingSet(dataset):
    """Convert raw rows into a pybrain SupervisedDataSet.

    Each row holds 15 feature values followed by a class label in
    {0, 1, 2, 3}; the label is encoded as its two-bit binary representation
    in the two target outputs. Rows with any other label are skipped
    silently, exactly as the original elif chain did.

    Args:
        dataset: iterable of rows with at least 16 entries each.

    Returns:
        SupervisedDataSet of width (15, 2).
    """
    # Feature scaling was experimented with and dropped; rows are used as-is.
    gydataset = dataset
    # Two-bit target encoding: 0->(0,0), 1->(0,1), 2->(1,0), 3->(1,1).
    targets = {0: (0, 0), 1: (0, 1), 2: (1, 0), 3: (1, 1)}
    # The final training data.
    trainingset = SupervisedDataSet(15, 2)
    for line in gydataset:
        label = line[-1]
        if label in targets:
            trainingset.addSample(tuple(line[0:15]), targets[label])
    return trainingset
Exemplo n.º 40
0
def montaDados():
    """Build the supervised training data set.

    Returns:
        SupervisedDataSet with the eight 3-bit input patterns and their
        expected 1-bit outputs.
    """
    # (input bits, expected output) pairs, in the original insertion order.
    patterns = [
        ([0, 0, 0], [0]),
        ([0, 1, 1], [0]),
        ([1, 0, 1], [0]),
        ([1, 1, 0], [0]),
        ([1, 0, 0], [1]),
        ([0, 0, 1], [1]),
        ([0, 1, 0], [0]),
        ([1, 1, 1], [1]),
    ]

    dataset = SupervisedDataSet(3, 1)
    for entrada, saida in patterns:
        dataset.addSample(entrada, saida)

    return dataset
Exemplo n.º 41
0
def base_experiment():
    """Train value and cost networks on random samples and dump the results.

    Builds two identical 2-80-20-1 sigmoid networks, trains each until
    convergence on RANDOM_TRAINING_SAMPLES points drawn uniformly from the
    domain rectangle, reports MSE before/after on the shared evaluation
    sets, and writes the learned surfaces over an EVAL_SAMPLES_AXIS^2 grid
    to ../data/learnedvalue.txt and ../data/learnedcost.txt.
    """
    (eval_dataset, eval_costset) = DomainFnApprox.make_evaluation_datasets()

    # Random training samples drawn uniformly from the domain rectangle.
    random_train_dataset = SupervisedDataSet(2, 1)
    random_train_costset = SupervisedDataSet(2, 1)
    for i in range(RANDOM_TRAINING_SAMPLES):
        x = random.uniform(X_MIN, X_MAX)
        y = random.uniform(Y_MIN, Y_MAX)
        z = FN(x, y)
        z_cost = COST_FN(x, y)
        random_train_dataset.addSample([x, y], [z])
        random_train_costset.addSample([x, y], [z_cost])

    # Value network: 2 inputs -> 80 -> 20 sigmoid hidden units -> 1 output.
    value_network = buildNetwork(2,
                                 80,
                                 20,
                                 1,
                                 hiddenclass=SigmoidLayer,
                                 bias=True)
    value_trainer = BackpropTrainer(value_network,
                                    learningrate=LEARNING_RATE,
                                    momentum=MOMENTUM,
                                    verbose=True)

    # print(...) with one argument behaves the same on Python 2 and 3.
    print('Value Network Topology:')
    print(value_network)

    # Cost network: identical topology, trained on the cost samples.
    cost_network = buildNetwork(2,
                                80,
                                20,
                                1,
                                hiddenclass=SigmoidLayer,
                                bias=True)
    cost_trainer = BackpropTrainer(cost_network,
                                   learningrate=LEARNING_RATE,
                                   momentum=MOMENTUM,
                                   verbose=True)

    print('Value MSE before: %.4f' % value_trainer.testOnData(eval_dataset))
    value_trainer.trainUntilConvergence(random_train_dataset,
                                        continueEpochs=6,
                                        maxEpochs=MAX_EPOCHS)
    print('Value MSE after: %.4f' % value_trainer.testOnData(eval_dataset))

    print('Cost MSE before: %.4f' % cost_trainer.testOnData(eval_costset))
    cost_trainer.trainUntilConvergence(random_train_costset,
                                       continueEpochs=6,
                                       maxEpochs=MAX_EPOCHS)
    print('Cost MSE after: %.4f' % cost_trainer.testOnData(eval_costset))

    # Sample both learned surfaces on a square grid and dump them for
    # plotting; `with` closes the files even if activation raises
    # (the original leaked the handles on error).
    unit = (X_MAX - X_MIN) / (EVAL_SAMPLES_AXIS - 1)
    with open('../data/learnedvalue.txt', 'w') as f_value, \
            open('../data/learnedcost.txt', 'w') as f_cost:
        for i in range(EVAL_SAMPLES_AXIS):
            for j in range(EVAL_SAMPLES_AXIS):
                x = X_MIN + i * unit
                y = Y_MIN + j * unit
                z = value_network.activate([x, y])
                z_cost = cost_network.activate([x, y])
                f_value.write('%f %f %f\n' % (x, y, z[0]))
                f_cost.write('%f %f %f\n' % (x, y, z_cost[0]))
Exemplo n.º 42
0
        print '%+4.2f,' % net.derivs[i],
    print ']'
    net.resetDerivatives()
    print


if __name__ == '__main__':
    # Dump the raw 1-D training samples to disk for later plotting.
    f_input = open('../data2d/inputplot2ddata.txt', 'w')
    f_input_cost = open('../data2d/inputcostplot2ddata.txt', 'w')
    # Training sets: (x -> fn(x)) and (x -> cost_fn(x)), both 1-D.
    dataset = SupervisedDataSet(1, 1)
    costset = SupervisedDataSet(1, 1)
    for i in range(RANDOM_TRAINING_SAMPLES):
        x = random.uniform(-3, 3)
        z = fn(x)
        z_cost = cost_fn(x)
        dataset.addSample([x], [z])
        costset.addSample([x], [z_cost])
        f_input.write("%f %f\n" % (x, z))
        f_input_cost.write("%f %f\n" % (x, z_cost))
    f_input.close()
    f_input_cost.close()

    # Independent random evaluation sets drawn from the same interval.
    # NOTE(review): the script appears to continue past this chunk.
    eval_dataset = SupervisedDataSet(1, 1)
    eval_costset = SupervisedDataSet(1, 1)
    for i in range(RANDOM_TRAINING_SAMPLES):
        x = random.uniform(-3, 3)
        z = fn(x)
        z_cost = cost_fn(x)
        eval_dataset.addSample([x], [z])
        eval_costset.addSample([x], [z_cost])
Exemplo n.º 43
0
    def buildBMTrainer(self):
        """Build, train and evaluate the feed-forward regression network.

        Reads the training matrix from Excel, min-max scales inputs and
        targets, builds a Linear/Sigmoid/Tanh/Sigmoid/Linear network,
        trains it (custom `bmtrain` or plain `trainUntilConvergence`),
        and fills `self.restest` with the de-normalized predictions on
        the training rows.
        """
        # x: feature matrix, y: target column(s) from the spreadsheet.
        x, y = self.readexcel()
        # Simulate `size` rows of data:
        # self.writeexcel(size=100)
        # resx=contrib(x,0.9)
        # print '**********************'
        # print resx
        # x1=x[:,[3,4,5,6,7,8,9,10,11,0,1,2]]
        # resx1=contrib(x1)
        # print '**********************'
        # print resx1

        self.realy = y
        # Number of rows used for training (currently all of them).
        per = int(len(x))
        # Normalize the data (normalization is generally required when
        # Sigmoid activations are used).
        self.sx = MinMaxScaler()
        self.sy = MinMaxScaler()

        xTrain = x[:per]
        xTrain = self.sx.fit_transform(xTrain)
        yTrain = y[:per]
        yTrain = self.sy.fit_transform(yTrain)

        # Initialize the feed-forward network.
        self.__fnn = FeedForwardNetwork()

        # Build the input, hidden and output layers; 3-5 hidden layers are
        # typical -- more tends to be counter-productive.
        inLayer = LinearLayer(x.shape[1], 'inLayer')
        hiddenLayer0 = SigmoidLayer(int(self.hiddendim / 3), 'hiddenLayer0')
        hiddenLayer1 = TanhLayer(self.hiddendim, 'hiddenLayer1')
        hiddenLayer2 = SigmoidLayer(int(self.hiddendim / 3), 'hiddenLayer2')
        outLayer = LinearLayer(self.rescol, 'outLayer')

        # Register the layers with the network.
        self.__fnn.addInputModule(inLayer)
        self.__fnn.addModule(hiddenLayer0)
        self.__fnn.addModule(hiddenLayer1)
        self.__fnn.addModule(hiddenLayer2)
        self.__fnn.addOutputModule(outLayer)

        # Fully connect consecutive layers.
        in_to_hidden = FullConnection(inLayer, hiddenLayer0)
        hidden_to_hidden0 = FullConnection(hiddenLayer0, hiddenLayer1)
        hidden_to_hidden1 = FullConnection(hiddenLayer1, hiddenLayer2)
        hidden_to_out = FullConnection(hiddenLayer2, outLayer)

        # Register the connections and finalize the topology.
        self.__fnn.addConnection(in_to_hidden)
        self.__fnn.addConnection(hidden_to_hidden0)
        self.__fnn.addConnection(hidden_to_hidden1)
        self.__fnn.addConnection(hidden_to_out)
        self.__fnn.sortModules()
        # Initialize the supervised data set.
        DS = SupervisedDataSet(x.shape[1], self.rescol)

        # Add the training samples and labels to DS.
        # for i in range(len(xTrain)):
        #     DS.addSample(xTrain[i], yTrain[i])
        for i in range(len(xTrain)):
            DS.addSample(xTrain[i], yTrain[i])

        # Train with backprop until convergence (max 10000 epochs).
        trainer = BMBackpropTrainer(self.__fnn,
                                    DS,
                                    learningrate=0.0001,
                                    verbose=self.verbose)
        if self.myalg:
            trainingErrors = trainer.bmtrain(maxEpochs=10000,
                                             verbose=True,
                                             continueEpochs=3000,
                                             totalError=0.0001)
        else:
            trainingErrors = trainer.trainUntilConvergence(
                maxEpochs=10000, continueEpochs=3000, validationProportion=0.1)
        # CV = CrossValidator(trainer, DS, n_folds=4, valfunc=ModuleValidator.MSE)
        # CV.validate()
        # CrossValidator
        # trainingErrors = trainer.trainUntilConvergence(maxEpochs=10000,continueEpochs=5000, validationProportion=0.1)
        # self.finalError = trainingErrors[0][-2]
        # self.finalerror=trainingErrors[0][-2]
        # if (self.verbose):
        #     print 'final overall error:', self.finalError
        self.__sy = self.sy
        self.__sx = self.sx
        # De-normalize the network output for every training row and collect
        # the scalar predictions.  NOTE(review): `a` is computed but unused
        # here; the method may continue past the visible end of this chunk.
        for i in range(len(xTrain)):
            a = self.sy.inverse_transform(
                self.__fnn.activate(xTrain[i]).reshape(-1, 1))
            self.restest.append(
                self.sy.inverse_transform(
                    self.__fnn.activate(xTrain[i]).reshape(-1, 1))[0][0])
Exemplo n.º 44
0
from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules.tanhlayer import TanhLayer
from pybrain.supervised.trainers.backprop import BackpropTrainer

# Run:
# python2 feed_forward_neural_network.py X,
# where X is the desired amount of hidden nodes
from sys import argv
hidden = int(argv[1])  # X

# Expects one dimensional input and target output
ds = SupervisedDataSet(1, 1)
for x in range(1, 9):
    ds.addSample(x, x)

# 1 input, X hidden, 1 output
network = buildNetwork(1, hidden, 1, hiddenclass=TanhLayer)

# Init BackpropTrainer
trainer = BackpropTrainer(network, dataset=ds)

# Train until convergence
trainer.trainUntilConvergence(verbose=False,
                              validationData=0.15,
                              maxEpochs=1000,
                              continueEpochs=10)

# Activating the network on different integers such as the inputs in the data-set
print("// Hidden nodes: {}".format(hidden))
for x in range(1, 9):
Exemplo n.º 45
0
    min_array                  = [[],[],[],[],[],[],[],[]]   
    range_array              = [[],[],[],[],[],[],[],[]]
    args = parser.parse_args()
#    denorm_output = [[],[],[]]
    denorm_output = []
#    prediction = [[],[],[]]
    prediction = []
   
    training_normalization(args.f1,args.min,args.max)
    #initialize dataset for neural network with 5 input + bias and 3 target 
    DS = SupervisedDataSet(5,1)

    #adding datasets to the network
    for i in range (0,len(normal_array[0])):
#       DS.addSample([normal_array[0][i],normal_array[1][i],normal_array[2][i],normal_array[3][i],normal_array[4][i]],[normal_array[5][i],normal_array[6][i],normal_array[7][i]])
       DS.addSample([normal_array[0][i],normal_array[1][i],normal_array[2][i],normal_array[3][i],normal_array[4][i]],[normal_array[5][i]])

#    NN = buildNetwork(5,4,3,bias =True,hiddenclass=TanhLayer)
    NN = buildNetwork(DS.indim,5,DS.outdim,bias = True,hiddenclass=TanhLayer)
    TRAINER = BackpropTrainer(NN,dataset=DS,learningrate = 0.01,momentum = 0.99)

    print 'MSE before',TRAINER.testOnData(DS)
    TRAINER.trainOnDataset(DS,500)
    print 'MSE after',TRAINER.testOnData(DS)

# testing 
#clearing arrays
    normal_array           = [[],[],[],[],[],[],[],[]]
    normalized_input  = [[],[],[],[],[]]
    max_array                 = [[],[],[],[],[],[],[],[]]
    min_array                  = [[],[],[],[],[],[],[],[]]   
class Brain:
	"""Traffic-sign classifier built on a PyBrain feed-forward network.

	Loads a subset of GTSRB classes, downscales every image to 10x10 RGB
	(300 features scaled to [0, 1]) and trains a 300-300-1 regression
	network whose rounded output is interpreted as the class index.
	"""

	def __init__(self):
		# Subset of GTSRB class ids used for training.
		classes = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 15, 17, 25])
		self.samples = []
		self.labels = []
		for i in range(len(classes)):
			prefix = "GTSRB/" + format(classes[i], '05d') + '/'
			# The ground-truth CSV lists the image files of this class.
			# `with` closes the handle deterministically (the original
			# leaked it and shadowed the `file` builtin).
			with open(prefix + 'GT-' + format(classes[i], '05d') + '.csv') as gt_file:
				reader = csv.reader(gt_file, delimiter=';')
				next(reader, None)  # skip the header row
				for row in reader:
					image = cv2.imread(prefix + row[0])
					self.samples.append(image)
					self.labels.append(i)  # label = index into `classes`

		# Normalize: 10x10 RGB, scaled to [0, 1], flattened to 300 values.
		self.samples = [cv2.resize(s, (10, 10)) for s in self.samples]
		self.samples = np.array(self.samples).astype(np.float32) / 255
		self.samples = [s.flatten() for s in self.samples]

		# Shuffle samples and labels with the same seed so they stay aligned.
		np.random.seed(0)
		np.random.shuffle(self.samples)
		np.random.seed(0)
		np.random.shuffle(self.labels)

		self.totalEpochs = 0  # running total of epochs trained so far

	def test_train(self, epochs=1):
		"""Train a throwaway network on 70% of the data and return the
		number of misclassifications on the first 100 held-out samples."""
		print("Training...")

		split = int(len(self.samples) * 0.7)
		train_samples = self.samples[0:split]
		train_labels = self.labels[0:split]

		test_samples = self.samples[split:]
		test_labels = self.labels[split:]

		net = buildNetwork(300, 300, 1)
		ds = SupervisedDataSet(300, 1)
		for sample, label in zip(train_samples, train_labels):
			ds.addSample(tuple(np.array(sample, dtype='float64')), (label,))

		trainer = BackpropTrainer(net, ds, verbose=True)
		trainer.trainEpochs(epochs)
		self.totalEpochs = epochs

		# Rounded scalar output vs. integer class label. The original
		# duplicated the counter/print logic in both branches; folded here.
		error = 0
		counter = 0
		for i in range(0, 100):
			output = net.activate(tuple(np.array(test_samples[i], dtype='float64')))
			counter += 1
			print(counter, " : output : ", output[0], " real answer : ", test_labels[i])
			if round(output[0]) != test_labels[i]:
				error += 1

		print("Trained with " + str(epochs) + " epochs; Total: " + str(self.totalEpochs) + ";")
		return error

	def train_clean(self, epochs=1):
		"""(Re)build the shared network/dataset/trainer and train from scratch."""
		print("Training...")
		self.totalEpochs = epochs

		train_samples = self.samples
		train_labels = self.labels

		self.net_shared = buildNetwork(300, 300, 1)
		self.ds_shared = SupervisedDataSet(300, 1)
		for sample, label in zip(train_samples, train_labels):
			self.ds_shared.addSample(tuple(np.array(sample, dtype='float64')), (label,))

		self.trainer_shared = BackpropTrainer(self.net_shared, self.ds_shared, verbose=True)
		self.trainer_shared.trainEpochs(epochs)

		print("Trained with " + str(epochs) + " epochs; Total: " + str(self.totalEpochs) + ";")

	def train_more(self, epochs=1):
		"""Continue training the shared network for `epochs` more epochs."""
		print("Training...")
		self.totalEpochs += epochs
		self.trainer_shared.trainEpochs(epochs)
		print("Trained with " + str(epochs) + " epochs more; Total: " + str(self.totalEpochs) + ";")

	def test_image(self, filename):
		"""Classify a single image file; returns the raw network output."""
		image = cv2.imread(filename)
		images = [image]

		# Same preprocessing as __init__: 10x10 RGB, [0, 1], flat 300-vector.
		images = [cv2.resize(s, (10, 10)) for s in images]
		images = np.array(images).astype(np.float32) / 255
		images = [s.flatten() for s in images]

		output = self.net_shared.activate(tuple(np.array(images[0], dtype='float64')))
		print("Output: ", output[0])
		return output[0]

	def import_network(self, filename):
		"""Load a saved network and rebuild the dataset/trainer around it."""
		train_samples = self.samples
		train_labels = self.labels

		# Re-shuffle with the fixed seed so samples and labels stay aligned.
		np.random.seed(0)
		np.random.shuffle(train_samples)
		np.random.seed(0)
		np.random.shuffle(train_labels)

		self.net_shared = NetworkReader.readFrom(filename)
		self.ds_shared = SupervisedDataSet(300, 1)
		for sample, label in zip(train_samples, train_labels):
			self.ds_shared.addSample(tuple(np.array(sample, dtype='float64')), (label,))

		self.trainer_shared = BackpropTrainer(self.net_shared, self.ds_shared, verbose=True)

	def export_network(self, filename):
		"""Persist the shared network to disk via pybrain's NetworkWriter."""
		NetworkWriter.writeToFile(self.net_shared, filename)
Exemplo n.º 47
0
 def _createData(self, nInput, nTarget, values):
     """Build a SupervisedDataSet of width (nInput, nTarget).

     `values` is an iterable of (input, target) pairs which are added
     in order.
     """
     dataset = SupervisedDataSet(nInput, nTarget)
     for sample, label in values:
         dataset.addSample(sample, label)
     return dataset
    result = []
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, basestring):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result

# source: http://www.acodemics.co.uk/2014/06/20/image-recognition-with-pybrains/

if __name__ == "__main__":

    base_dir = '/home/hoshiro/Pictures/test-img/characters/'
    # Reference image: its length fixes the input-layer width.
    t = loadImage(base_dir + 'a.png')
    net = buildNetwork(len(t), len(t), 1)
    ds = SupervisedDataSet(len(t), 1)
    # One sample per character image, labeled 1..4 for a..d.
    ds.addSample(loadImage(base_dir + 'a.png'), (1,))
    ds.addSample(loadImage(base_dir + 'b.png'), (2,))
    ds.addSample(loadImage(base_dir + 'c.png'), (3,))
    ds.addSample(loadImage(base_dir + 'd.png'), (4,))

    # Train until the per-epoch error drops below 1e-3.
    trainer = BackpropTrainer(net, ds)
    error = 10
    iteration = 0
    while error > 0.001:
        error = trainer.train()
        iteration += 1
        print "Iteration: {0} Error {1}".format(iteration, error)

    print "\nResult: ", net.activate(loadImage(base_dir + 'b.png'))
Exemplo n.º 49
0
    #Limit the data
    #train_data = train_data[0:10]
    #answer = answer[0:10]


    #Standardize the input data (0-1 only in NN)
    min_max_scaler = preprocessing.MinMaxScaler()
    train_data = min_max_scaler.fit_transform(train_data)


    #Create the training data
    D = SupervisedDataSet(len(train_data[0]),1) #input, target

    for counter,item in enumerate(train_data):
        D.addSample(train_data[counter], answer[counter])

    #print D['target']


    #Create the NN
    N = buildNetwork(len(train_data[0]),200,1, bias=True) #152 76=(152+1)/2


    #Train the NN with backpropagation
    T = BackpropTrainer(N, D, learningrate = 0.1, momentum = 0.9)

    i=0
    error = []
    time_before = time.time()
    while i < 50 and T.testOnData(D) > 0.001:
Exemplo n.º 50
0
    result = []
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, basestring):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result
 
if __name__ == "__main__":
 
    t = loadImage('p/face_2copy.png')
    

    net = buildNetwork(len(t), .03*len(t), 1)
    ds = SupervisedDataSet(len(t), 1)
    ds.addSample(loadImage('pic/1.png'),(2,))
    ds.addSample(loadImage('pic/a.png'),(1,))
    ds.addSample(loadImage('pic/b.png'),(1,))
    ds.addSample(loadImage('pic/c.png'),(1,))
    ds.addSample(loadImage('pic/d.png'),(1,))
    ds.addSample(loadImage('pic/e.png'),(1,))
    ds.addSample(loadImage('pic/f.png'),(1,))
    ds.addSample(loadImage('pic/g.png'),(1,))
    ds.addSample(loadImage('pic/h.png'),(1,))
    ds.addSample(loadImage('pic/2.png'),(2,))
 
    trainer = BackpropTrainer(net, ds)
    error = 10
    iteration = 0
    while error > 0.001: 
        error = trainer.train()
Exemplo n.º 51
0
    result = []
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, basestring):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result
 
if __name__ == "__main__":
 
    t = loadImage('pic/alligatorcopy.png')
    

    net = buildNetwork(len(t), .03*len(t), 1)
    ds = SupervisedDataSet(len(t), 1)
    ds.addSample(loadImage('pic/alligatorcopy.png'),(1,))
    ds.addSample(loadImage('pic/catcopy.png'),(2,))
    ds.addSample(loadImage('pic/dogcopy.png'),(3,))
    ds.addSample(loadImage('pic/giraffecopy.png'),(4,))
    ds.addSample(loadImage('pic/gorillacopy.png'),(5,))
    
    trainer = BackpropTrainer(net, ds)
    error = 10
    iteration = 0
    while error > 0.001: 
        error = trainer.train()
        iteration += 1
        
    ap=['pic/alligatorcopy.png','pic/catcopy.png','pic/dogcopy.png','pic/giraffecopy.png','pic/gorillacopy.png']
    an=['alligator','cat','dog','giraffe','gorilla']
    for a in ap:
Exemplo n.º 52
0
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.supervised.trainers.mixturedensity import BackpropTrainerMix
from pybrain.structure.networks.feedforward import FeedForwardNetwork
from pybrain.structure.modules.linearlayer import LinearLayer
from pybrain.structure.modules.sigmoidlayer import SigmoidLayer
from pybrain.structure.connections.full import FullConnection


def printNetResult(identifier, net):
    """Print *identifier* followed by the net's activation on each of the
    four 2-bit input combinations (the XOR truth-table corners)."""
    corners = ((0, 0), (0, 1), (1, 0), (1, 1))
    print(identifier, *(net.activate(corner) for corner in corners))


ds = SupervisedDataSet(2, 1)

ds.addSample((0, 0), (0, ))
ds.addSample((0, 1), (1, ))
ds.addSample((1, 0), (1, ))
ds.addSample((1, 1), (0, ))

for input, target in ds:
    print(input, target)

#define layers and connections
inLayer = LinearLayer(2)
hiddenLayerOne = SigmoidLayer(4, "one")
hiddenLayerTwo = SigmoidLayer(4, "two")
outLayer = LinearLayer(1)
inToHiddenOne = FullConnection(inLayer, hiddenLayerOne)
hiddenOneToTwo = FullConnection(hiddenLayerOne, hiddenLayerTwo)
hiddenTwoToOut = FullConnection(hiddenLayerTwo, outLayer)
Exemplo n.º 53
0
    result = []
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, basestring):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result
 
if __name__ == "__main__":
 
    t = loadImage('dog1copy.png')
    
    net = buildNetwork(len(t), .03*len(t), 1)
    ds = SupervisedDataSet(len(t), 1)
    
    ds.addSample(loadImage('dog1copy.png'),(0,))
    ds.addSample(loadImage('dog2copy.png'),(0,))
    ds.addSample(loadImage('dog3copy.png'),(0,))
    ds.addSample(loadImage('dog4copy.png'),(0,))
    ds.addSample(loadImage('dog5copy.png'),(0,))
    ds.addSample(loadImage('dog6copy.png'),(0,))
    ds.addSample(loadImage('dog7copy.png'),(0,))
    ds.addSample(loadImage('giraffe1copy.png'),(1,))
    ds.addSample(loadImage('giraffe2copy.png'),(1,))
    ds.addSample(loadImage('giraffe3copy.png'),(1,))
    ds.addSample(loadImage('giraffe4copy.png'),(1,))
    ds.addSample(loadImage('giraffe5copy.png'),(1,))
    ds.addSample(loadImage('giraffe6copy.png'),(1,))
    ds.addSample(loadImage('giraffe7copy.png'),(1,))
    ds.addSample(loadImage('alligator1copy.png'),(2,))
    ds.addSample(loadImage('alligator2copy.png'),(2,))
Exemplo n.º 54
0
# Train a feed-forward network on samples stored in the local MongoDB
# `aitetris` database and pickle the trained model to disk.
db = MongoClient('localhost').aitetris

output_model_file = 'model.pkl'

hidden_size = 20
epochs = 150

input_size = 12
target_size = 3

# prepare dataset


ds = SDS( input_size, target_size )
# NOTE(review): db.tetris.find() is re-issued twice per row, which is
# O(n^2) round-trips -- fine for small collections, slow for large ones.
for i in range ( db.tetris.count() ):
	ds.addSample(db.tetris.find()[i][u'input'], db.tetris.find()[i][u'target'])

# init and train

net = buildNetwork( input_size, hidden_size, target_size, bias = True )
trainer = BackpropTrainer( net,ds )

print "training for {} epochs...".format( epochs )

# Each trainer.train() call runs one epoch; report RMSE as we go.
for i in range( epochs ):
	mse = trainer.train()
	rmse = sqrt( mse )
	print "training RMSE, epoch {}: {}".format( i + 1, rmse )

pickle.dump( net, open( output_model_file, 'wb' ))
Exemplo n.º 55
0
    '3b.txt', '3c.txt', '4.txt', '4b.txt', '4c.txt', '5.txt', '5b.txt',
    '5c.txt', '6.txt', '6b.txt', '6c.txt', '7.txt', '7b.txt', '7c.txt',
    '8.txt', '8b.txt', '8c.txt', '9.txt', '9b.txt', '9c.txt', '0.txt',
    '0b.txt', '0c.txt'
]

# a resposta do número
resposta = [[1], [1], [1], [2], [2], [2], [3], [3], [3], [4], [4], [4], [5],
            [5], [5], [6], [6], [6], [7], [7], [7], [8], [8], [8], [9], [9],
            [9], [0], [0], [0]]
#resposta = [[1], [1], [1], [1], [1], [1], [1]]

i = 0
for arquivo in arquivos:  # para cada arquivo de treinamento
    data = getData(arquivo)  # pegue os dados do arquivo
    dataSet.addSample(data, resposta[i])  # add dados no dataSet
    i = i + 1

# trainer
trainer = BackpropTrainer(network, dataSet)
error = 1
iteration = 0
outputs = []
file = open("outputs.txt", "w")  # arquivo para guardar os resultados

while error > 0.001:  # 10 ^ -3
    error = trainer.train()
    outputs.append(error)
    iteration += 1
    print(iteration, error)
    file.write(str(error) + "\n")
Exemplo n.º 56
0
    result = []
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, basestring):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result
 
if __name__ == "__main__":

    # Reference image only used to size the network's input layer.
    t = loadImage('pic/a.png')

    # NOTE(review): .03*len(t) is a float; pybrain layer sizes are
    # integer counts -- confirm this worked on the version in use.
    net = buildNetwork(len(t), .03*len(t), 1)
    ds = SupervisedDataSet(len(t), 1)
    # Letter images are labeled 1, digit images 0.
    ds.addSample(loadImage('pic/a.png'),(1,))
    ds.addSample(loadImage('pic/d.png'),(1,))
    ds.addSample(loadImage('pic/e.png'),(1,))
    ds.addSample(loadImage('pic/1.png'),(0,))
    ds.addSample(loadImage('pic/b.png'),(1,))
    ds.addSample(loadImage('pic/2.png'),(0,))

    # Train until the per-epoch error drops below 1e-4.
    trainer = BackpropTrainer(net, ds)
    error = 10
    iteration = 0
    while error > 0.0001:
        error = trainer.train()
        iteration += 1
        print "Iteration: {0} Error {1}".format(iteration, error)

    print "\nResult: ", net.activate(loadImage('pic/a.png'))
Exemplo n.º 57
0
    of hidden nodes, from 8 to 1
    """
    for hidden in reversed(range(1, 9)):
        net = train(1, hidden, 1, dataset)
        print("-----------------------------")
        print("Network with %i hidden nodes" % hidden)
        print("-----------------------------")
        for i in range(1, 9):
            print("Input: %i, Output: %.5f" % (i, net.activate([i])[0]))


if __name__ == "__main__":

    # Identity data set: target equals input for i in 1..8.
    dataset = SupervisedDataSet(1, 1)
    for i in range(1, 9):
        dataset.addSample(i, i)

    # Random probe values, presumably consumed as a global by
    # activate_various() defined earlier in the file -- TODO confirm.
    values = [random.uniform(-15, 15) for _ in range(0, 9)]

    activate_various()
    decreasing_hidden_nodes()









Exemplo n.º 58
0
def flatten(x):
    """Recursively flatten nested iterables into a flat list.

    Strings and byte strings are treated as atoms (never expanded into
    their characters), matching the original ``basestring`` guard.

    >>> flatten([1, [2, [3, 4]], 5])
    [1, 2, 3, 4, 5]
    """
    result = []
    for el in x:
        # `basestring` only exists on Python 2 (NameError on Python 3 once
        # a string with __iter__ reaches the check); testing the concrete
        # (str, bytes) types keeps Py2 behavior and fixes Py3.
        if hasattr(el, "__iter__") and not isinstance(el, (str, bytes)):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result
 
if __name__ == "__main__":
 
    t = loadImage('characters/a.png')
 
    net = buildNetwork(len(t), len(t), 1)
    ds = SupervisedDataSet(len(t), 1)
    ds.addSample(loadImage('characters/a.png'),(1,))
    ds.addSample(loadImage('characters/b.png'),(2,))
    ds.addSample(loadImage('characters/c.png'),(3,))
    ds.addSample(loadImage('characters/d.png'),(4,))
 
    trainer = BackpropTrainer(net, ds)
    error = 10
    iteration = 0
    while error > 0.001: 
        error = trainer.train()
        iteration += 1
        print "Iteration: {0} Error {1}".format(iteration, error)
 
    print "\nResult: ", net.activate(loadImage('characters/a.png'))
    print "\nResult: ", net.activate(loadImage('characters/b.png'))
    print "\nResult: ", net.activate(loadImage('characters/c.png'))