def save(self, filename: str = 'default'):
    # Temporarily swap in an empty dataset so the pickled object stays small,
    # save, then restore the original dataset.
    original_ds = self.ds
    self.ds = SupervisedDataSet(self.nn.indim, self.nn.outdim)
    saving.saveObj(self, filename + '.nn')
    self.ds = original_ds
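# A matching loader is not shown; assuming saving.loadObj is the counterpart
# of saving.saveObj (a hypothetical project helper), it might look like:
#
#     def load(filename: str = 'default'):
#         return saving.loadObj(filename + '.nn')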
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

net = buildNetwork(2, 3, 1)
data = SupervisedDataSet(2, 1)

data.addSample((0, 0), (0,))
data.addSample((0, 1), (1,))
data.addSample((1, 0), (1,))
data.addSample((1, 1), (0,))

trainer = BackpropTrainer(net, data)

epoch = 10000

for i in range(epoch):
    trainer.train()

print(net.activate([0, 0]))
print(net.activate([1, 0]))
print(net.activate([0, 1]))
print(net.activate([1, 1]))
original = template  # keep an unmodified copy before edge detection
template = cv2.Canny(template, 50, 200)
(tH, tW) = template.shape[:2]
plt.ion()

net = buildNetwork(3, 6, game.get_available_buttons_size())

image = game.get_game_screen()
finalImage = image

training = True
detected = True
avgX = 0.0
avgY = 0.0

ds = SupervisedDataSet(3, game.get_available_buttons_size())

# Output to CSV ------------------------


data = ["first_name,last_name,city".split(","),
        "Tyrese,Hirthe,Strackeport".split(","),
        "Jules,Dicki,Lake Nickolasville".split(",")]

# "output.csv" is an illustrative output path
with open("output.csv", "wb") as csv_file:
    writer = csv.writer(csv_file, delimiter=',')
    for line in data:
        writer.writerow(line)
Example #4
# print("********eight********")
# for elem in eightNull.keys():
#     print(elem+'\n')

# get the training data
readerTraining = csv.reader(
    file('/Users/jason/Desktop/modeling/data/TrainingData.csv', 'rb'))
data_training_raw = []
for line in readerTraining:
    if readerTraining.line_num == 1:  # skip the header row
        continue
    data_training_raw.append(line)
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#construct NN for TWO NULL:
ds1 = SupervisedDataSet(12, 3)
ds2 = SupervisedDataSet(12, 3)
ds3 = SupervisedDataSet(12, 3)
ds4 = SupervisedDataSet(12, 3)
ds5 = SupervisedDataSet(12, 3)
ds6 = SupervisedDataSet(12, 3)
ds7 = SupervisedDataSet(12, 3)
ds8 = SupervisedDataSet(12, 3)
ds9 = SupervisedDataSet(12, 3)
ds10 = SupervisedDataSet(12, 3)
ds11 = SupervisedDataSet(12, 3)

for row in data_training_raw:
    tuple_data = []
    output_tmp1 = []
    for i in range(6, 21):
Example #5
import math
import numpy as np
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import TanhLayer
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.validation import ModuleValidator

if __name__ == '__main__':

    # Define the neural network
    dimensaoDaEntrada = 1
    dimensaoDaCamadaEscondida = 22
    dimensaoDaSaida = 1

    rn = buildNetwork(dimensaoDaEntrada, dimensaoDaCamadaEscondida,
                      dimensaoDaSaida, bias=True, hiddenclass=TanhLayer)

    # Create the data
    tamanhoDaAmostra = 400
    dados = SupervisedDataSet(dimensaoDaEntrada, dimensaoDaSaida)

    comRuido = False

    # Generate a sample of the function f(x) = sin(x)
    for i in range(tamanhoDaAmostra):
        x = np.random.uniform(0, 2 * math.pi, 1)
        if comRuido:
            dados.addSample((x), (math.sin(x) + np.random.normal(0, 0.1, 1),))
        else:
            dados.addSample((x), (math.sin(x),))

    treinoSupervisionado = BackpropTrainer(rn, dados)

    # number of iterations; outputs are fed back into the network inputs
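    # (the example is cut off here; a minimal training loop, as a sketch:)
    for _ in range(100):
        treinoSupervisionado.train()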
Example #6
def makerap(filename):
    # read markovied words
    if word_by_word == 0:
        lyrics = open("userData/" + filename).read().split("\n")
    elif word_by_word == 1:
        lyrics = markov(filename).split("\n")
    # the (now empty) song the neural network is going to write
    song = []
    statusFile = open("userData/progress-" + filename, "w")
    statusFile.write("||||||||||||||||||||||||||||||| 40%" + "\n")
    statusFile.close()
    # all of the possible rhymes based on the contents of the stuff you fed it
    if training == 1:
        all_possible_rhymes = rhymeindex(lyrics)
    # should uncomment if you don't want to read the rhyming words from file
    #elif training == 0:
    # with open("corpus_markov_" + filename, 'w') as result_f:
    #    all_possible_rhymes  = opennetwork(epoch)[1]
    #   rhymes_in_lyrics = rhymeindex(lyrics)
    #  for rhyme in rhymes_in_lyrics:
    #     if rhyme not in all_possible_rhymes:
    #      all_possible_rhymes.append(rhyme)
    #     result_f.write(rhyme+'\n')
    #   print all_possible_rhymes

    # should comment if you want to get the rhyming words from the network
    all_possible_rhymes = open("rhymingWords.txt").read().split("\n")

    if training == 1:
        net = buildNetwork(4,
                           8,
                           8,
                           8,
                           8,
                           8,
                           8,
                           8,
                           8,
                           4,
                           recurrent=True,
                           hiddenclass=TanhLayer)
        t = BackpropTrainer(net, learningrate=0.05, momentum=0.5, verbose=True)

    # This loads a neural network that has already been trained on an actual rap song - so it knows how the rhymes and syllables should fit together
    if training == 0:
        rapdict = []
        rhyme_list_generator(lyrics, rapdict, all_possible_rhymes)
        net = opennetwork(epoch)[0]
        t = BackpropTrainer(net, learningrate=0.01, momentum=0.5, verbose=True)

    progressUpdater("|||||||||||||||||||||||||||||||||| 50%", filename)

    # debug stuff...
    #print "\n\nAlright, here are all of the possible rhymes from the lyrics it can draw from."
    #print all_possible_rhymes

    if training == 1:
        # rapdict is just the list containing smaller lists as follows;
        # [the text of the line, the number of syllables in the line, the number of the rhyme scheme of the line]

        rapdict = []
        rhyme_list_generator(lyrics, rapdict, all_possible_rhymes)
        print "\n\nAlright, here's the information it will be working with - in the form of lyric, syllables, and rhyming scheme"
        print rapdict

        # makes a dataset
        ds = SupervisedDataSet(4, 4)
        # each sample maps the (syllables, rhyme scheme) of two adjacent lines
        # onto the (syllables, rhyme scheme) of the two lines that follow them.

        for idx, i in enumerate(rapdict[:-3]):
            if i != "" and rapdict[idx + 1] != "" and rapdict[
                    idx + 2] != "" and rapdict[idx + 3] != "":
                # twobars holds the syllables and rhyme scheme of four consecutive lines
                twobars = [
                    i[1], i[2],
                    rapdict[idx + 1][1], rapdict[idx + 1][2],
                    rapdict[idx + 2][1], rapdict[idx + 2][2],
                    rapdict[idx + 3][1], rapdict[idx + 3][2]
                ]

                # twobars gets formatted into floating point values between 0 and 1 so it can be entered into the dataset
                ds.addSample(
                    (twobars[0] / float(20), int(twobars[1]) /
                     float(len(all_possible_rhymes)), twobars[2] / float(20),
                     int(twobars[3]) / float(len(all_possible_rhymes))),
                    (twobars[4] / float(20), int(twobars[5]) /
                     float(len(all_possible_rhymes)), twobars[6] / float(20),
                     int(twobars[7]) / float(len(all_possible_rhymes))))

        # printing the dataset
        print "\n\nAlright, here is the dataset."
        print ds

    #just to make sure it doesn't keep using the same lyric over and over
    lyricsused = []

    trainingcount = 0

    progressUpdater("|||||||||||||||||||||||||||||||||||||||||| 60%", filename)
    progressBarCounter = 0

    # The divisor 3 below can be tweaked - it just keeps things from getting too repetitive/drawn out.
    # For example, if I had 30 lines to draw from, I wouldn't want to rearrange them into a 30-line song;
    # it works much better to take only 10 rhyming lines and build a song from those.
    if training == 0:
        while len(song) < len(lyrics) / 3 and len(song) < 50:
            verse = writearap(
                [(random.choice(range(1, 20))) / 20.0,
                 (random.choice(range(1, len(all_possible_rhymes)))) /
                 float(len(all_possible_rhymes)),
                 (random.choice(range(1, 20))) / 20.0,
                 (random.choice(range(1, len(all_possible_rhymes)))) /
                 float(len(all_possible_rhymes))], net, rapdict,
                all_possible_rhymes, lyricsused, song)
            # this threshold can be adjusted - the short verses it generates are usually low quality
            if len(verse) > 3:
                for line in lyricsused:
                    # actually write the line to the song
                    song.append(line)
                song.append("\n...\n")
                print "Just wrote a verse to the file... - " + str(lyricsused)
                lyricsused = []
                progress_stages = [
                    "||||||||||||||||||||||||||||||||||||||||||||||| 65%",
                    "||||||||||||||||||||||||||||||||||||||||||||||||||| 70%",
                    "|||||||||||||||||||||||||||||||||||||||||||||||||||||||| 80%",
                    "||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 90%",
                    "|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 95%",
                ]
                if progressBarCounter < len(progress_stages):
                    progressUpdater(progress_stages[progressBarCounter],
                                    filename)
                progressBarCounter += 1

    # The part that actually writes a rap.
    final_song = open("userData/neural_rap" + filename, "w+")
    for line in song:
        final_song.write(line + "\n")
    final_song.close()
    if training == 1:
        while True:
            epochs_per_iteration = 100
            trainingcount += epochs_per_iteration
            t.trainOnDataset(ds, epochs_per_iteration)
            #print "just wrote " + str(trainingcount) + "/" + "..."
            savenetwork(net, all_possible_rhymes, trainingcount)
Example #7
def flatten(x):
    # (the snippet begins mid-function in the original; this body is an assumed
    #  reconstruction: it turns the padded character list into numeric codes so
    #  each word becomes a 15-dimensional input vector)
    result = [float(ord(c)) for c in x]
    return result

def fullfil(x):
    # pad the character list to length 15 with a filler symbol
    result = list(x)
    if len(result) < 15:
        for i in range(15 - len(result)):
            result.append("Ϩ")
    return result

if __name__ == "__main__":
    print(chr(1000))
cities = {1 : "Москва", 2: "Санкт-Петербург" , 3: "Пермь", 4 : "Омск", 5 : "Екатеринбург", 6 : "Тюмень", 7 : "Ижевск"}
stemmer = RussianStemmer()
s1 = list(stemmer.stem("Москва"))
net = buildNetwork(15, 30, 1)
dataset = SupervisedDataSet(15, 1)

dataset.addSample(flatten(fullfil(stemmer.stem("Москва"))), 1)
dataset.addSample(flatten(fullfil(stemmer.stem("Моск"))), 1)
dataset.addSample(flatten(fullfil(stemmer.stem("Мск"))), 1)
dataset.addSample(flatten(fullfil(stemmer.stem("Нерезин"))), 2)
dataset.addSample(flatten(fullfil(stemmer.stem("Нерезиновск"))), 1)
dataset.addSample(flatten(fullfil(stemmer.stem("Резин"))), 1)
dataset.addSample(flatten(fullfil(stemmer.stem("Дефолт"))), 1)
dataset.addSample(flatten(fullfil(stemmer.stem("Дефолтсити"))), 1)

dataset.addSample(flatten(fullfil(stemmer.stem("Питер"))), 2)
dataset.addSample(flatten(fullfil(stemmer.stem("Петр"))), 2)
dataset.addSample(flatten(fullfil(stemmer.stem("Петер"))), 2)
dataset.addSample(flatten(fullfil(stemmer.stem("Санкт-Петербург"))), 2)
dataset.addSample(flatten(fullfil(stemmer.stem("Спб"))), 2)
    # features = sys.argv[1]
    # Make the first argument the PCA scaled file.
    pca = sys.argv[1]
    # The second argument is the synergy file.
    synergy = sys.argv[2]

    out = "feature_sets.csv"

    # drug_dict = read_drug_data(features)
    pca_dict = read_pca_data(pca)
    synergy_dict = read_synergy_data(synergy)
    # dump_drug_dict_as_flat(pca_dict, out)
    training_input, input_len = build_training_input(pca_dict, synergy_dict)
    # input_len = training_input[list(training_input.keys())[0]]['INPUT']
    target_len = 1
    ds = SupervisedDataSet(input_len, target_len)
    for t1 in training_input:
        for t2 in training_input[t1]:
            print("Input Vector", training_input[t1][t2]['INPUT'],
                  training_input[t1][t2]['OUTPUT'])
            ds.addSample(training_input[t1][t2]['INPUT'],
                         training_input[t1][t2]['OUTPUT'])

    n = buildNetwork(ds.indim, 3, ds.outdim, bias=True)
    t = BackpropTrainer(n, learningrate=0.001, momentum=0.05, verbose=True)
    print("Training")
    t.trainUntilConvergence(ds, verbose=True)
    NetworkWriter.writeToFile(n, 'trainedNetwork.xml')

    # n = NetworkReader.readFrom('trainedNetwork_2.xml')
Example #9
  plotname = os.path.join(plotdir, ('jpq2layers_plot' + str(iter)))
  pylab.savefig(plotname)


# set up the neural network
nneuron = 5
mom = 0.98
netname = "LSL-" + str(nneuron) + "-" + str(mom)
mv = ModuleValidator()
v = Validator()


#create the test DataSet
x = numpy.arange(0.0, 1.0+0.01, 0.01)
s = 0.5+0.4*numpy.sin(2*numpy.pi*x)
tsts = SupervisedDataSet(1,1)
tsts.setField('input',x.reshape(len(x),1))
tsts.setField('target',s.reshape(len(s),1))
#read the train DataSet from file
trndata = SupervisedDataSet.loadFromFile(os.path.join(os.getcwd(),'trndata'))

myneuralnet = os.path.join(os.getcwd(),'myneuralnet.xml')
if os.path.isfile(myneuralnet):
  n = NetworkReader.readFrom(myneuralnet,name=netname)
  #calculate the test DataSet based on the trained Neural Network
  ctsts = mv.calculateModuleOutput(n,tsts)
  tserr = v.MSE(ctsts,tsts['target'])
  print('MSE error on TSTS:',tserr)
  myplot(trndata,tsts = tsts,ctsts = ctsts)

  pylab.show()
Example #10
def generate_network_forecaster(history_size=1):
    # Building Network process -------------------------------------------------------
    net = FeedForwardNetwork()
    inLayer = LinearLayer(history_size)
    hiddenLayer0 = LinearLayer(history_size)
    hiddenLayer1 = LinearLayer(3)
    outLayer = LinearLayer(1)

    net.addInputModule(inLayer)
    net.addModule(hiddenLayer0)
    net.addModule(hiddenLayer1)
    net.addOutputModule(outLayer)

    net.addConnection(FullConnection(inLayer, hiddenLayer0))
    #net.addConnection(FullConnection(inLayer, outLayer))
    #net.addConnection(FullConnection(hiddenLayer0, outLayer))
    net.addConnection(FullConnection(hiddenLayer0, hiddenLayer1))
    net.addConnection(FullConnection(hiddenLayer1, outLayer))
    net.sortModules()
    AUX = 0.1  # scaling factor applied to the net's inputs and outputs
    print net
    ## Alternative: a net with 3 inputs, two hidden layers of 8 neurons each, and 1 output:
    #net = buildNetwork(3,8,8,1)

    # Making Forecaster ---------------------------------------------------------------
    def learn(self, data, lag, epoch=1000):
        self.samples.clear()
        # counts how often the net itself produced the prediction, rather than
        # falling back on another method because there wasn't enough data yet
        self.true_predictions = 0
        self.predictions = 0
        self.lag = lag
        for i in range(len(data) - (lag + self.history_size)):
            self.samples.addSample(
                [data[j][1] * AUX for j in range(i, i + self.history_size)],
                data[i + lag + self.history_size][1] * AUX)
        print 'Training'
 
        elapsed = os.times()[-1]  # elapsed real time, in seconds
        self.trainer.trainUntilConvergence(maxEpochs=epoch)  # , validationProportion = 0.01)
        elapsed = os.times()[-1] - elapsed

        if elapsed <= 60:
            time = '%2.1f seconds.' % elapsed
        elif elapsed <= 3600:
            time = '%d minutes and %2.1f seconds.' % (elapsed / 60, elapsed % 60)
        else:
            time = '%d hours, %d minutes and %2.1f seconds.' % (
                elapsed / 3600, (elapsed % 3600) / 60, elapsed % 60)
        print 'Trained for', time

    def predict(self, lag):
        if self.initialized:
            return self.mem
        else:
            if 'vect' in dir(self):
                return self.vect[-1]
            else:
                raise Exception("Forecaster " + self.name + " is not initialized.")

    def update(self, data):
        if self.initialized:
            self.vect.append(data[1])
            if len(self.vect) > self.history_size:
                self.vect = self.vect[1:]
            self.mem = self.net.activate(map(lambda x: x*AUX, self.vect))*(1/AUX)
        else:
            self.mem = data[1]
            if 'vect' in dir(self):
                self.vect.append(data[1])
            else:
                self.vect = [data[1]]
            if len(self.vect) >= self.history_size:
                self.initialized = True

    NN = Forecaster(name = 'Neural Net', predict_function = predict, update_function = update, learn_function = learn)
    NN.history_size = history_size
    NN.net = net
    NN.samples = SupervisedDataSet(history_size,1)
    NN.trainer = BackpropTrainer(NN.net, NN.samples)
    return NN
Example #11
from pybrain.datasets import SupervisedDataSet

ds = SupervisedDataSet(2, 1)  #2D input and 1D output

# Add 2D input and the desired output
ds.addSample((0, 0), (0, ))
ds.addSample((0, 1), (1, ))
ds.addSample((1, 0), (1, ))
ds.addSample((1, 1), (0, ))

# Print the data set
for inpt, target in ds:
    # inpt is one 2D input sample; target is the corresponding 1D output
    print(inpt, target)

# Print input or target
# print(ds['input'])
# print(ds['target'])

# # Clear the data set
# ds.clear()
# print(ds)

# Trainers
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure import TanhLayer  # for tanh
net = buildNetwork(2, 10, 1, bias=True,
                   hiddenclass=TanhLayer)  # Change activation function
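# (the snippet ends before training; a minimal run, as a sketch rather than
#  part of the original example:)
trainer = BackpropTrainer(net, ds)
for _ in range(1000):
    trainer.train()
print(net.activate([0, 1]))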
Example #12
#x2train = create_ramp(240)
x1train = create_rnd(240)
x2train = create_rnd(240)

# scale all inputs to be around [-1,1]
x1train, x1_scale = normalize_vals(x1train)
x2train, x2_scale = normalize_vals(x2train)
ytrain = iir1(x1train, x2train, 0.01)

#print "x1=", x1train
#print "x2=", x2train
#print "y=", ytrain
#plotLists(x1train,x2train,ytrain)

# Create regression training data for RNN
ds = SupervisedDataSet(2, 1)
for i in range(len(x1train)):
    ds.addSample((x1train[i], x2train[i]), (ytrain[i],))

# Create Recurrent Network Structure
net = RecurrentNetwork()
net.addInputModule(LinearLayer(2, name='in'))
net.addModule(LinearLayer(1, name='hidden'))
net.addOutputModule(LinearLayer(1, name='out'))
net.addConnection(FullConnection(net['in'], net['hidden'], name='c1'))
net.addConnection(FullConnection(net['hidden'], net['out'], name='c2'))
#net.addRecurrentConnection(FullConnection(net['hidden'], net['hidden'], name='c3'))
net.addRecurrentConnection(FullConnection(net['out'], net['out'], name='c3'))
net.sortModules()

# Train network
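# (the example is cut off here; a minimal training sketch, assuming
#  BackpropTrainer is imported from pybrain.supervised.trainers:)
trainer = BackpropTrainer(net, ds, learningrate=0.01)
for _ in range(100):
    trainer.train()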
Example #13
#######################################################
######### Build Neural Network Manually ############
#######################################################

# Data standardization 
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3)
sx = MinMaxScaler()
X_train = sx.fit_transform(X_train)
X_test = sx.transform(X_test)
stest = MinMaxScaler()
test_id = test['test_id']
test = test.drop(['test_id'], axis = 1)
test = stest.fit_transform(test)

# Initialize supervised dataset
ds = SupervisedDataSet(test.shape[1], 1)
ds_train = SupervisedDataSet(X.shape[1], 1)
ds_test = SupervisedDataSet(X.shape[1], 1)

# put the data and labels into the datasets
for i in range(len(X_train)):
    ds_train.addSample(X_train[i], y_train[i])

for i in range(len(X_test)):
    ds_test.addSample(X_test[i], y_test[i])
  
# SupervisedDataSet requires targets, so fill the unlabeled test set with random placeholders
Y = np.random.random(size=test.shape[0])
for i in range(len(test)):
    ds.addSample(test[i], Y[i])

#%%
Example #14
    def learn(self):
        # convert reinforcement dataset to NFQ supervised dataset
        supervised = []
        dats = []  # [seq index][turn] = [state, jointAct, jointReward]
        for i in range(self.num_agents):
            supervised.append(SupervisedDataSet(self.num_features + self.actionDiminInput, 1))
        for i in range(self.dataset[self.indexOfAgent].getNumSequences()):
            seq=[]
            for j in range(len(self.dataset[self.indexOfAgent].getSequence(i)[0])):
                state=self.dataset[self.indexOfAgent].getSequence(i)[0][j]
                jointAct=[]
                jointReward=[]
                for k in range(self.num_agents):
                    jointAct.append(self.dataset[k].getSequence(i)[1][j][0])
                    jointReward.append(self.dataset[k].getSequence(i)[2][j][0])
                seq.append([state, jointAct, jointReward])
            dats.append(seq)
        #prepare data set
        for i in range(self.num_agents):
            for seq in dats:
                lastexperience = None
                for sarPair in seq:
                    state = sarPair[0]
                    action = sarPair[1]
                    reward = sarPair[2]
                    if not lastexperience:
                        # delay each experience in sequence by one
                        lastexperience = (state, action, reward)
                        continue
                    # use experience from last timestep to do Q update
                    (state_, action_, reward_) = lastexperience

                    #update Q-value function approximator
                    qValuesNext=self._qValuesForAllPossibleJointAction(state)
                    eqNext=findCorrelatedEquilibrium(self.num_agents, self.num_actions, qValuesNext, self.possibleJointAction,self.w4ActIndexing)
                    #Learn
                    inp=self._EncodeStateAndJointActionIntoInputVector(state_, action_)
                    if self.isFirstLerning:
                        target=reward_[i]
                    else:
                        target=reward_[i] + self.rewardDiscount * max(self._qValuesForEachActionOfAgent(state, eqNext, i))
                    target=np.array([target])
                    supervised[i].addSample(inp, target)
                    # update last experience with current one
                    lastexperience = (state, action, reward)
        if self.isFirstLerning:
            self.isFirstLerning=False

        procTrainers=[]
        qResult=Queue()
        for i in range(self.num_agents):
            trainer=RPropMinusTrainer(self.linQ[i],dataset=supervised[i],
                                      batchlearning=True,
                                      verbose=False,
                                      )
            if not self.validateMultiProc:
                trainer.trainUntilConvergence(maxEpochs=self.max_epochs,verbose=False)
            else:
                procTrainers.append(Process(target=self._learningQfunction, kwargs={"trainer":trainer,"i":i,"q":qResult}))
        if self.validateMultiProc:
            for proc in procTrainers:
                proc.start()
            for i in range(self.num_agents):
                res=qResult.get()
                self.linQ[res[0]]=res[1]
Example #15
    for i, row in enumerate(spamreader):
        if i == 0:
            nfeatures = len(row) - 1
        dataXAll.append([float(val) for val in row[0:nfeatures]])
        if int(row[nfeatures]) == 1:
            dataYAll.append([1, 0, 0])
        elif int(row[nfeatures]) == 0:
            dataYAll.append([0, 1, 0])
        else:
            dataYAll.append([0, 0, 1])

winrateFinal = 0
winrateMin = winrateFinal
winrateMax = winrateFinal
for n in range(0, ntry):
    ds = SupervisedDataSet(nfeatures, 3)
    dataX = list(dataXAll)
    dataY = list(dataYAll)

    # # crossvalidation data construction RANDOM PICK
    # datapX = list()
    # datapY = list()
    # for i in range(0, int(crossvalidation_pct * len(dataX))):
    #     popi = random.randint(0, len(dataX) - 1)
    #     datapX.append(dataX[popi])
    #     datapY.append(dataY[popi])
    #     dataX.pop(popi)
    #     dataY.pop(popi)
    # # / crossvalidation data construction

    # crossvalidation data construction PICK LAST
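    # (the original is cut off here; a deterministic hold-out of the last rows,
    #  sketched using the crossvalidation_pct from the commented block above:)
    ncut = int(crossvalidation_pct * len(dataX))
    datapX, dataX = dataX[-ncut:], dataX[:-ncut]
    datapY, dataY = dataY[-ncut:], dataY[:-ncut]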
Example #16
 validationSetsA = linspace(0, 2, num=100)
 trainingSetsB = linspace(0, 1, num=5)
 validationSetsB = linspace(0, 1, num=100)
 a = lambda x: exp(multiply(-1, sqrt(x)))
 b = lambda x: arctan(x)
 network1 = buildNetwork(
     1, 5, 1) if not isfile(net1Filename) else NetworkReader.readFrom(
         net1Filename)
 network2 = buildNetwork(
     1, 5, 1) if not isfile(net2Filename) else NetworkReader.readFrom(
         net2Filename)
 if not isfile(net1Filename):
     NetworkWriter.writeToFile(network1, net1Filename)
 if not isfile(net2Filename):
     NetworkWriter.writeToFile(network2, net2Filename)
 trainingSets1 = SupervisedDataSet(1, 1)
 trainingSets2 = SupervisedDataSet(1, 1)
 for i in range(len(trainingSetsA)):
     trainingSets1.addSample(trainingSetsA[i], a(trainingSetsA[i]))
 for i in range(len(trainingSetsB)):
     trainingSets2.addSample(trainingSetsB[i], b(trainingSetsB[i]))
 trainer1 = BackpropTrainer(network1, trainingSets1, learningrate=0.1)
 trainer2 = BackpropTrainer(network2, trainingSets2, learningrate=0.1)
 trainer1.trainUntilConvergence()
 trainer2.trainUntilConvergence()
 trainOutputA = []
 trainOutputB = []
Example #17
import time
from graph import Graph
from pybrain.datasets import SupervisedDataSet

# Configuration
N = 6
GRAPHS = 10000
HIDDENLAYERS = 6
LEARNINGRATE = 0.2
MOMENTUM = 0.3
MINUTES = 3 * 60
ITERATIONS = 999999999
MAX_ERROR = 0.000001
WEIGHTDECAY = 0.0

# Data
trainingData = SupervisedDataSet(N * N, N * N)
trainingDataInput = []
trainingDataOutput = []

# Prepare training data
print('Generating data...')
allGraphs = []
for i in range(GRAPHS):
    # Prepare graph
    graph = Graph(N)
    graph.compute_tsp()
    allGraphs.append(graph)

    # Create input
    sample_input = ()
    for j in range(N):
Example #18
saida = np.genfromtxt("DataSets/irisdataset.data", delimiter=",", usecols=(4))
'''
Iris-setosa = 0
iris-versicolor = 1
iris-virginica = 2
'''

entrada_treino = np.concatenate(
    (entrada[:35], entrada[50:85], entrada[100:135]))
saida_treino = np.concatenate((saida[:35], saida[50:85], saida[100:135]))

entrada_teste = np.concatenate(
    (entrada[35:50], entrada[85:100], entrada[135:]))
saida_teste = np.concatenate((saida[35:50], saida[85:100], saida[135:]))

treinamento = SupervisedDataSet(4, 1)
for i in range(len(entrada_treino)):
    treinamento.addSample(entrada_treino[i], saida_treino[i])
# print(len(treinamento))
# print(treinamento.indim)
# print(treinamento.outdim)

# Build the network
rede = buildNetwork(treinamento.indim, 2, treinamento.outdim, bias=True)
trainer = BackpropTrainer(rede, treinamento, learningrate=0.03, momentum=0.3)

# Train the network
for epoch in range(1000):
    trainer.train()

# Test the network
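# (the example stops here; a plausible continuation mirroring the training
#  setup, as a sketch:)
teste = SupervisedDataSet(4, 1)
for i in range(len(entrada_teste)):
    teste.addSample(entrada_teste[i], saida_teste[i])
trainer.testOnData(teste, verbose=True)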
Example #19
  (X, T) = getTimeEmbeddedMatrix(sequence, numLags, predictionStep,
                                 useTimeOfDay, useDayOfWeek)

  random.seed(6)
  net = initializeTDNNnet(nDimInput=X.shape[1],
                         nDimOutput=1, numNeurons=200)

  predictedInput = np.zeros((len(sequence),))
  targetInput = np.zeros((len(sequence),))
  trueData = np.zeros((len(sequence),))
  for i in xrange(nTrain, len(sequence)-predictionStep):
    Y = net.activate(X[i])

    if i % 336 == 0 and i > numLags:
      ds = SupervisedDataSet(X.shape[1], 1)
      # use a separate index j so the outer loop variable i is preserved
      for j in xrange(i - nTrain, i):
        ds.addSample(X[j], T[j])
      trainer = BackpropTrainer(net, dataset=ds, verbose=1)
      trainer.trainEpochs(30)

    predictedInput[i] = Y[-1]
    targetInput[i] = sequence['data'][i+predictionStep]
    trueData[i] = sequence['data'][i]
    print "Iteration {} target input {:2.2f} predicted Input {:2.2f} ".format(
      i, targetInput[i], predictedInput[i])

  predictedInput = (predictedInput * stdSeq) + meanSeq
  targetInput = (targetInput * stdSeq) + meanSeq
  trueData = (trueData * stdSeq) + meanSeq
import numpy as np
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer

inputs = np.genfromtxt('ModifiedIrisDataset.txt',
                       delimiter=',',
                       usecols=(0, 1, 2, 3))
outputs = np.genfromtxt('ModifiedIrisDataset.txt', delimiter=',', usecols=(4))

training_inputs = np.concatenate((inputs[:35], inputs[50:85], inputs[100:135]))
training_outputs = np.concatenate(
    (outputs[:35], outputs[50:85], outputs[100:135]))

testing_inputs = np.concatenate((inputs[35:50], inputs[85:100], inputs[135:]))
testing_outputs = np.concatenate(
    (outputs[35:50], outputs[85:100], outputs[135:]))

training_dataset = SupervisedDataSet(4, 1)
for input_sample, output_sample in zip(training_inputs, training_outputs):
    training_dataset.addSample(input_sample, output_sample)

network = buildNetwork(training_dataset.indim,
                       2,
                       training_dataset.outdim,
                       bias=True)
trainer = BackpropTrainer(network,
                          training_dataset,
                          learningrate=0.01,
                          momentum=0.3)

trainer.trainEpochs(10000)

test = SupervisedDataSet(4, 1)
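# (cut off here; filling the test set and evaluating would look like this sketch:)
for input_sample, output_sample in zip(testing_inputs, testing_outputs):
    test.addSample(input_sample, output_sample)
trainer.testOnData(test, verbose=True)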
Example #21
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

dataset = SupervisedDataSet(2, 1)

# Add the XOR truth table
dataset.addSample([0, 0], [0])
dataset.addSample([0, 1], [1])
dataset.addSample([1, 0], [1])
dataset.addSample([1, 1], [0])

# input and output dimensions; the middle argument is the number of hidden neurons
network = buildNetwork(dataset.indim, 4, dataset.outdim, bias=True)

trainer = BackpropTrainer(network, dataset, learningrate=0.01, momentum=0.99)

trainer.trainEpochs(1000)

test_data = SupervisedDataSet(2, 1)

test_data.addSample([0, 0], [0])
test_data.addSample([0, 1], [1])
test_data.addSample([1, 0], [1])
test_data.addSample([1, 1], [0])

trainer.testOnData(test_data, verbose=True)
Example #22
def initRNA():
    global d
    global erroDpc
    q = np.array([[Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, Q10]])
    InterL = InterLayer.get()
    LRate = LearnR.get()
    beta = Momentum.get()
    Iter = IterMax.get()
    erro = Tol.get()
    intervaloA = intervaloInicial.get()
    intervaloB = intervaloFinal.get()
    print q
    print b
    print f
    print InterL
    print LRate
    print beta
    print erro
    print Iter
    print intervaloA
    print intervaloB

    n = buildNetwork(10, InterL, 1, bias=b, hiddenclass=eval(f))
    d = SupervisedDataSet(10, 1)
    getRandomSample()
    trainer = BackpropTrainer(n, d, learningrate=LRate, momentum=beta)
    tol_max = erro
    
    n._setParameters(np.random.uniform(intervaloA,intervaloB,n.params.shape[0]))
    
    #### so the graph plots as a single evolving figure
    erroDpc = []  # clear the data on each run
    plt.clf()  # clear the plot
    plt.ion()  # interactive plotting

    iter_t = 0
    while Iter > 0:
        erro = trainer.train()
        erroDpc.append(erro)
        Iter -= 1
        print 'generation:', iter_t, ' | error: ', erro
        if erro <= tol_max:
            break
        iter_t += 1
        plt.plot(erroDpc, c='r')
        plt.xlabel('Epoch')
        plt.ylabel('Error')
        plt.title('Error decay')
        plt.pause(0.002)

    r = n.activate(q[0, :])
    print "Actual chance", MSP(q)
    real.set(MSP(q) * 100)
    print "predicted",
    predict.set(r * 100)

    if r * 100 > 80:
        Perigo = StringVar()
        Perigo.set("DANGER")
        labelPerigo = Label(app, textvariable=Perigo, bg="red", font="Helvetica 13 bold")
        labelPerigo.place(x=665, y=650)

    elif r * 100 > 50:
        Alerta = StringVar()
        Alerta.set("ALERT")
        labelAlerta = Label(app, textvariable=Alerta, bg="orange", font="Helvetica 13 bold")
        labelAlerta.place(x=665, y=650)

    elif r * 100 > 30:
        Atencao = StringVar()
        Atencao.set("ATTENTION")
        labelAtencao = Label(app, textvariable=Atencao, bg="yellow", font="Helvetica 13 bold")
        labelAtencao.place(x=665, y=650)

    elif r * 100 > 0:
        SemRisco = StringVar()
        SemRisco.set("NO RISK")
        labelSemRisco = Label(app, textvariable=SemRisco, bg="green", font="Helvetica 13 bold")
        labelSemRisco.place(x=665, y=650)
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

neuralNetwork = buildNetwork(2, 3, 1, bias=True)

dataset = SupervisedDataSet(2, 1)

# XOR
#dataset.addSample((0, 0), (0,))
#dataset.addSample((0, 1), (1,))
#dataset.addSample((1, 0), (1,))
#dataset.addSample((1, 1), (0,))

# AND
dataset.addSample((0, 0), (0, ))
dataset.addSample((0, 1), (0, ))
dataset.addSample((1, 0), (0, ))
dataset.addSample((1, 1), (1, ))

trainer = BackpropTrainer(neuralNetwork,
                          dataset=dataset,
                          learningrate=0.01,
                          momentum=0.06)

for i in range(1, 30000):
    error = trainer.train()

    if i % 1000 == 0:
        print("Error in iteration ", i, " is: ", error)
        print(neuralNetwork.activate([0, 0]))
# -*- coding: utf-8 -*-

from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import TanhLayer
from pybrain.structure import LinearLayer
from pybrain.structure import SigmoidLayer
from pybrain.datasets import SupervisedDataSet
import matplotlib.pyplot as plt
from Tkinter import *
import numpy as np

n = buildNetwork(10, 15, 1, bias=True, hiddenclass=TanhLayer)
d = SupervisedDataSet(10, 1)

X = np.random.randint(0, 2, (200, 10)).astype(np.float64)


def MSP(X):
    weight = np.array([
        2. / 10.5, 2. / 10.5, 2. / 10.5, 1. / 10.5, 1. / 10.5, 0.5 / 10.5,
        0.5 / 10.5, 0.5 / 10.5, 0.5 / 10.5, 0.5 / 10.5
    ])
    return np.dot(X, weight)


y = MSP(X)

for i in xrange(0, X.shape[0]):
    d.addSample(X[i, :], y[i])
Example #25
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
import cPickle as pickle
import numpy as np
import sys

data = SupervisedDataSet(3, 1)

data.addSample([0, 0, 0], [0])
data.addSample([0, 0, 1], [1])
data.addSample([0, 1, 0], [1])
data.addSample([0, 1, 1], [0])
data.addSample([1, 0, 0], [1])
data.addSample([1, 0, 1], [0])
data.addSample([1, 1, 0], [0])
data.addSample([1, 1, 1], [1])

if sys.argv[1] == "train":

    print(sys.argv[2])

    net1 = buildNetwork(data.indim, 3, data.outdim)
    trainer1 = BackpropTrainer(net1, dataset=data, verbose=True)
    # command-line arguments arrive as strings; convert them before activating
    inp = [float(sys.argv[3]), float(sys.argv[4]), float(sys.argv[5])]
    for i in xrange(int(sys.argv[2])):
        trainer1.trainEpochs(1)
        print('\tValue after %d epochs: %.2f' % (i, net1.activate(inp)[0]))
    pickle.dump(net1, open('testNetwork.dump', 'wb'))  # binary mode for pickle
Example #26
from pybrain.tools.shortcuts import buildNetwork
from PIL import Image
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import RPropMinusTrainer
import numpy as np
import os

files = os.listdir("img/")
files1 = os.listdir("test_img/")
ds = SupervisedDataSet(200 * 60 * 3, 1)

for i in range(len(files)):
    img = Image.open("img/" + files[i])
    data = np.array(img)
    data = data.reshape(-1)
    # the first six filename characters hold the numeric label
    ds.addSample((data), (int(files[i][0:6]),))

img1 = Image.open("test_img/152830.png")

data1 = np.array(img1)
data1 = data1.reshape(-1)

net = buildNetwork(200 * 60 * 3, 1)

trainer = RPropMinusTrainer(net)
trainer.setData(ds)

trainer.trainEpochs(100)


def calculation(a, b):
Example #27
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer, SigmoidLayer

# Create the neural network
# --- Parameters: (input units, hidden units, output units)
# --- Other options: outclass=SoftmaxLayer, hiddenclass=SigmoidLayer, bias=False
network = buildNetwork(2, 3, 1)

print('Input layer: ', network['in'])
print('Hidden layer: ', network['hidden0'])
print('Output layer: ', network['out'])
print('Bias:', network['bias'])

# Create the dataset
base = SupervisedDataSet(2, 1)
base.addSample((0, 0), (0, ))
base.addSample((0, 1), (1, ))
base.addSample((1, 0), (1, ))
base.addSample((1, 1), (0, ))

# Train
trainamento = BackpropTrainer(network,
                              dataset=base,
                              learningrate=0.01,
                              momentum=0.06)
# Epochs
for i in range(30000):
    erro = trainamento.train()
    if i % 1000 == 0:
        print("Error: %s" % erro)
Example #28
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 07 10:05:57 2015

@author: razkevich
"""


def totuple(a):
    try:
        return tuple(totuple(i) for i in a)
    except TypeError:
        return a


import pybrain
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets import SupervisedDataSet

net = buildNetwork(X_train.shape[1], 3, 1, bias=True)
ds = SupervisedDataSet(X_train.shape[1], 1)

for i in range(1, X_train.shape[0]):  # avoid densifying the whole matrix just for its length
    ds.addSample(totuple(X_train[i].toarray()[0]), Y_train.iloc[i] == 3)
    print i
trainer = BackpropTrainer(net, ds)
trainer.trainUntilConvergence()
Example #29
#the four sets are given separately
set_lengths = ((0, 12), (12, 24), (24, 36), (36, 48), (48, 60))

enh_array = np.empty((1, 5))  #np.zeros((38,5))

n_hidden = [5, 10, 15, 20, 30, 50]  # hidden-layer sizes to sweep
n_Epochs = [15, 50, 100, 200, 300, 400, 600]  # epoch counts to sweep
#print mb2
#print mb2.head
error_storage = []

#----------
# build the datasets
#----------

ds2 = SupervisedDataSet(5, 3)

#Data normalization
# a custom built function normalizes along the column. The columns are then put back together
#Illumination DCW nitrate Lutein n-Flowrate
enh_array = np.column_stack(
    (standardizer(beta_Val[:, 0]), standardizer(beta_Val[:, 1]),
     standardizer(beta_Val[:, 2]), standardizer(beta_Val[:, 3]),
     standardizer(beta_Val[:, 4])))

#dataset with flow

for j in set_lengths:
    temp_data = enh_array[j[0]:j[1]]
    for k in xrange(len(temp_data) - 1):
        ds2.addSample(
Example #30
    def test_data(self):
        test_data = SupervisedDataSet(len(lib.entrada[0]), len(lib.saida[0]))
        for i in range(lib.training, (lib.validation + lib.testing)):
            test_data.addSample(lib.entrada[i], lib.saida[i])

        print('Testing: %d' % lib.testing)