# 1-of-N output encoding, N = 10
import numpy as np
from pybrain.datasets import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import SoftmaxLayer
from pybrain.supervised.trainers import RPropMinusTrainer

trndata = ClassificationDataSet(np.shape(train)[1], 10, nb_classes=10)
for i in xrange(np.shape(train)[0]):
    trndata.addSample(train[i], traint[i])

validata = ClassificationDataSet(np.shape(valid)[1], 10, nb_classes=10)
for i in xrange(np.shape(valid)[0]):
    validata.addSample(valid[i], validt[i])     # add to the validation set, not trndata

testdata = ClassificationDataSet(np.shape(test)[1], 10, nb_classes=10)
for i in xrange(np.shape(test)[0]):
    testdata.addSample(test[i], testt[i])

# Build the network
if nlayers > 1:
    net = buildNetwork(trndata.indim, nhidden, nhiddeno, trndata.outdim, outclass=SoftmaxLayer)
else:
    net = buildNetwork(trndata.indim, nhidden, trndata.outdim, outclass=SoftmaxLayer)

# Construct the trainer object.
# Plain backpropagation can be trained with the same arguments: trainer = BackpropTrainer(...)
trainer = RPropMinusTrainer(net, dataset=trndata, momentum=0.9, verbose=True,
                            weightdecay=0.01, learningrate=0.1)

# Train and test
trainer.trainUntilConvergence(maxEpochs=percent_dataset_usage * 300)  # , trainingData=trndata, validationData=validata)
trainer.testOnData(verbose=True, dataset=testdata)

print_NN_params()            # remind us what architecture was tested
print_time_elapsed(start)    # print training time
filename = ('instances/NN_' + str(percent_dataset_usage) + 'perc_'
            + str(nhidden) + '_' + str(nhiddeno) + '.save')
save_NN_instance(filename)   # save trained object to disk
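# A minimal sketch (assumption, not part of the original snippet): save_NN_instance()
# is never defined above. If it only needs to persist the trained network, pickling
# the `net` object is enough; PyBrain's NetworkWriter/NetworkReader pair is an
# XML-based alternative.
import pickle

def save_NN_instance(path):
    # hypothetical helper body: serialize the trained network to disk
    with open(path, 'wb') as f:
        pickle.dump(net, f)

# XML-based alternative, and how to load the network back without retraining:
# from pybrain.tools.customxml.networkwriter import NetworkWriter
# from pybrain.tools.customxml.networkreader import NetworkReader
# NetworkWriter.writeToFile(net, filename + '.xml')
# net = NetworkReader.readFrom(filename + '.xml')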
import time
import numpy as np
from pybrain.utilities import percentError

networkPath = '20LSTMCell/TrainUntilConv.xml'
figPath = '20LSTMCell/ErrorGraph'

#####################
#####################

print "Training Data Length: ", len(trndata)
print "Num of Training Seq: ", trndata.getNumSequences()
print "Validation Data Length: ", len(tstdata)
print "Num of Validation Seq: ", tstdata.getNumSequences()
print 'Start Training'
time_start = time.time()

# Train one epoch at a time; tstErrorCount is presumably incremented later in the
# (truncated) loop body whenever the validation error fails to improve.
while tstErrorCount < 100:
    print "********** Classification with 20LSTMCell with RP- **********"
    trnError = trainer.train()
    tstError = trainer.testOnData(dataset=tstdata)
    trnAccu = 100 - percentError(trainer.testOnClassData(), trndata['class'])
    tstAccu = 100 - percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])

    trn_class_accu.append(trnAccu)
    tst_class_accu.append(tstAccu)
    trn_error.append(trnError)
    tst_error.append(tstError)

    # Persist the learning curves after every epoch
    np.savetxt(trnErrorPath, trn_error)
    np.savetxt(tstErrorPath, tst_error)
    np.savetxt(trnClassErrorPath, trn_class_accu)
    np.savetxt(tstClassErrorPath, tst_class_accu)

    if oldtstError == 0:
        oldtstError = tstError
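# The network and trainer used in this loop are created before the excerpt starts.
# A sketch of the usual PyBrain construction for "20 LSTM cells with RProp-",
# assuming trndata/tstdata are sequence classification datasets (not shown above):
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import LSTMLayer, SoftmaxLayer
from pybrain.supervised.trainers import RPropMinusTrainer

net = buildNetwork(trndata.indim, 20, trndata.outdim,
                   hiddenclass=LSTMLayer, outclass=SoftmaxLayer,
                   outputbias=False, recurrent=True)
trainer = RPropMinusTrainer(net, dataset=trndata, verbose=True)

# Once training finishes, the network can be written to the XML path declared above:
# from pybrain.tools.customxml.networkwriter import NetworkWriter
# NetworkWriter.writeToFile(net, networkPath)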
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import RPropMinusTrainer

test_set_num = 10   # int(math.floor(len_pList * 0.15))
epochs = 35
hiddenNodes = 8

print "======== Settings ========"
print "input_interval: %d, input_vector_size: %d, data_set: %d, test_set_num: %d, epochs: %d" % \
    (interval, inputSize, len_pList, test_set_num, epochs)

limit = len_pList - test_set_num
ds = createDataset3(pList[0:int(limit)], limit, inputSize, 1)

# net = buildNetwork(1, 6, 1, bias=True, recurrent=True)
# trainer = BackpropTrainer(net, ds, batchlearning=False, lrdecay=0.0, momentum=0.0, learningrate=0.01)
net = buildNetwork(inputSize, hiddenNodes, 1, bias=True)
trainer = RPropMinusTrainer(net, verbose=True)
# trainer = BackpropTrainer(net, ds, batchlearning=False, lrdecay=0.0, momentum=0.0, learningrate=0.01, verbose=True)
trainer.trainOnDataset(ds, epochs)
trainer.testOnData(verbose=True)

# Seed the recursive forecast with the last two known values of the training range
# (inputSize is assumed to be 2: the two most recent lagged values).
i = len_pList - test_set_num
last_last_value = normalize(pList[i-2][1])
last_value = normalize(pList[i-1][1])

out_data = []
print "======== Testing ========"
for i in range(len_pList - test_set_num + 1, len_pList):
    value = denormalize(net.activate([last_last_value, last_value]))
    out_datum = (i, pList[i][1], value)
    out_data.append(out_datum)
    print "Index: %d Actual: %f Prediction: %f" % out_datum
    # Shift the lag window: the previous "last" becomes "last but one",
    # and the new prediction becomes the most recent input.
    last_last_value = last_value
    last_value = normalize(value)
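# createDataset3(), normalize() and denormalize() are not defined in this excerpt.
# A minimal sketch under the assumption that pList holds (index, price) pairs,
# each sample uses inputSize lagged values, and min-max scaling is applied:
from pybrain.datasets import SupervisedDataSet

p_min = min(p[1] for p in pList)
p_max = max(p[1] for p in pList)

def normalize(x):
    return (x - p_min) / (p_max - p_min)      # scale into [0, 1]

def denormalize(x):
    return x * (p_max - p_min) + p_min        # undo the scaling

def createDataset3(points, limit, input_size, target_size):
    ds = SupervisedDataSet(input_size, target_size)
    values = [normalize(p[1]) for p in points]
    for j in range(input_size, int(limit)):
        # input_size consecutive normalized values predict the next one
        ds.addSample(values[j - input_size:j], values[j])
    return ds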