コード例 #1
0
def main():
    inputs = ReadCSV('./data/input.csv')
    outputs = ReadCSV('./data/output.csv')
    ids = make_list(outputs)
    x = []
    y = {}

    for i in range(3, 24, 3):
        x.append(i)

    for hidden_layers in range(
            1, 7):  #change loop for plot also if you change here
        print 'Number of hidden layers', hidden_layers
        y[hidden_layers] = []
        for i in x:
            print 'Number of training tuples', i
            y[hidden_layers].append(
                find_accuracy(i, ids, hidden_layers, inputs, outputs))
    #print x
    #print y
    #Now plot the graph
    for i in range(1, 7):
        t = pylab.plot(x,
                       y[i],
                       label="Number of hidden layers " + str(i),
                       linewidth=4,
                       linestyle='-')
    t = pylab.xlabel("Number of training tuples")
    t = pylab.ylabel("Accuracy %")
    t = pylab.title(
        "Random Subset Sampling (Number of training samples vs Accuracy)")
    t = pylab.legend(loc='lower right')
    t = pylab.grid()
    pylab.show()
コード例 #2
0
def main():
	# Train a 178-input, 6-hidden, 5-output backprop network on the CSV
	# data and print per-trait prediction error for a held-out test set.
	# NOTE(review): `test` and `traits` are globals defined elsewhere in
	# the file -- confirm they are populated before main() runs.
	inputs = ReadCSV('./data/input.csv')
	outputs = ReadCSV('./data/output.csv')
	
	# Everything not reserved for testing becomes training data.
	test_set = test.keys()
	train_set = []
	for k in inputs.keys():
		if k not in test_set:
			train_set.append(k)
	print "Number of training samples", len(train_set)
	print "Number of testing samples", len(test_set)
			
	# 178 input features -> 6 hidden units -> 5 output traits.
	net = buildNetwork(178, 6, 5)
	ds=SupervisedDataSet(178,5)
	for id in train_set:
		ds.addSample(inputs[id],outputs[id])

	trainer = BackpropTrainer(net, ds, learningrate=0.001, momentum = 0.001)

	# Half the dataset is held back internally for convergence checking.
	trainer.trainUntilConvergence(maxEpochs=1000, validationProportion = 0.5)
	
	
	for id in test_set:
		predicted = net.activate(inputs[id])
		actual = outputs[id]
		print '-----------------------------'
		print test[id]
		print '-----------------------------'
		print 'Trait\t\tPredicted\tActual\tError'
		for i in range(0,5):
			# Error as a percentage of the output range (assumed span
			# of 4.0 -- TODO confirm against the data encoding).
			error = abs(predicted[i] - actual[i])*100/4.0
			print traits[i], '\t', predicted[i], '\t', actual[i], '\t', error,"%" 
コード例 #3
0
def main():
    inputs = ReadCSV('./data/input.csv')
    outputs = ReadCSV('./data/output.csv')
    ids = make_list(outputs)
    x = []
    y = {}

    for i in range(2, 11, 2):
        x.append(i)

    for hidden_layers in range(
            1, 6):  #change loop for plot also if you change here
        print 'Number of hidden layers', hidden_layers
        y[hidden_layers] = []
        for i in x:
            print 'Number of folds', i
            y[hidden_layers].append(
                find_accuracy(i, ids, hidden_layers, inputs, outputs))
    #print x
    #print y
    #Now plot the graph
    for i in range(1, 6):
        t = pylab.plot(x,
                       y[i],
                       label="Number of hidden layers " + str(i),
                       linewidth=4,
                       linestyle='-')
    t = pylab.xlabel("Number of folds")
    t = pylab.ylabel("Accuracy %")
    t = pylab.title("K-Fold cross validation (Number of folds vs Accuracy)")
    t = pylab.legend(loc='lower right')
    t = pylab.grid()
    pylab.show()
def main():
	"""Build a neural network from the CSV data and pickle it to disk."""
	inputs = ReadCSV('./data/input.csv')
	outputs = ReadCSV('./data/output.csv')
	ids = make_list(outputs)
	
	net = constructNet(ids, inputs, outputs)
	
	# Fix: the original leaked the file handle and wrote pickle data in
	# text mode; use a context manager and binary mode as the pickle
	# module requires.
	with open('neuralNet.sl', 'wb') as f:
		pickle.dump(net, f)
コード例 #5
0
ファイル: test_ui.py プロジェクト: Sharkuu/PitE-Olaf-Schab
    def connect(self):
        """Load test.csv once, convert it through Buffer into data.txt,
        and read the converted data back for plotting.

        Does nothing if a CSV has already been loaded.
        """
        # Idiom fix: `== False` comparison replaced with `not`.
        if not self.csv_loaded:
            self.plotWidget.clear()
            rfile = ReadCSV.read("test.csv")
            # Skip the header row (row 1), keep every data row.
            for row_no, row in enumerate(rfile, start=1):
                if row_no == 1:
                    continue
                self.upload_data.append(row)

            Fixer.fixTimeAndAlt(self.upload_data)
            buff = Buffer()
            # Rebuild data.txt from scratch on each load.
            if os.path.exists('data.txt'):
                os.remove('data.txt')
            for entry in self.upload_data:
                buff.sendToBuffer(entry)
                buff.sendData()
            self.data = (DataLoader.read('data.txt'))
            self.csv_loaded = True
            self.infocsv.setText(_fromUtf8("Zaladowano"))
            self.info.setText("")
            # Reset all plot statistics.
            self.wys = self.kat = self.dyst = self.pred = self.odchyl = 0
コード例 #6
0
ファイル: test_ui.py プロジェクト: Sharkuu/PitE-Olaf-Schab
    def connect(self):
        """Load test.csv once, convert it through Buffer into data.txt,
        and read the converted data back for plotting.

        Does nothing if a CSV has already been loaded.
        """
        # Fix: the original body mixed tabs and spaces (a SyntaxError on
        # Python 3 and fragile on Python 2) and misindented the
        # `infocsv.setText` line; indentation is normalized to 4 spaces.
        if not self.csv_loaded:
            self.plotWidget.clear()
            rfile = ReadCSV.read("test.csv")
            # Skip the header row (row 1), keep every data row.
            for row_no, row in enumerate(rfile, start=1):
                if row_no == 1:
                    continue
                self.upload_data.append(row)

            Fixer.fixTimeAndAlt(self.upload_data)
            buff = Buffer()
            # Rebuild data.txt from scratch on each load.
            if os.path.exists('data.txt'):
                os.remove('data.txt')
            for entry in self.upload_data:
                buff.sendToBuffer(entry)
                buff.sendData()
            self.data = (DataLoader.read('data.txt'))
            self.csv_loaded = True
            self.infocsv.setText(_fromUtf8("Zaladowano"))
            self.info.setText("")
            # Reset all plot statistics.
            self.wys = self.kat = self.dyst = self.pred = self.odchyl = 0
コード例 #7
0
def model_train(Save = False, modelname = None):
    
    X1_left  = ReadCSV('datasets_csv/feature_0630_1700/X1_offline_0630_1700.csv','float64')
    X1_midle = ReadCSV('datasets_csv/feature_0701_1100/X1_offline_0701_1100.csv','float64')
    X1_right = ReadCSV('datasets_csv/feature_0701_1400/X1_offline_0701_1400.csv','float64')
    Y1 = ReadCSV('datasets_csv/feature_0701_1100/Y1_offline_0701_1100.csv','float64')    
    X1 = np.c_[X1_left,X1_midle,X1_right]
#     X1 = np.c_[X1_left]
    
    X1 = list(X1)
    Y1 = list(Y1)
    
    X3_left  = ReadCSV('datasets_csv/feature_0630_1700/X3_offline_0630_1700.csv','float64')
    X3_midle = ReadCSV('datasets_csv/feature_0701_1100/X3_offline_0701_1100.csv','float64')
    X3_right = ReadCSV('datasets_csv/feature_0701_1400/X3_offline_0701_1400.csv','float64')
    Y3 = ReadCSV('datasets_csv/feature_0701_1100/Y3_offline_0701_1100.csv','float64')
      
    X3 = np.c_[X3_left,X3_midle,X3_right]
#     X3 = np.c_[X3_left]
     
    X3 = list(X3)
    Y3 = list(Y3)
    X1.extend(X3)
    Y1.extend(Y3)
    del X3,Y3
      
    X4_left  = ReadCSV('datasets_csv/feature_0630_1700/X4_offline_0630_1700.csv','float64')
    X4_midle = ReadCSV('datasets_csv/feature_0701_1100/X4_offline_0701_1100.csv','float64')
    X4_right = ReadCSV('datasets_csv/feature_0701_1400/X4_offline_0701_1400.csv','float64')
    Y4 = ReadCSV('datasets_csv/feature_0701_1100/Y4_offline_0701_1100.csv','float64')
        
    X4 = np.c_[X4_left,X4_midle,X4_right]
#     X4 = np.c_[X4_left]
      
    X4 = list(X4)
    Y4 = list(Y4)
    X1.extend(X4)
    Y1.extend(Y4)
    del X4,Y4
     
    X5_left  = ReadCSV('datasets_csv/feature_0630_1700/X5_offline_0630_1700.csv','float64')
    X5_midle = ReadCSV('datasets_csv/feature_0701_1100/X5_offline_0701_1100.csv','float64')
    X5_right = ReadCSV('datasets_csv/feature_0701_1400/X5_offline_0701_1400.csv','float64')
    Y5 = ReadCSV('datasets_csv/feature_0701_1100/Y5_offline_0701_1100.csv','float64')
       
    X5 = np.c_[X5_left,X5_midle,X5_right]
    X5 = list(X5)
    Y5 = list(Y5)
    X1.extend(X5)
    Y1.extend(Y5)
    del X5,Y5
      
    X6_left  = ReadCSV('datasets_csv/feature_0630_1700/X6_offline_0630_1700.csv','float64')
    X6_midle = ReadCSV('datasets_csv/feature_0701_1100/X6_offline_0701_1100.csv','float64')
    X6_right = ReadCSV('datasets_csv/feature_0701_1400/X6_offline_0701_1400.csv','float64')
    Y6 = ReadCSV('datasets_csv/feature_0701_1100/Y6_offline_0701_1100.csv','float64')
       
    X6 = np.c_[X6_left,X6_midle,X6_right]
    X6 = list(X6)
    Y6 = list(Y6)
    X1.extend(X6)
    Y1.extend(Y6)
    del X6,Y6
      
    X7_left  = ReadCSV('datasets_csv/feature_0630_1700/X7_offline_0630_1700.csv','float64')
    X7_midle = ReadCSV('datasets_csv/feature_0701_1100/X7_offline_0701_1100.csv','float64')
    X7_right = ReadCSV('datasets_csv/feature_0701_1400/X7_offline_0701_1400.csv','float64')
    Y7 = ReadCSV('datasets_csv/feature_0701_1100/Y7_offline_0701_1100.csv','float64')
       
    X7 = np.c_[X7_left,X7_midle,X7_right]
    X7 = list(X7)
    Y7 = list(Y7)
    X1.extend(X7)
    Y1.extend(Y7)
    del X7,Y7
      
    X8_left  = ReadCSV('datasets_csv/feature_0630_1700/X8_offline_0630_1700.csv','float64')
    X8_midle = ReadCSV('datasets_csv/feature_0701_1100/X8_offline_0701_1100.csv','float64')
    X8_right = ReadCSV('datasets_csv/feature_0701_1400/X8_offline_0701_1400.csv','float64')
    Y8 = ReadCSV('datasets_csv/feature_0701_1100/Y8_offline_0701_1100.csv','float64')
       
    X8 = np.c_[X8_left,X8_midle,X8_right]
    X8 = list(X8)
    Y8 = list(Y8)
    X1.extend(X8)
    Y1.extend(Y8)    
    del X8,Y8 
     
     
     
    X1 = np.array(X1)
    Y1 = np.array(Y1)
    
    X1,Y1 = DowmSample(X1,Y1,1)
    
    model = RandomForestClassifier(n_estimators=100,random_state=1)
#     model = GradientBoostingClassifier(n_estimators=100,max_leaf_nodes=5, subsample=0.8, random_state=1)
#     model = LogisticRegression('l2')
#     model = RVC()
    
    y0 = []
    for y in Y1:
        y0.append(y[0])
    y1 = np.array(y0)
#     y1 = y0.reshape(1,len(y0))
    
    print 'size of X1',np.shape(X1)
    print 'size of y1',np.shape(y1)
    model.fit(X1, y1.ravel())
    model.fit(X1, y1)
    
    # 保存模型
    if Save == True:
        f = open(modelname,'w')
        pickle.dump(model, f)
        f.close()
    
    print '\n -------------- Training is over ----------------------'    
    return model
コード例 #8
0
import sys
sys.path.append('./lib')

from ReadCSV import ReadCSV

# Smoke test: parse the input CSV and dump the parsed result to stdout.
print ReadCSV("./data/input.csv")
コード例 #9
0
from ReadCSV import ReadCSV

# Load the 7 TeV dataset as a matrix/dataframe for downstream analysis.
rs = ReadCSV(
    '/home/dgrfi/MEGA/supersymmetry/7TeVxyz.csv').read_matrix_in_dataframe()
コード例 #10
0
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.structure import TanhLayer
from pybrain.supervised.trainers import BackpropTrainer
import pickle

import sys

sys.path.append('./lib')

from ReadCSV import ReadCSV

new_input = ReadCSV("newinput.csv")
net = pickle.load(open("neuralNet.sl", "r"))

input = new_input[new_input.keys()[0]]
traits = net.activate(input)

text = ""
for trait in traits:
    text = text + str(trait) + " "

print text
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.structure import TanhLayer
from pybrain.supervised.trainers import BackpropTrainer
import pickle

import sys

sys.path.append('./lib')

from ReadCSV import ReadCSV

new_input = ReadCSV("newinput.csv")
net = pickle.load(open("neuralNet.sl", "r"))

input = new_input[new_input.keys()[0]]
traits = net.activate(input)

text = ""
for trait in traits:
	text = text + str(trait) + " "

print text
コード例 #12
0
 def createSample(self):
     """Build a sample list from self.db, sized by self.sample_limit,
     weighted by the summed weights held in self.w.
     """
     sum_w8 = self.w.sum_weights(self.w.weights)
     #print(sum_w8)
     rd = ReadCSV()
     # Delegates selection to the CSV reader; presumably the weights
     # bias which rows are drawn -- TODO confirm in ReadCSV.
     sample = rd.create_sampleList(self.sample_limit, self.db, sum_w8)
     return sample
コード例 #13
0
ファイル: app.py プロジェクト: Sharkuu/PitE-Olaf-Schab
from ReadCSV import ReadCSV
from Buffer import Buffer
from DataLoader import DataLoader
from Fixer import Fixer
from Plots import Plots
import os

# Start from a clean data.txt so stale results are never plotted.
if os.path.exists('data.txt'):
    os.remove('data.txt')

upload_data = []
data = []

# Read the CSV, skipping the header row (row 1).
line = 1
rfile = ReadCSV.read('test.csv')
for row in rfile:
    if line == 1:
        line = line + 1
        continue
    upload_data.append(row)

Fixer.fixTimeAndAlt(upload_data)

# Push every row through the buffer, which writes data.txt.
buff = Buffer()
for i in upload_data:
    buff.sendToBuffer(i)
    buff.sendData()

data = (DataLoader.read('data.txt'))

# Render all four plots from the converted data.
plots = Plots()
plots.altitude(data)
plots.pitch(data)
plots.distance(data)
plots.speed(data)
コード例 #14
0
def model_train(Save = False, modelname = None):
    
    X1_left  = ReadCSV('datasets_csv/feature_0630_1700/X1_offline_0630_1700.csv','float64')
    X1_midle = ReadCSV('datasets_csv/feature_0701_1100/X1_offline_0701_1100.csv','float64')
    X1_right = ReadCSV('datasets_csv/feature_0701_1400/X1_offline_0701_1400.csv','float64')
    Y1 = ReadCSV('datasets_csv/feature_0701_1100/Y1_offline_0701_1100.csv','float64')    
    X1 = np.c_[X1_left,X1_midle,X1_right]
#     X1 = np.c_[X1_left]
    
    X1 = list(X1)
    Y1 = list(Y1)

    X2_left  = ReadCSV('datasets_csv/feature_0630_1700/X2_offline_0630_1700.csv','float64')
    X2_midle = ReadCSV('datasets_csv/feature_0701_1100/X2_offline_0701_1100.csv','float64')
    X2_right = ReadCSV('datasets_csv/feature_0701_1400/X2_offline_0701_1400.csv','float64')
    Y2 = ReadCSV('datasets_csv/feature_0701_1100/Y2_offline_0701_1100.csv','float64')    
    X2 = np.c_[X2_left,X2_midle,X2_right]
#     X2 = np.c_[X2_left]
    
    X2 = list(X2)
    Y2 = list(Y2)
    X1.extend(X2)
    Y1.extend(Y2)
    del X2,Y2
    
    X3_left  = ReadCSV('datasets_csv/feature_0630_1700/X3_offline_0630_1700.csv','float64')
    X3_midle = ReadCSV('datasets_csv/feature_0701_1100/X3_offline_0701_1100.csv','float64')
    X3_right = ReadCSV('datasets_csv/feature_0701_1400/X3_offline_0701_1400.csv','float64')
    Y3 = ReadCSV('datasets_csv/feature_0701_1100/Y3_offline_0701_1100.csv','float64')
     
    X3 = np.c_[X3_left,X3_midle,X3_right]
#     X3 = np.c_[X3_left]
    
    X3 = list(X3)
    Y3 = list(Y3)
    X1.extend(X3)
    Y1.extend(Y3)
    del X3,Y3
     
    X4_left  = ReadCSV('datasets_csv/feature_0630_1700/X4_offline_0630_1700.csv','float64')
    X4_midle = ReadCSV('datasets_csv/feature_0701_1100/X4_offline_0701_1100.csv','float64')
    X4_right = ReadCSV('datasets_csv/feature_0701_1400/X4_offline_0701_1400.csv','float64')
    Y4 = ReadCSV('datasets_csv/feature_0701_1100/Y4_offline_0701_1100.csv','float64')
       
    X4 = np.c_[X4_left,X4_midle,X4_right]
#     X4 = np.c_[X4_left]
     
    X4 = list(X4)
    Y4 = list(Y4)
    X1.extend(X4)
    Y1.extend(Y4)
    del X4,Y4
    
    X5_left  = ReadCSV('datasets_csv/feature_0630_1700/X5_offline_0630_1700.csv','float64')
    X5_midle = ReadCSV('datasets_csv/feature_0701_1100/X5_offline_0701_1100.csv','float64')
    X5_right = ReadCSV('datasets_csv/feature_0701_1400/X5_offline_0701_1400.csv','float64')
    Y5 = ReadCSV('datasets_csv/feature_0701_1100/Y5_offline_0701_1100.csv','float64')
      
    X5 = np.c_[X5_left,X5_midle,X5_right]
    X5 = list(X5)
    Y5 = list(Y5)
    X1.extend(X5)
    Y1.extend(Y5)
    del X5,Y5
     
    X6_left  = ReadCSV('datasets_csv/feature_0630_1700/X6_offline_0630_1700.csv','float64')
    X6_midle = ReadCSV('datasets_csv/feature_0701_1100/X6_offline_0701_1100.csv','float64')
    X6_right = ReadCSV('datasets_csv/feature_0701_1400/X6_offline_0701_1400.csv','float64')
    Y6 = ReadCSV('datasets_csv/feature_0701_1100/Y6_offline_0701_1100.csv','float64')
      
    X6 = np.c_[X6_left,X6_midle,X6_right]
    X6 = list(X6)
    Y6 = list(Y6)
    X1.extend(X6)
    Y1.extend(Y6)
    del X6,Y6
     
    X7_left  = ReadCSV('datasets_csv/feature_0630_1700/X7_offline_0630_1700.csv','float64')
    X7_midle = ReadCSV('datasets_csv/feature_0701_1100/X7_offline_0701_1100.csv','float64')
    X7_right = ReadCSV('datasets_csv/feature_0701_1400/X7_offline_0701_1400.csv','float64')
    Y7 = ReadCSV('datasets_csv/feature_0701_1100/Y7_offline_0701_1100.csv','float64')
      
    X7 = np.c_[X7_left,X7_midle,X7_right]
    X7 = list(X7)
    Y7 = list(Y7)
    X1.extend(X7)
    Y1.extend(Y7)
    del X7,Y7
     
    X8_left  = ReadCSV('datasets_csv/feature_0630_1700/X8_offline_0630_1700.csv','float64')
    X8_midle = ReadCSV('datasets_csv/feature_0701_1100/X8_offline_0701_1100.csv','float64')
    X8_right = ReadCSV('datasets_csv/feature_0701_1400/X8_offline_0701_1400.csv','float64')
    Y8 = ReadCSV('datasets_csv/feature_0701_1100/Y8_offline_0701_1100.csv','float64')
      
    X8 = np.c_[X8_left,X8_midle,X8_right]
    X8 = list(X8)
    Y8 = list(Y8)
    X1.extend(X8)
    Y1.extend(Y8)    
    del X8,Y8 
     
     
     
    X1 = np.array(X1)
    Y1 = np.array(Y1)
    
    X1,Y1 = DowmSample(X1,Y1,8)
    
    model = GradientBoostingClassifier(n_estimators=100,max_leaf_nodes=5, learning_rate=0.1, subsample=0.8, random_state=1)
    model.fit(X1, Y1.ravel())
    
    # 保存模型
    if Save == True:
        f = open(modelname,'w')
        pickle.dump(model, f)
        f.close()
    
    print '\n ------------- Training is over ----------------------'    
    return model
コード例 #15
0
        # NOTE(review): this is the tail of a method whose `def` line and
        # the initialization of `f1Scores` / `div_db` are above this view.
        # It runs self.turns rounds of k-fold validation and averages the
        # f1 and accuracy scores.
        accuracies = []
        for i in range(self.turns):
            print(" turn ", i)
            # Fold i is the validation set for this round.
            validationSet = self.create_validation_set(div_db, i)
            #print(len(validationSet))
            # All remaining folds form the training set for adaboost.
            adaboost_set = []
            for j in range(self.turns):
                if (j != i):
                    adaboost_set += div_db[j]
            # Prepend the attribute header row expected by the trainer.
            adaboost_set = [self.attr_list] + adaboost_set

            fscr, accuracy_per_turn = self.fscr_per_turn(
                adaboost_set, validationSet)
            #print(fscr)
            f1Scores.append(fscr)
            accuracies.append(accuracy_per_turn)

        #print(len(f1Scores))
        # Average the per-turn scores over all folds.
        fscore = sum(f1Scores) / float(self.turns)
        acc_score = sum(accuracies) / float(self.turns)
        print("the f1 score of simulation: ", fscore)
        print("the accuracy of simulation: ", acc_score)


##################################################################################

# Driver: load the database from CSV and run 5-fold cross-validation.
rd = ReadCSV()
db_1 = rd.produceDB()
k = KFoldCross(db_1, 5)
#print(k.block_size)
k.validation()