示例#1
0
# Example 1: train/evaluate an RBF-kernel SVM on the recorded data set.
a.readDataSet(equalLength=False, checkData=False)

# Toggle: True reloads a previously dumped model; False trains a fresh one.
useDump = False

if useDump:
    # Reuse persisted normalization parameters and classifier, evaluate,
    # then stream live classifications to a file sink.
    a.loadDumpNormParam(dumpName="dataOnly")
    clf = a.loadDumpClassifier("dataOnly")
    a.testClassifier(classifier=clf)
    a.setFileSink(fileSinkName="chris", fileSinkPath="../")
    a.startLiveClassification()
else:
    # Fit feature normalization, train, persist, and evaluate the SVM.
    a.initFeatNormalization(dumpName="dataOnly")
    from sklearn import svm
    clf = svm.SVC(kernel='rbf')
    a.trainClassifier(classifier=clf)
    a.dumpClassifier(dumpName="dataOnly")
    a.testClassifier()

windowedData, windowLabels = a.windowSplitSourceDataTT()

# Shuffle window indices for a random 80/20 train/test split.
# np.arange is the direct way to build 0..n-1; the original used
# np.linspace(0, n-1, n, dtype=int), which yields the same values.
index = np.arange(len(windowedData))
random.shuffle(index)

trainingData = []
trainingLabels = []
testData = []
testLabels = []
for i in range(len(windowedData)):  # len() is already an int; no cast needed
    # First 80% of the shuffled order goes to the training set.
    if i / len(windowedData) < 0.8:
        trainingData.append(windowedData[index[i]])
        trainingLabels.append(windowLabels[index[i]])
示例#2
0
# Example 2: train/evaluate a multi-layer perceptron on the recorded data set.
a.readDataSet(equalLength=False, checkData=False)

# Toggle: True reloads a previously dumped model; False trains a fresh one.
useDump = False

if useDump:
    # Reuse persisted normalization parameters and classifier, evaluate,
    # then stream live classifications to a file sink.
    a.loadDumpNormParam(dumpName="MLPClassifier")
    clf = a.loadDumpClassifier("MLPClassifier")
    a.testClassifier(classifier=clf)
    a.setFileSink(fileSinkName="chris", fileSinkPath="../")
    a.startLiveClassification()
else:
    # Fit feature normalization, train, persist, and evaluate the MLP.
    a.initFeatNormalization(dumpName="MLPClassifier")
    from sklearn.neural_network import MLPClassifier
    clf = MLPClassifier()
    a.trainClassifier(classifier=clf)
    a.dumpClassifier(dumpName="MLPClassifier")
    a.testClassifier()

windowedData, windowLabels = a.windowSplitSourceDataTT()

# Shuffle window indices for a random 80/20 train/test split.
# np.arange is the direct way to build 0..n-1; the original used
# np.linspace(0, n-1, n, dtype=int), which yields the same values.
index = np.arange(len(windowedData))
random.shuffle(index)

trainingData = []
trainingLabels = []
testData = []
testLabels = []
for i in range(len(windowedData)):  # len() is already an int; no cast needed
    # First 80% of the shuffled order goes to the training set.
    if i / len(windowedData) < 0.8:
        trainingData.append(windowedData[index[i]])
        trainingLabels.append(windowLabels[index[i]])
示例#3
0
               label=5)

# Register the two "igor" recordings, both labelled 6, then read the data set.
for srcName, start, stop in [("igor.txt", 100, 2900),
                             ("igor2.txt", 600, 6000)]:
    a.addDataFiles(fileSourceName=srcName,
                   fileSourcePath="../",
                   startTime=start,
                   stopTime=stop,
                   label=6)

a.readDataSet(equalLength=False, checkData=False)

# Toggle: True reloads a previously dumped model; False trains a fresh one.
useDump = False

if useDump:
    # Reuse persisted normalization parameters and classifier, evaluate,
    # then stream live classifications to a file sink.
    a.loadDumpNormParam(dumpName="KNeighborsClassifier")
    knn = a.loadDumpClassifier("KNeighborsClassifier")
    a.testClassifier(classifier=knn)
    a.setFileSink(fileSinkName="chris", fileSinkPath="../")
    a.startLiveClassification()
else:
    # Fit feature normalization, train a 4-NN (Euclidean metric) model,
    # persist it, and evaluate.
    a.initFeatNormalization(dumpName="KNeighborsClassifier")
    from sklearn.neighbors import KNeighborsClassifier
    knn = KNeighborsClassifier(n_neighbors=4, metric='euclidean')
    a.trainClassifier(classifier=knn)
    a.dumpClassifier(dumpName="KNeighborsClassifier")
    a.testClassifier()
示例#4
0

# Example 4: train/evaluate an XGBoost classifier on the recorded data set.
# Toggle: True reloads a previously dumped model; False trains a fresh one.
useDump = False

if useDump:
    # Reuse persisted normalization parameters and classifier, evaluate,
    # then stream live classifications to a file sink.
    a.loadDumpNormParam(dumpName="XGBClassifier")
    clf = a.loadDumpClassifier("XGBClassifier")
    a.testClassifier(classifier=clf)
    a.setFileSink(fileSinkName="chris", fileSinkPath="../")
    a.startLiveClassification()
else:
    # Fit feature normalization, train, persist, and evaluate the model.
    a.initFeatNormalization(dumpName="XGBClassifier")
    from xgboost import XGBClassifier
    clf = XGBClassifier()
    a.trainClassifier(classifier=clf)
    a.dumpClassifier(dumpName="XGBClassifier")
    a.testClassifier()


windowedData, windowLabels = a.windowSplitSourceDataTT()

# Shuffle window indices for a random 80/20 train/test split.
# np.arange is the direct way to build 0..n-1; the original used
# np.linspace(0, n-1, n, dtype=int), which yields the same values.
index = np.arange(len(windowedData))
random.shuffle(index)

trainingData = []
trainingLabels = []
testData = []
testLabels = []
for i in range(len(windowedData)):  # len() is already an int; no cast needed
    # First 80% of the shuffled order goes to the training set.
    if i / len(windowedData) < 0.8:
        trainingData.append(windowedData[index[i]])
        # Keep labels aligned with their windows — the other examples in this
        # file do this; the original here dropped the line, leaving
        # trainingLabels permanently empty.
        trainingLabels.append(windowLabels[index[i]])
示例#5
0

# Example 5: train/evaluate a decision tree on the recorded data set.
# Toggle: True reloads a previously dumped model; False trains a fresh one.
useDump = False

if useDump:
    # Reuse persisted normalization parameters and classifier, evaluate,
    # then stream live classifications to a file sink.
    a.loadDumpNormParam(dumpName="DecisionTreeClassifier")
    clf = a.loadDumpClassifier("DecisionTreeClassifier")
    a.testClassifier(classifier=clf)
    a.setFileSink(fileSinkName="chris", fileSinkPath="../")
    a.startLiveClassification()
else:
    # Fit feature normalization, train, persist, and evaluate the model.
    a.initFeatNormalization(dumpName="DecisionTreeClassifier")
    from sklearn.tree import DecisionTreeClassifier
    clf = DecisionTreeClassifier()
    a.trainClassifier(classifier=clf)
    a.dumpClassifier(dumpName="DecisionTreeClassifier")
    a.testClassifier()


windowedData, windowLabels = a.windowSplitSourceDataTT()

# Shuffle window indices for a random 80/20 train/test split.
# np.arange is the direct way to build 0..n-1; the original used
# np.linspace(0, n-1, n, dtype=int), which yields the same values.
index = np.arange(len(windowedData))
random.shuffle(index)

trainingData = []
trainingLabels = []
testData = []
testLabels = []
for i in range(len(windowedData)):  # len() is already an int; no cast needed
    # First 80% of the shuffled order goes to the training set.
    if i / len(windowedData) < 0.8:
        trainingData.append(windowedData[index[i]])
        # Keep labels aligned with their windows — the other examples in this
        # file do this; the original here dropped the line, leaving
        # trainingLabels permanently empty.
        trainingLabels.append(windowLabels[index[i]])
示例#6
0
# Example 6: train/evaluate a Gaussian naive-Bayes model on the data set.
a.readDataSet(equalLength=False, checkData=False)

# Toggle: True reloads a previously dumped model; False trains a fresh one.
useDump = False

if useDump:
    # Reuse persisted normalization parameters and classifier, evaluate,
    # then stream live classifications to a file sink.
    a.loadDumpNormParam(dumpName="GaussianNB")
    clf = a.loadDumpClassifier("GaussianNB")
    a.testClassifier(classifier=clf)
    a.setFileSink(fileSinkName="chris", fileSinkPath="../")
    a.startLiveClassification()
else:
    # Fit feature normalization, train, persist, and evaluate the model.
    a.initFeatNormalization(dumpName="GaussianNB")
    from sklearn.naive_bayes import GaussianNB
    clf = GaussianNB()
    a.trainClassifier(classifier=clf)
    a.dumpClassifier(dumpName="GaussianNB")
    a.testClassifier()

windowedData, windowLabels = a.windowSplitSourceDataTT()

# Shuffle window indices for a random 80/20 train/test split.
# np.arange is the direct way to build 0..n-1; the original used
# np.linspace(0, n-1, n, dtype=int), which yields the same values.
index = np.arange(len(windowedData))
random.shuffle(index)

trainingData = []
trainingLabels = []
testData = []
testLabels = []
for i in range(len(windowedData)):  # len() is already an int; no cast needed
    # First 80% of the shuffled order goes to the training set.
    if i / len(windowedData) < 0.8:
        trainingData.append(windowedData[index[i]])
        trainingLabels.append(windowLabels[index[i]])