# Train (or reload) an RBF-kernel SVM classifier on the recorded data set,
# then build a shuffled 80/20 train/test split of the windowed data.
# NOTE(review): this chunk is truncated — the split loop below is cut off
# mid-body (the test-set `else` branch is not visible here); confirm against
# the full file.
a.readDataSet(equalLength=False, checkData=False)

useDump = False
if useDump:
    # Reuse previously dumped normalization parameters and classifier,
    # then start a live classification session writing to a file sink.
    a.loadDumpNormParam(dumpName="dataOnly")
    clf = a.loadDumpClassifier("dataOnly")
    a.testClassifier(classifier=clf)
    a.setFileSink(fileSinkName="chris", fileSinkPath="../")
    a.startLiveClassification()
else:
    # Train from scratch and dump the fitted classifier for later reuse.
    a.initFeatNormalization(dumpName="dataOnly")
    from sklearn import svm
    clf = svm.SVC(kernel='rbf')
    a.trainClassifier(classifier=clf)
    a.dumpClassifier(dumpName="dataOnly")
    a.testClassifier()

# Manual shuffled 80/20 split of the windowed data.
# NOTE(review): original indentation was lost; this may have been inside the
# `else:` branch, but with useDump hard-coded False the behavior is identical.
windowedData, windowLabels = a.windowSplitSourceDataTT()
index = np.arange(len(windowedData))  # idiomatic equivalent of linspace(0, n-1, n, dtype=int)
random.shuffle(index)
trainingData = []
trainingLabels = []
testData = []
testLabels = []
for i in range(len(windowedData)):
    # First 80% of the shuffled order goes to the training set.
    if i / len(windowedData) < 0.8:
        trainingData.append(windowedData[index[i]])
        trainingLabels.append(windowLabels[index[i]])
# Train (or reload) an MLPClassifier on the recorded data set, then build a
# shuffled 80/20 train/test split of the windowed data.
# NOTE(review): this chunk is truncated — the split loop below is cut off
# mid-body (the test-set `else` branch is not visible here); confirm against
# the full file.
a.readDataSet(equalLength=False, checkData=False)

useDump = False
if useDump:
    # Reuse previously dumped normalization parameters and classifier,
    # then start a live classification session writing to a file sink.
    a.loadDumpNormParam(dumpName="MLPClassifier")
    clf = a.loadDumpClassifier("MLPClassifier")
    a.testClassifier(classifier=clf)
    a.setFileSink(fileSinkName="chris", fileSinkPath="../")
    a.startLiveClassification()
else:
    # Train from scratch and dump the fitted classifier for later reuse.
    a.initFeatNormalization(dumpName="MLPClassifier")
    from sklearn.neural_network import MLPClassifier
    clf = MLPClassifier()
    a.trainClassifier(classifier=clf)
    a.dumpClassifier(dumpName="MLPClassifier")
    a.testClassifier()

# Manual shuffled 80/20 split of the windowed data.
# NOTE(review): original indentation was lost; this may have been inside the
# `else:` branch, but with useDump hard-coded False the behavior is identical.
windowedData, windowLabels = a.windowSplitSourceDataTT()
index = np.arange(len(windowedData))  # idiomatic equivalent of linspace(0, n-1, n, dtype=int)
random.shuffle(index)
trainingData = []
trainingLabels = []
testData = []
testLabels = []
for i in range(len(windowedData)):
    # First 80% of the shuffled order goes to the training set.
    if i / len(windowedData) < 0.8:
        trainingData.append(windowedData[index[i]])
        trainingLabels.append(windowLabels[index[i]])
# Register additional data files, read the data set, and train (or reload) a
# k-nearest-neighbors classifier.
# NOTE(review): this chunk begins mid-statement — the fragment `label=5)` is
# the tail of a preceding a.addDataFiles(...) call cut off at the chunk
# boundary; its arguments are not visible here and must be taken from the
# full file.
a.addDataFiles(fileSourceName="igor.txt", fileSourcePath="../", startTime=100, stopTime=2900, label=6)
a.addDataFiles(fileSourceName="igor2.txt", fileSourcePath="../", startTime=600, stopTime=6000, label=6)
a.readDataSet(equalLength=False, checkData=False)

useDump = False
if useDump:
    # Reuse previously dumped normalization parameters and classifier,
    # then start a live classification session writing to a file sink.
    a.loadDumpNormParam(dumpName="KNeighborsClassifier")
    clf = a.loadDumpClassifier("KNeighborsClassifier")
    a.testClassifier(classifier=clf)
    a.setFileSink(fileSinkName="chris", fileSinkPath="../")
    a.startLiveClassification()
else:
    # Train from scratch and dump the fitted classifier for later reuse.
    a.initFeatNormalization(dumpName="KNeighborsClassifier")
    from sklearn.neighbors import KNeighborsClassifier
    clf = KNeighborsClassifier(n_neighbors=4, metric='euclidean')
    a.trainClassifier(classifier=clf)
    a.dumpClassifier(dumpName="KNeighborsClassifier")
    a.testClassifier()
# Train (or reload) an XGBoost classifier, then build a shuffled 80/20
# train/test split of the windowed data.
# NOTE(review): this chunk is truncated — the split loop below is cut off
# mid-body (the training-labels append and the test-set `else` branch are not
# visible here); confirm against the full file.
useDump = False
if useDump:
    # Reuse previously dumped normalization parameters and classifier,
    # then start a live classification session writing to a file sink.
    a.loadDumpNormParam(dumpName="XGBClassifier")
    clf = a.loadDumpClassifier("XGBClassifier")
    a.testClassifier(classifier=clf)
    a.setFileSink(fileSinkName="chris", fileSinkPath="../")
    a.startLiveClassification()
else:
    # Train from scratch and dump the fitted classifier for later reuse.
    a.initFeatNormalization(dumpName="XGBClassifier")
    from xgboost import XGBClassifier
    clf = XGBClassifier()
    a.trainClassifier(classifier=clf)
    a.dumpClassifier(dumpName="XGBClassifier")
    a.testClassifier()

# Manual shuffled 80/20 split of the windowed data.
# NOTE(review): original indentation was lost; this may have been inside the
# `else:` branch, but with useDump hard-coded False the behavior is identical.
windowedData, windowLabels = a.windowSplitSourceDataTT()
index = np.arange(len(windowedData))  # idiomatic equivalent of linspace(0, n-1, n, dtype=int)
random.shuffle(index)
trainingData = []
trainingLabels = []
testData = []
testLabels = []
for i in range(len(windowedData)):
    # First 80% of the shuffled order goes to the training set.
    if i / len(windowedData) < 0.8:
        trainingData.append(windowedData[index[i]])
# Train (or reload) a decision-tree classifier, then build a shuffled 80/20
# train/test split of the windowed data.
# NOTE(review): this chunk is truncated — the split loop below is cut off
# mid-body (the training-labels append and the test-set `else` branch are not
# visible here); confirm against the full file.
useDump = False
if useDump:
    # Reuse previously dumped normalization parameters and classifier,
    # then start a live classification session writing to a file sink.
    a.loadDumpNormParam(dumpName="DecisionTreeClassifier")
    clf = a.loadDumpClassifier("DecisionTreeClassifier")
    a.testClassifier(classifier=clf)
    a.setFileSink(fileSinkName="chris", fileSinkPath="../")
    a.startLiveClassification()
else:
    # Train from scratch and dump the fitted classifier for later reuse.
    a.initFeatNormalization(dumpName="DecisionTreeClassifier")
    from sklearn.tree import DecisionTreeClassifier
    clf = DecisionTreeClassifier()
    a.trainClassifier(classifier=clf)
    a.dumpClassifier(dumpName="DecisionTreeClassifier")
    a.testClassifier()

# Manual shuffled 80/20 split of the windowed data.
# NOTE(review): original indentation was lost; this may have been inside the
# `else:` branch, but with useDump hard-coded False the behavior is identical.
windowedData, windowLabels = a.windowSplitSourceDataTT()
index = np.arange(len(windowedData))  # idiomatic equivalent of linspace(0, n-1, n, dtype=int)
random.shuffle(index)
trainingData = []
trainingLabels = []
testData = []
testLabels = []
for i in range(len(windowedData)):
    # First 80% of the shuffled order goes to the training set.
    if i / len(windowedData) < 0.8:
        trainingData.append(windowedData[index[i]])
# Train (or reload) a Gaussian naive-Bayes classifier on the recorded data
# set, then build a shuffled 80/20 train/test split of the windowed data.
# NOTE(review): this chunk is truncated — the split loop below is cut off
# mid-body (the test-set `else` branch is not visible here); confirm against
# the full file.
a.readDataSet(equalLength=False, checkData=False)

useDump = False
if useDump:
    # Reuse previously dumped normalization parameters and classifier,
    # then start a live classification session writing to a file sink.
    a.loadDumpNormParam(dumpName="GaussianNB")
    clf = a.loadDumpClassifier("GaussianNB")
    a.testClassifier(classifier=clf)
    a.setFileSink(fileSinkName="chris", fileSinkPath="../")
    a.startLiveClassification()
else:
    # Train from scratch and dump the fitted classifier for later reuse.
    a.initFeatNormalization(dumpName="GaussianNB")
    from sklearn.naive_bayes import GaussianNB
    clf = GaussianNB()
    a.trainClassifier(classifier=clf)
    a.dumpClassifier(dumpName="GaussianNB")
    a.testClassifier()

# Manual shuffled 80/20 split of the windowed data.
# NOTE(review): original indentation was lost; this may have been inside the
# `else:` branch, but with useDump hard-coded False the behavior is identical.
windowedData, windowLabels = a.windowSplitSourceDataTT()
index = np.arange(len(windowedData))  # idiomatic equivalent of linspace(0, n-1, n, dtype=int)
random.shuffle(index)
trainingData = []
trainingLabels = []
testData = []
testLabels = []
for i in range(len(windowedData)):
    # First 80% of the shuffled order goes to the training set.
    if i / len(windowedData) < 0.8:
        trainingData.append(windowedData[index[i]])
        trainingLabels.append(windowLabels[index[i]])