Example #1
import random

import numpy as np

# 'a' (the toolkit instance) and the useDump flag are assumed to be set up earlier in the script
if useDump:
    a.loadDumpNormParam(dumpName="dataOnly")
    clf = a.loadDumpClassifier("dataOnly")
    a.testClassifier(classifier=clf)
    a.setFileSink(fileSinkName="chris", fileSinkPath="../")
    a.startLiveClassification()
else:
    a.initFeatNormalization(dumpName="dataOnly")
    from sklearn import svm
    clf = svm.SVC(kernel='rbf')
    a.trainClassifier(classifier=clf)
    a.dumpClassifier(dumpName="dataOnly")
    a.testClassifier()

windowedData, windowLabels = a.windowSplitSourceDataTT()

index = np.arange(len(windowedData))
random.shuffle(index)

trainingData = []
trainingLabels = []
testData = []
testLabels = []
for i in range(len(windowedData)):
    if i / len(windowedData) < 0.8:
        trainingData.append(windowedData[index[i]])
        trainingLabels.append(windowLabels[index[i]])
    else:
        testData.append(windowedData[index[i]])
        testLabels.append(windowLabels[index[i]])
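
# The manual 80/20 split above can also be written with scikit-learn directly
# (a minimal, equivalent sketch; train_test_split shuffles before splitting):
from sklearn.model_selection import train_test_split

trainingData, testData, trainingLabels, testLabels = train_test_split(
    windowedData, windowLabels, test_size=0.2, shuffle=True)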
Example #2
               fileSourcePath="../",
               startTime=14000,
               stopTime=22000,
               label=1)

# a.addDataFiles(fileSourceName="chris_c.txt", fileSourcePath="../", startTime=100, stopTime=1600, label=1)

# a.addDataFiles(fileSourceName="ben.txt", fileSourcePath="../", startTime=2000, stopTime=6000, label=1)

# a.addDataFiles(fileSourceName="markus.txt", fileSourcePath="../", startTime=500, stopTime=3300, label=1)

# a.addDataFiles(fileSourceName="igor.txt", fileSourcePath="../", startTime=100, stopTime=2900, label=1)
# a.addDataFiles(fileSourceName="igor2.txt", fileSourcePath="../", startTime=600, stopTime=6000, label=1)

import numpy as np

dataSet = a.readDataSet(equalLength=False, checkData=False)
windowData, windowLabels = a.windowSplitSourceDataTT(inputData=dataSet)

features = a.initFeatNormalization(inputData=windowData)
features = a.featureNormalization(features=features, initDone=True)

features = np.array(features)
windowLabels = np.array(windowLabels)

features0 = features[windowLabels == 0]
features1 = features[windowLabels == 1]

m0 = []
m1 = []
diff = []
for i in range(features.shape[1]):
    #f = kernelDensityEstimator(x=features0[::, f0], h=0.15)
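    # Hypothetical completion (the original loop body is truncated): a minimal sketch
    # that fills m0, m1 and diff with per-feature class means and their separation.
    m0.append(np.mean(features0[:, i]))
    m1.append(np.mean(features1[:, i]))
    diff.append(abs(m0[-1] - m1[-1]))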
Example #3
               startTime=0,
               stopTime=10000,
               label=1)
b.addDataFiles(fileSourceName="nowalk3.txt",
               fileSourcePath="../",
               startTime=0,
               stopTime=10000,
               label=1)

import numpy as np

dataSet = b.readDataSet(checkData=False, equalLength=True)
# dataSet is an array-like indexed as dataSet[i][j, k], where i is the i-th loaded file,
# j the "time" index within that file and k the sensor channel.
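# For illustration (assuming at least one file with at least two sensor channels):
# dataSet[0][:, 1] would be the complete time series of sensor channel 1 from the first file.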

wData, wLabels = b.windowSplitSourceDataTT(inputData=dataSet,
                                           inputLabels=np.array([
                                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                               1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                               1, 1
                                           ]))

wLabels = np.array(wLabels)

print(wLabels)

toggleI = 1
toggleA = 1

print("Number of windows, walk: ", str(np.size(wLabels[wLabels == 0])))
print("Number of windows, no walk: ", str(np.size(wLabels[wLabels == 1])))

for i in range(len(wLabels)):
    if wLabels[i] == 0:
Example #4

               className="markus")

a.addDataFiles(fileSourceName="igor.txt",
               fileSourcePath="../",
               startTime=100,
               stopTime=2900,
               label=6,
               className="igor")
a.addDataFiles(fileSourceName="igor2.txt",
               fileSourcePath="../",
               startTime=600,
               stopTime=6000,
               label=6)

a.readDataSet(equalLength=False, checkData=False)
windows, labels = a.windowSplitSourceDataTT()
features = a.initFeatNormalization(windows)

import numpy as np
from sklearn.feature_selection import RFE
from xgboost import XGBClassifier

# create the RFE model and select the 100 best features

features = np.array(features)
labels = np.array(labels)

model = XGBClassifier()
model = RFE(model, n_features_to_select=100)
model.fit(features, labels)
# summarize the selection of the attributes
print(model.support_)
print(model.ranking_)
fov = a.returnFeatureIndices()
print(fov)
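
# A minimal follow-up sketch: RFE exposes the chosen columns as the boolean mask
# model.support_, so the reduced feature matrix can be taken directly
# (equivalently via model.transform(features)).
selectedFeatures = features[:, model.support_]
print(selectedFeatures.shape)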