Ejemplo n.º 1
0
    def visualizeAllAcc(self, correlated=False):
        '''Visualizes all datastreams of the Accelerometer.

        Arguments:
          correlated: set to True to also overlay the autocorrelation of each
            stream on its subplot.
        '''
        # One x-axis sample position per row in the data frame.
        sampleAxis = np.linspace(0, self.data.shape[0], self.data.shape[0])

        figure, axes = plt.subplots(3, sharex=True, sharey=True)

        streams = (self.data.accX, self.data.accY, self.data.accZ)
        labels = ("accX", "accY", "accZ")

        for axis, stream, label in zip(axes, streams, labels):
            axis.plot(sampleAxis, stream)
            axis.set_title(label, y=0.65, size='smaller')

        if correlated:
            for axis, stream in zip(axes, streams):
                axis.plot(sampleAxis,
                          DataAnalyzer.DataAnalyzer(stream).getAutocorrelation())

        # Bring the subplots close together and hide the x tick labels on all
        # but the bottom plot.
        figure.subplots_adjust(hspace=0)
        plt.setp([a.get_xticklabels() for a in figure.axes[:-1]], visible=False)

        plt.show()
Ejemplo n.º 2
0
def classify(classiFile):
    """Classify the previously received sample file, then re-arm the watcher.

    Arguments:
      classiFile: path of the newest file dropped into CLASSIFY_FOLDER. It is
        only remembered here; the file processed in this call is the previous
        one (so that a file is never read while still being written).
    """
    global previousFile
    if previousFile == "none":
        # First callback: nothing to classify yet, just remember the file.
        previousFile = classiFile
        FolderWatch.FolderWatch(CLASSIFY_FOLDER, classify)
    else:
        dataFile = pd.read_csv(previousFile, header=0)

        try:
            print("trying...")
            dataFile = analyzer.normalize(dataFile)
            dataFile = analyzer.autoCorrelate(dataFile)
            #is already being autocorrelated by getBPM
            autoanalyzer = DataAnalyzer.AutoAnalyzer(dataFile)
            output = autoanalyzer.getLastPeakTime()
            detectedBPM = output['bpm']
            time = output['time']

            # DTW distance of the sample to every training item is the
            # feature row fed to the model.
            row = [analyzer.DTWSimilarity(dataFile, secondData)
                   for secondData in training_data]

            print("Classify: \t", str(model.predict([row])), ", BPM: ",
                  str(detectedBPM), ", time: ", str(time))
        except Exception as exc:
            # BUG FIX: was a bare `except:` that printed the misleading
            # 'raising exception' while actually swallowing the error (and
            # would also have caught KeyboardInterrupt/SystemExit). Narrow to
            # Exception and report the actual cause.
            print('[EXCEPTION] during classification:', exc)

        os.remove(previousFile)
        previousFile = classiFile
        FolderWatch.FolderWatch(CLASSIFY_FOLDER, classify)
Ejemplo n.º 3
0
def classify(classiFile):
  global previousFile
  if (previousFile == "none"):
    previousFile = classiFile
    FolderWatch.FolderWatch(CLASSIFY_FOLDER, classify)
  else:
    
    #print("classifying: " + previousFile)
    try:
      dataFile = pd.read_csv(previousFile, header=0)
    except:
      print("[EXCEPTION] cant read previous file")
      pass

    try:
      print("Classificating...")
      dataFile = analyzer.normalize(dataFile)
      dataFile = analyzer.autoCorrelate(dataFile)
      #is already being autocorrelated by getBPM
      autoanalyzer = DataAnalyzer.AutoAnalyzer(dataFile)
      output = autoanalyzer.getLastPeakTime() #calculates where the first occuring peak is in the file
      detectedBPM = output['bpm']
      time = output['time']
      peakIndex = output['index'] #index number of the peak in the file
      endPeakIndex = output['endPeriodIndex']

      periodData = autoanalyzer.getPeriodsFromDataIndex(1, peakIndex)['data'] #get one period of data starting from peak index.

      row = []
      for secondData in training_data: # calculate the DTW Similarity between the sample and all items in the training dataset.
        row.append(analyzer.DTWSimilarity(periodData, secondData)) 

      gesture = model.predict([row])[0] #predict the gesture with KNN using the calculated distance matrix

      # if BPM is larger than 200, you know for sure it isn't a movement. Temporary 'knutseloplossing'
      if detectedBPM > 200:
        gesture = "rest"

      print("Classify: \t", str(gesture), ", BPM: ", str(detectedBPM), ", time: ", str(time))  
      socket.sendClassify(str(gesture), detectedBPM, time) #send back the classified gesture through the socket.
      dataFile[peakIndex:endPeakIndex].to_csv(CLASSIFY_SAVE_FOLDER + str(gesture) + "-" + str(detectedBPM) + "-" + str(time) + ".csv")

    except:
      print('[EXCEPTION] during classification')
      pass

    try:
      os.remove(previousFile)
    except:
      print("[EXCEPTION] cant delete previous file")
      pass
    previousFile = classiFile
    FolderWatch.FolderWatch(CLASSIFY_FOLDER, classify)
Ejemplo n.º 4
0
def classify(classiFile):
    """Classify the previously received sample file, then re-arm the watcher.

    Arguments:
      classiFile: path of the newest file dropped into CLASSIFY_FOLDER. The
        file actually processed here is the previous one (guaranteed complete).
    """
    global previousFile
    if previousFile == "none":
        # First callback: nothing to classify yet, just remember the file.
        previousFile = classiFile
        FolderWatch.FolderWatch(CLASSIFY_FOLDER, classify)
    else:
        dataFile = None
        try:
            dataFile = pd.read_csv(previousFile, header=0)
        except Exception as exc:
            # BUG FIX: was a bare `except:`; narrow and report the cause.
            print("[EXCEPTION] cant read previous file:", exc)

        # BUG FIX: originally the classification step ran even when the read
        # failed; `dataFile` was then undefined and the resulting NameError
        # was silently eaten by the bare except below.
        if dataFile is not None:
            try:
                print("Classificating...")
                dataFile = analyzer.normalize(dataFile)
                dataFile = analyzer.autoCorrelate(dataFile)
                #is already being autocorrelated by getBPM
                autoanalyzer = DataAnalyzer.AutoAnalyzer(dataFile)
                output = autoanalyzer.getLastPeakTime()
                detectedBPM = output['bpm']
                time = output['time']

                periodData = autoanalyzer.getPeriods(1, startIndexPeriod=1)['data']

                # DTW similarity between the sample and every training item
                # forms the KNN feature row.
                row = [analyzer.DTWSimilarity(periodData, secondData)
                       for secondData in training_data]

                gesture = model.predict([row])[0]

                # BPM above 200 cannot be a real movement; force "rest".
                # (temporary 'knutseloplossing')
                if detectedBPM > 200:
                    gesture = "rest"

                print("Classify: \t", str(gesture), ", BPM: ", str(detectedBPM),
                      ", time: ", str(time))

                socket.sendClassify(str(gesture), detectedBPM, time)
                print("tried to send...")

            except Exception as exc:
                # BUG FIX: narrowed from a bare `except:`; reports the cause.
                print('[EXCEPTION] during classification:', exc)

        try:
            os.remove(previousFile)
        except OSError as exc:
            # Only filesystem errors are expected here.
            print("[EXCEPTION] cant delete previous file:", exc)
        previousFile = classiFile
        FolderWatch.FolderWatch(CLASSIFY_FOLDER, classify)
Ejemplo n.º 5
0
    def visualizeStream(self,
                        dataStream,
                        correlated=False,
                        title='Data',
                        vLine=None):
        '''Visualizes a particular datastream.

        Arguments:
          dataStream: single datastream to plot
          correlated: if True, overlay the stream's autocorrelation
          title: [optional] plot title
          vLine: [optional] x position at which to draw a vertical line
        '''
        sampleAxis = np.linspace(0, dataStream.shape[0], dataStream.shape[0])

        figure, axis = plt.subplots(1)
        axis.plot(sampleAxis, dataStream)
        axis.set_title(title)

        if correlated:
            axis.plot(sampleAxis,
                      DataAnalyzer.DataAnalyzer(dataStream).getAutocorrelation())

        if vLine:
            plt.axvline(vLine)

        plt.show()
Ejemplo n.º 6
0
# Helper functions:


# get all the data files from the directory
def getDataFileNames(dataType, movement="", dataFolder=DATA_FOLDER):
    """Return the filenames in *dataFolder* whose name contains both
    *dataType* and *movement* as substrings."""
    return [
        fileName
        for fileName in os.listdir(dataFolder)
        if dataType in fileName and movement in fileName
    ]


# ------------------- MAIN ------------------------------------

analyzer = DataAnalyzer.DataAnalyzer()

print("Doing " + str(ITERATIONS) + " iterations with a " +
      str(TEST_SIZE_PERCENT) + " test ratio")

# -- training --

# Accumulators for the training samples, their labels and their lengths.
data_data = []
data_labels = []
data_data_length = []

# Load every CSV whose name contains "training" and normalize it.
# NOTE(review): this loop body appears truncated at the end of this excerpt —
# confirm against the full file.
files = getDataFileNames("training")
for trainingFile in files:
    dataFile = pd.read_csv(DATA_FOLDER + trainingFile, header=0)
    #data = [dataFile['alpha'], dataFile['beta'], dataFile['gamma'], dataFile['accX'], dataFile['accY'], dataFile['accZ']]
    dataFile = analyzer.normalize(dataFile)
Ejemplo n.º 7
0

# get all the data files from the directory
def getDataFileNames(dataType, movement = "", dataFolder = DATA_FOLDER):
  """Return the filenames in *dataFolder* whose name contains both
  *dataType* and *movement* as substrings."""
  return [
    fileName
    for fileName in os.listdir(dataFolder)
    if dataType in fileName and movement in fileName
  ]



# ------------------- MAIN ------------------------------------

analyzer = DataAnalyzer.DataAnalyzer()
print("Connecting to socket..")
socket = Socket.Connection()  # NOTE(review): shadows the stdlib `socket` module name


# -- training --
print("[TRAINING]")

# Accumulators for the training samples, their labels and their lengths.
training_data = []
training_labels = []
training_data_length = []

# Load every CSV from the sequence folder (empty dataType matches all files).
# NOTE(review): this loop body appears truncated at the end of this excerpt —
# confirm against the full file.
files = getDataFileNames("", dataFolder=SEQUENCE_FOLDER)
for trainingFile in files:
  dataFile = pd.read_csv(SEQUENCE_FOLDER + trainingFile, header = 0)
Ejemplo n.º 8
0
from helpers import DataAnalyzer

import matplotlib.pyplot as plt
import numpy as np

from sklearn.preprocessing import normalize as skNorm

#dataFile = pd.read_csv("../../data/testing-leftright-qsMbpdsd6zQTqlrKAADi-1-72BPM.csv", header=0)
#dataFile = pd.read_csv("../../data/training-rotateclockwise-avkfxrmpauHdDpeaAAAa-6.csv", header=0)
#dataFile = pd.read_csv("../../data/training-updown-avkfxrmpauHdDpeaAAAa-1.csv", header=0)
#normal data

# Load a known-flawed recording (shorter than one movement period).
# NOTE(review): `pd` and `Visualizer` are not imported in this excerpt —
# presumably imported elsewhere in the full file; verify.
dataFile = pd.read_csv(
    "../data/FLAWED/lessthan1period/updown-35-49-R53P95G3cV93UMlKAAB0-3_1.csv",
    header=0)
da = DataAnalyzer.DataAnalyzer()

#Normalize each stream individually:
'''
dataNorm = dataFile.copy()
dataNorm['accX'] = skNorm(dataNorm['accX'])[0]
dataNorm['accY'] = skNorm(dataNorm['accY'])[0]
dataNorm['accZ'] = skNorm(dataNorm['accZ'])[0]
'''

#dataFile = da.normalize(dataFile)
#dataFile = da.autoCorrelate(dataFile)

daa = DataAnalyzer.AutoAnalyzer(dataFile)

visualizer = Visualizer.Visualizer(dataFile)
Ejemplo n.º 9
0
import pandas as pd
from helpers import Visualizer
from helpers import DataAnalyzer

#dataFile = pd.read_csv("../../data/testing-leftright-qsMbpdsd6zQTqlrKAADi-1-72BPM.csv", header=0)
#dataFile = pd.read_csv("../../data/training-rotateclockwise-avkfxrmpauHdDpeaAAAa-6.csv", header=0)
#dataFile = pd.read_csv("../../data/training-updown-avkfxrmpauHdDpeaAAAa-1.csv", header=0)
#normal data

# Load one recording chosen for visualization.
dataFile = pd.read_csv(
    "../data/forvisualize/training-w-WvUVD6yGptP0OmsZAABi-3.csv", header=0)
da = DataAnalyzer.DataAnalyzer()

#dataFile = da.normalize(dataFile)
#dataFile = da.autoCorrelate(dataFile)

# Analyze a single accelerometer axis stream.
das = DataAnalyzer.StreamDataAnalyzer(
    dataFile['accZ'])  #<---- PLAY WITH THIS PARAMETER Z, X, Y
#output = das.getFFTData()

daa = DataAnalyzer.AutoAnalyzer(dataFile)
'''
output = daa.getLastPeakTime(visualize=True, periods=4)
peakTime = output['index']
graphData = das.getPeriods(4, startIndex=peakTime)['data']
'''

# Plot every stream of the file (no autocorrelation overlay).
visualizer = Visualizer.Visualizer(dataFile)
visualizer.visualizeAll(correlated=False)

dataFile2 = da.standardize(dataFile)
Ejemplo n.º 10
0
import scipy.stats as stats
from sklearn.preprocessing import normalize as skNorm
from scipy import signal

# presumably 1000 ms / 50 ms-per-sample = 20 samples per second — TODO confirm
samplingRate = 1000 / 50

# NOTE(review): `pd`, `DataAnalyzer` and `Visualizer` are not imported in this
# excerpt — presumably imported earlier in the full file; verify.
dataFile = pd.read_csv(
    "../../data/good-backup-10seconds/testing-leftright-JUxdyRarf6RVZv0WAABN-7.csv",
    header=0)
#dataFile2 = pd.read_csv("../../data/training-leftright-avkfxrmpauHdDpeaAAAa-3.csv", header=0)

# Raw orientation streams as plain arrays.
dataAlpha = dataFile['alpha'].values
dataBeta = dataFile['beta'].values
dataGamma = dataFile['gamma'].values

analyzer = DataAnalyzer.AutoAnalyzer(dataFile)
visualizer = Visualizer.Visualizer(dataFile)

# Locate the last peak (with a visualization) and report its time and BPM.
res = analyzer.getLastPeakTime(visualize=True)

print(res['time'])
print(res['bpm'])
'''

length = int(samplingRate / (bpm[0]/60))
startIndex = length * 2  # <-- Start extracting the peak from the 2nd period
rates = np.array([70,80,90,100,110,120,130,140])/60 #BPMs to test

piece = dataFile['accZ'][startIndex: startIndex+length*2]

peak = signal.find_peaks_cwt(piece, samplingRate/rates/2)