def reloadPCAParams(self):
    """Re-read the PCA model files from disk and reset debug state.

    Reloads the per-class average scores ('pca_scores.yaml') and the PCA
    projection parameters ('pca_params.yaml'), drains the serial port for
    3.5 seconds so stale samples are discarded, and re-creates the
    error-graph debug file.
    """
    # parenthesized print works identically in Python 2 and Python 3
    print(".......................... RELOAD PARAMS ................................")

    # load the setClassAverages
    self.setClassAverages = io.fetchYaml('pca_scores.yaml')

    # load the pca_params
    self.pca_params = io.fetchYaml('pca_params.yaml')

    # clear serial buffer: read and discard lines for 3.5 seconds
    drain_start = time.time()
    while time.time() - drain_start < 3.5:
        self.ser.readline()

    # graph debug file: reset per-class error history
    self.error = {'left_nod': [], 'right_nod': []}
    io.saveYaml('error_graphs_data.yaml', self.error)
def readParams():
  """Reload the module-level params from paramsFilename.

  Updates the `params` and `lastModTime` globals; `lastModTime` records
  the file's st_mtime so callers can detect subsequent modifications.
  """
  global lastModTime
  global params

  params = io.fetchYaml(paramsFilename)
  lastModTime = os.stat(paramsFilename).st_mtime
  # parenthesized print works identically in Python 2 and Python 3
  print("Loaded new params: " + str(params))
    def reloadPCAParams(self):
        """Re-read the PCA model files from disk and reset debug state.

        Reloads the per-class average scores ('pca_scores.yaml') and the
        PCA projection parameters ('pca_params.yaml'), drains the serial
        port for 3.5 seconds so stale samples are discarded, and
        re-creates the error-graph debug file.
        """
        # parenthesized print works identically in Python 2 and Python 3
        print(".......................... RELOAD PARAMS ................................")

        # load the setClassAverages
        self.setClassAverages = io.fetchYaml('pca_scores.yaml')

        # load the pca_params
        self.pca_params = io.fetchYaml('pca_params.yaml')

        # clear serial buffer: read and discard lines for 3.5 seconds
        drain_start = time.time()
        while time.time() - drain_start < 3.5:
            self.ser.readline()

        # graph debug file: reset per-class error history
        self.error = {'left_nod': [], 'right_nod': []}
        io.saveYaml('error_graphs_data.yaml', self.error)
# ---- Esempio n. 4 (Example 4) ----
  def __init__(self, baudrate=57600, portname="/dev/ttyACM0", numberOfSets=10, window=80):
    """Set up the classifier: load model files and open the serial port.

    Stores the connection settings, loads 'pca_scores.yaml' and
    'pca_params.yaml', connects to the configured serial device, drains
    stale serial input for 3.5 seconds, and initialises the error-graph
    debug file.
    """
    self.baudrate = baudrate
    self.portname = portname
    self.numberOfSets = numberOfSets
    self.window = window
    self.sensorHistory = []

    # model files: per-class average scores and PCA projection params
    self.setClassAverages = io.fetchYaml('pca_scores.yaml')
    self.pca_params = io.fetchYaml('pca_params.yaml')

    # connect to the serial device
    self.ser = io.connectToAvailablePort(baudrate=self.baudrate, portName=self.portname, debug=True)

    # clear serial buffer: discard whatever arrives within 3.5 seconds
    drain_deadline = time.time() + 3.5
    while time.time() < drain_deadline:
      discarded = self.ser.readline()

    # graph debug file: empty per-class error history
    self.error = {'left_nod': [], 'right_nod': []}
    io.saveYaml('error_graphs_data.yaml', self.error)
# ---- Esempio n. 5 (Example 5) ----
  def trainModel(self, setClasses=['left_nod', 'right_nod']):
    """Fit the PCA nod model from per-class training files.

    For each class name in setClasses, reads 'training_data_<class>.yaml'
    (a dims x datapoints array), runs PCA over the concatenated data,
    extracts self.numberOfNods peak-centred column windows per class,
    averages and L1-row-normalizes them, then saves 'pca_params.yaml' and
    'pca_scores.yaml' and writes a 'graph_<class>.png' plot per class.

    NOTE(review): setClasses is a mutable default argument; it is only
    read here so this is benign, but a tuple default would be safer.
    """

    # ========  read in training data  ========
    trainedData = []            # one (dims x datapoints) array per class
    trainedDataDims = []        # row count (input dimensionality) per class
    trainedDataDatapoints = []  # column count (samples) per class

    for setClassIndex in range(0, len(setClasses)):
      setClassFilename = 'training_data_' + str(setClasses[setClassIndex]) + '.yaml'
      trainedData.append(io.fetchYaml(setClassFilename))
      (d, dp) = trainedData[setClassIndex].shape
      trainedDataDims.append(d)
      trainedDataDatapoints.append(dp)

    # check input dimension consistency across training sets
    inputDims = trainedDataDims[0]
    for d in trainedDataDims:
      if inputDims != d:
        print "Number of dimensions in the training sets do not match"
        sys.exit()


    # ========  concatenate training files  ========
    # Collect every column of every class as a row, then transpose so the
    # result is (dims x total_datapoints) with columns as datapoints.
    pcaInput = []

    for setClassIndex in range(0, len(trainedData)):
      for colIndex in range(0, trainedDataDatapoints[setClassIndex]):
        # NOTE(review): dp here shadows the column count unpacked above
        dp = trainedData[setClassIndex][:, colIndex].tolist()
        pcaInput.append(dp)

    pcaInput = np.transpose(np.array(pcaInput))


    # ========  do PCA  ========
    # princomp is handed observations-as-rows, hence the transpose
    coeff, score, latent = pca.princomp(pcaInput.T, self.dims)
    # fraction of total variance captured by the first self.dims components
    # NOTE(review): computed but never used or reported
    varience_covered = np.sum(latent[0:self.dims]) / np.sum(latent)

    # split the score matrix back into per-class column ranges
    startingIndex = 0
    endingIndex = 0
    setClassScores = {}

    for setClassIndex in range(0, len(setClasses)):
      startingIndex = endingIndex
      endingIndex = endingIndex + trainedDataDatapoints[setClassIndex]
      # NOTE(review): the -1 drops the last column of each class; an
      # inclusive split would be score[:, startingIndex:endingIndex].
      # Confirm whether this is intentional.
      setClassScores[setClasses[setClassIndex]] = score[:, startingIndex:endingIndex-1]

    # persist the fitted projection so the live classifier can reload it
    pca_params ={}
    pca_params['coeff'] = coeff
    pca_params['latent'] = latent
    pca_params['dims'] = self.dims
    pca_params['window'] = self.window
    io.saveYaml('pca_params.yaml', pca_params)


    # ========  find average of training sets  ========
    # find peaks in left score
    setClassAverages = {}
    for setClassIndex in range(0, len(setClasses)):

      setClassAverage = []
      setClassScore = setClassScores[setClasses[setClassIndex]]

      for peakNumber in range(0, self.numberOfNods):

        (d, dp) = setClassScore.shape
        # column of the globally largest |score| entry: flattened argmax
        # reduced modulo the column count
        peakLocation = np.argmax(np.absolute(setClassScore))%dp
        # Python 2 integer division: window/2 truncates.
        # NOTE(review): indices are not clamped — a peak within window/2
        # of either edge yields negative / out-of-range columns; confirm
        # training data guarantees interior peaks.
        columnsInWindow = range(peakLocation-self.window/2, peakLocation+self.window/2)
        setClassAverage.append(setClassScore[:, columnsInWindow])
        # remove the extracted window so the next iteration finds a new peak
        setClassScore = np.delete(setClassScore, columnsInWindow, 1)

      # compute average across the extracted peak windows
      setClassAverages[setClasses[setClassIndex]] = np.mean(np.array(setClassAverage), axis=0)

    # normalize: scale each row by the sum of its absolute values (L1 norm)
    for setClass in setClassAverages:
      a = setClassAverages[setClass]
      row_sums = np.absolute(a).sum(axis=1)
      setClassAverages[setClass] = a / row_sums[:, np.newaxis]

    # save scores
    io.saveYaml('pca_scores.yaml', setClassAverages)

    # plot all scores, one PNG per class
    print "plotting"
    for setClass in setClassAverages:
      fig = plt.figure()
      ax = fig.add_subplot(111)
      ax.plot(np.transpose(setClassAverages[setClass]))
      filename = 'graph_' + str(setClass) + '.png'
      plt.savefig(filename)
# ---- Esempio n. 6 (Example 6) ----
  def trainModel(self, setClasses=['left_nod', 'right_nod']):
    """Fit the PCA nod model from per-class training files.

    For each class name in setClasses, reads 'training_data_<class>.yaml'
    (a dims x datapoints array), runs PCA over the concatenated data,
    extracts self.numberOfNods peak-centred column windows per class,
    averages and L1-row-normalizes them, then saves 'pca_params.yaml' and
    'pca_scores.yaml', writes a 'graph_<class>.png' plot per class, and
    opens each plot in the 'eog' image viewer.

    NOTE(review): setClasses is a mutable default argument; it is only
    read here so this is benign, but a tuple default would be safer.
    """

    # ========  read in training data  ========
    trainedData = []            # one (dims x datapoints) array per class
    trainedDataDims = []        # row count (input dimensionality) per class
    trainedDataDatapoints = []  # column count (samples) per class

    for setClassIndex in range(0, len(setClasses)):
      setClassFilename = 'training_data_' + str(setClasses[setClassIndex]) + '.yaml'
      trainedData.append(io.fetchYaml(setClassFilename))
      (d, dp) = trainedData[setClassIndex].shape
      trainedDataDims.append(d)
      trainedDataDatapoints.append(dp)

    # check input dimension consistency across training sets
    inputDims = trainedDataDims[0]
    for d in trainedDataDims:
      if inputDims != d:
        print "Number of dimensions in the training sets do not match"
        sys.exit()


    # ========  concatenate training files  ========
    # Collect every column of every class as a row, then transpose so the
    # result is (dims x total_datapoints) with columns as datapoints.
    pcaInput = []

    for setClassIndex in range(0, len(trainedData)):
      for colIndex in range(0, trainedDataDatapoints[setClassIndex]):
        # NOTE(review): dp here shadows the column count unpacked above
        dp = trainedData[setClassIndex][:, colIndex].tolist()
        pcaInput.append(dp)

    pcaInput = np.transpose(np.array(pcaInput))


    # ========  do PCA  ========
    # princomp is handed observations-as-rows, hence the transpose
    coeff, score, latent = pca.princomp(pcaInput.T, self.dims)
    # fraction of total variance captured by the first self.dims components
    # NOTE(review): computed but never used or reported
    varience_covered = np.sum(latent[0:self.dims]) / np.sum(latent)

    # split the score matrix back into per-class column ranges
    startingIndex = 0
    endingIndex = 0
    setClassScores = {}

    for setClassIndex in range(0, len(setClasses)):
      startingIndex = endingIndex
      endingIndex = endingIndex + trainedDataDatapoints[setClassIndex]
      # NOTE(review): the -1 drops the last column of each class; an
      # inclusive split would be score[:, startingIndex:endingIndex].
      # Confirm whether this is intentional.
      setClassScores[setClasses[setClassIndex]] = score[:, startingIndex:endingIndex-1]

    # persist the fitted projection so the live classifier can reload it
    pca_params ={}
    pca_params['coeff'] = coeff
    pca_params['latent'] = latent
    pca_params['dims'] = self.dims
    pca_params['window'] = self.window
    io.saveYaml('pca_params.yaml', pca_params)


    # ========  find average of training sets  ========
    # find peaks in left score
    setClassAverages = {}
    for setClassIndex in range(0, len(setClasses)):

      setClassAverage = []
      setClassScore = setClassScores[setClasses[setClassIndex]]

      for peakNumber in range(0, self.numberOfNods):

        (d, dp) = setClassScore.shape
        # column of the globally largest |score| entry: flattened argmax
        # reduced modulo the column count
        peakLocation = np.argmax(np.absolute(setClassScore))%dp
        # Python 2 integer division: window/2 truncates.
        # NOTE(review): indices are not clamped — a peak within window/2
        # of either edge yields negative / out-of-range columns; confirm
        # training data guarantees interior peaks.
        columnsInWindow = range(peakLocation-self.window/2, peakLocation+self.window/2)
        setClassAverage.append(setClassScore[:, columnsInWindow])
        # remove the extracted window so the next iteration finds a new peak
        setClassScore = np.delete(setClassScore, columnsInWindow, 1)

      # compute average across the extracted peak windows
      setClassAverages[setClasses[setClassIndex]] = np.mean(np.array(setClassAverage), axis=0)

    # normalize: scale each row by the sum of its absolute values (L1 norm)
    for setClass in setClassAverages:
      a = setClassAverages[setClass]
      row_sums = np.absolute(a).sum(axis=1)
      setClassAverages[setClass] = a / row_sums[:, np.newaxis]

    # save scores
    io.saveYaml('pca_scores.yaml', setClassAverages)

    # plot all scores, one PNG per class
    print "plotting"
    for setClass in setClassAverages:
      fig = plt.figure()
      ax = fig.add_subplot(111)
      ax.plot(np.transpose(setClassAverages[setClass]))
      filename = 'graph_' + str(setClass) + '.png'
      plt.savefig(filename)
      # open the plot in Eye of GNOME (blocks until the viewer exits)
      os.system("eog " + filename)
import acd_file_io_lib as io
import time
import numpy as np
from numpy import mean, cov, double, cumsum, dot, linalg, array, rank
import matplotlib.pyplot as plt
import os

params = io.fetchYaml('pca_scores.yaml')

# Plot the saved left-nod average scores (one line per PCA dimension),
# save the figure, and open it in the Eye of GNOME viewer.
filename = 'test_img.png'
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(params['left_nod'].T)
plt.savefig(filename)
os.system("eog " + filename)
# ---- Esempio n. 8 (Example 8) ----
import pca
import numpy as np
import sys
import acd_file_io_lib as io


# model parameters
# (presumably: PCA dimensions kept, peaks per class, and samples per
# peak window — matches the class-based trainer's usage; TODO confirm)
dims = 2
numberOfNods = 10
window = 80


# ========  read in training data  ========
# each training file holds a (dimensions x samples) array
# read in left nods file
pcaInputLeft = io.fetchYaml('training_data_left_nod.yaml')
(inputDimsLeft, samplesLeft) = pcaInputLeft.shape

# read in right nods file
pcaInputRight = io.fetchYaml('training_data_right_nod.yaml')
(inputDimsRight, samplesRight) = pcaInputRight.shape


# check input dimension consistency across training sets
if inputDimsLeft != inputDimsRight:
  # parenthesized print works identically in Python 2 and Python 3
  print("Number of dimensions in the training sets do not match")
  sys.exit()
else:
  inputDims = inputDimsLeft


# ========  concatenate training files  ========
import acd_file_io_lib as io
import time
import numpy as np
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank
import matplotlib.pyplot as plt
import os


params = io.fetchYaml('pca_scores.yaml')

# Plot the saved left-nod average scores (one line per PCA dimension),
# save the figure, and open it in the Eye of GNOME viewer.
filename = 'test_img.png'
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(params['left_nod'].T)
plt.savefig(filename)
os.system("eog " + filename)