  def formatTrainingSet(self, setClass):

    # input/output filenames for this set class
    Ifilename = "training_data_" + setClass + ".txt"
    Ofilename = "training_data_" + setClass + ".yaml"

    # open, read and process sets
    try:
      Ifile = open(Ifilename, 'r')
    except IOError:
      print("Data file does not exist. Please run the training program first.")
      raise

    # parse each comma-separated line into a row of floats
    fullarr = []
    for line in Ifile:
      fullarr.append([float(v) for v in line.split(',')])

    # store as a (dimensions x datapoints) array: one row per sensor channel
    test = numpy.transpose(numpy.array(fullarr, dtype="float"))
    io.saveYaml(Ofilename, test)
    Ifile.close()
    print("File successfully formatted")
  def reloadPCAParams(self):
    print(".......................... RELOAD PARAMS ................................")

    # load the per-class average scores
    self.setClassAverages = io.fetchYaml('pca_scores.yaml')

    # load the PCA parameters (coeff, latent, dims, window)
    self.pca_params = io.fetchYaml('pca_params.yaml')

    # clear the serial buffer by discarding incoming lines for 3.5 s
    timeI = time.time()
    while time.time() - timeI < 3.5:
      self.ser.readline()

    # reset the debug file used for the error graphs
    self.error = {'left_nod': [], 'right_nod': []}
    io.saveYaml('error_graphs_data.yaml', self.error)
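If `self.ser` is a pyserial `Serial` object, `self.ser.reset_input_buffer()` would discard any already-buffered input immediately; the timed read loop above achieves the same effect while also consuming lines that are still arriving during the 3.5 s window.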
Example #5
  def __init__(self, baudrate=57600, portname="/dev/ttyACM0", numberOfSets=10, window=80):
    self.baudrate = baudrate
    self.portname = portname
    self.numberOfSets = numberOfSets
    self.sensorHistory = []
    self.window = window

    # load the per-class average scores
    self.setClassAverages = io.fetchYaml('pca_scores.yaml')

    # load the PCA parameters
    self.pca_params = io.fetchYaml('pca_params.yaml')

    # connect to the first available serial device
    self.ser = io.connectToAvailablePort(baudrate=self.baudrate, portName=self.portname, debug=True)

    # clear the serial buffer by discarding incoming lines for 3.5 s
    timeI = time.time()
    while time.time() - timeI < 3.5:
      self.ser.readline()

    # graph debug file
    self.error = {'left_nod': [], 'right_nod': []}
    io.saveYaml('error_graphs_data.yaml', self.error)
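The class name is not visible in this excerpt; assuming something like `NodClassifier` (hypothetical), a typical session based on the methods shown would be:

# hypothetical usage; the actual class name is not shown in this excerpt
detector = NodClassifier(baudrate=57600, portname="/dev/ttyACM0")
detector.formatTrainingSet('left_nod')          # convert recorded .txt to .yaml
detector.formatTrainingSet('right_nod')
detector.trainModel(['left_nod', 'right_nod'])  # writes pca_params.yaml / pca_scores.yaml
detector.reloadPCAParams()                      # pick up the newly trained parameters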
  def trainModel(self, setClasses=['left_nod', 'right_nod']):

    # ========  read in training data  ========
    trainedData = []
    trainedDataDims = []
    trainedDataDatapoints = []

    for setClass in setClasses:
      setClassFilename = 'training_data_' + str(setClass) + '.yaml'
      trainedData.append(io.fetchYaml(setClassFilename))
      (d, dp) = trainedData[-1].shape
      trainedDataDims.append(d)
      trainedDataDatapoints.append(dp)

    # check input dimension consistency across training sets
    inputDims = trainedDataDims[0]
    for d in trainedDataDims:
      if inputDims != d:
        print("Number of dimensions in the training sets do not match")
        sys.exit()


    # ========  concatenate training files  ========
    # stack every column (datapoint) of every set, then transpose back to
    # (dimensions x datapoints)
    pcaInput = []
    for setClassIndex in range(len(trainedData)):
      for colIndex in range(trainedDataDatapoints[setClassIndex]):
        pcaInput.append(trainedData[setClassIndex][:, colIndex].tolist())

    pcaInput = np.transpose(np.array(pcaInput))


    # ========  do PCA  ========
    coeff, score, latent = pca.princomp(pcaInput.T, self.dims)
    # fraction of total variance captured by the kept components
    variance_covered = np.sum(latent[0:self.dims]) / np.sum(latent)

    # split the score matrix back into per-class blocks
    startingIndex = 0
    endingIndex = 0
    setClassScores = {}

    for setClassIndex in range(len(setClasses)):
      startingIndex = endingIndex
      endingIndex = endingIndex + trainedDataDatapoints[setClassIndex]
      # the slice end is exclusive, so this covers the whole class block
      setClassScores[setClasses[setClassIndex]] = score[:, startingIndex:endingIndex]

    pca_params = {}
    pca_params['coeff'] = coeff
    pca_params['latent'] = latent
    pca_params['dims'] = self.dims
    pca_params['window'] = self.window
    io.saveYaml('pca_params.yaml', pca_params)


    # ========  find average of training sets  ========
    # locate the strongest peaks in each class's score and average a window
    # of columns around each one
    setClassAverages = {}
    for setClass in setClasses:

      setClassAverage = []
      setClassScore = setClassScores[setClass]

      for peakNumber in range(self.numberOfNods):

        (d, dp) = setClassScore.shape
        # column index of the largest absolute score value
        peakLocation = np.argmax(np.absolute(setClassScore)) % dp
        # assumes each peak sits at least window/2 columns from the edges
        columnsInWindow = range(peakLocation - self.window // 2, peakLocation + self.window // 2)
        setClassAverage.append(setClassScore[:, columnsInWindow])
        setClassScore = np.delete(setClassScore, columnsInWindow, 1)

      # compute average over the extracted peak windows
      setClassAverages[setClass] = np.mean(np.array(setClassAverage), axis=0)

    # normalize each row by its total absolute magnitude
    for setClass in setClassAverages:
      a = setClassAverages[setClass]
      row_sums = np.absolute(a).sum(axis=1)
      setClassAverages[setClass] = a / row_sums[:, np.newaxis]

    # save scores
    io.saveYaml('pca_scores.yaml', setClassAverages)

    # plot all scores
    print("plotting")
    for setClass in setClassAverages:
      fig = plt.figure()
      ax = fig.add_subplot(111)
      ax.plot(np.transpose(setClassAverages[setClass]))
      filename = 'graph_' + str(setClass) + '.png'
      plt.savefig(filename)
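`pca.princomp` is also project-local. Assuming it follows the MATLAB-style `princomp` convention implied by the call sites (rows of the input are observations; it returns the component matrix `coeff`, the projected `score` with one column per datapoint, and the eigenvalues `latent`), a minimal sketch:

# minimal sketch of a MATLAB-style princomp; the real pca module may differ
import numpy as np

def princomp(A, numpc):
    # A: (observations x variables), as in the call princomp(pcaInput.T, dims)
    M = A - A.mean(axis=0)                # center each variable
    cov = np.cov(M, rowvar=False)         # (variables x variables) covariance
    latent, coeff = np.linalg.eigh(cov)   # eigendecomposition (symmetric matrix)
    order = np.argsort(latent)[::-1]      # sort components by descending variance
    latent = latent[order]
    coeff = coeff[:, order][:, :numpc]    # keep the first numpc components
    score = coeff.T @ M.T                 # (numpc x observations) projections
    return coeff, score, latent

Returning the full-length `latent` matches how trainModel computes the covered-variance ratio from all eigenvalues while keeping only `numpc` components.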
Example #8
# (continues from earlier setup: concatenatedPcaInput already holds the left-set columns)
for s in range(0, samplesRight):
  sample = pcaInputRight[:, s].tolist()
  concatenatedPcaInput.append(sample)

pcaInput = np.transpose(np.array(concatenatedPcaInput))


# ========  do PCA  ========
coeff, score, latent = pca.princomp(pcaInput.T, dims)
print(np.mean(latent))

# the first samplesLeft columns came from the left set, the rest from the right set
leftScore = score[:, 0:samplesLeft]
rightScore = score[:, samplesLeft:samplesLeft+samplesRight]

io.saveYaml('pcaTrainingCoeff.yaml', coeff)


# ========  find average of training sets  ========
# find the strongest peaks in the left score and average a window around each
leftScoreAverage = []
for peakNumber in range(0, numberOfNods):
  # recompute the current column count: leftScore shrinks after each deletion,
  # so taking the flat argmax modulo a fixed samplesLeft would misindex
  (d, dp) = leftScore.shape
  peakLocation = np.argmax(np.absolute(leftScore)) % dp
  columnsToDelete = range(peakLocation - window // 2, peakLocation + window // 2)
  leftScoreAverage.append(leftScore[:, columnsToDelete])
  leftScore = np.delete(leftScore, columnsToDelete, 1)

leftScore = np.mean(np.array(leftScoreAverage), axis=0)

# save left score
io.saveYaml('trained_left_nod_score.yaml', leftScore)
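Example #8 stops after saving the averaged left-nod template. The `error` dicts initialized in the earlier examples suggest the live classifier compares incoming score windows against these saved class averages; a plausible matching step (assumed, not shown anywhere in this excerpt) would be a sum-of-squared-differences against each template:

# assumed matching step; the actual classification code is not in this excerpt
import numpy as np

def matchWindow(liveScore, setClassAverages):
    # liveScore: (dims x window) block of projected live data,
    # normalized the same way as the stored class averages
    errors = {}
    for setClass, template in setClassAverages.items():
        errors[setClass] = float(np.sum((liveScore - template) ** 2))
    # the class with the smallest error is the best match
    return min(errors, key=errors.get), errors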