def validate(self, reader):
    # Unpack the validation set description from the reader.
    validateFiles = reader.testdata
    triggers = reader.classtestTriggers
    onsets = reader.classtestOnsets
    finOnsets = reader.classtestFinOnsets
    [validateData, validateLabels] = utilities.dataLoadFromEDF(
        self, validateFiles, triggers, onsets, finOnsets, self.params)

    numChannels = validateData.shape[1]
    numSamples = validateData.shape[0]

    # Probe the 2D feature shape on one training sample to preallocate fVecs.
    tmpVec = utilities.get2DFeatures(np.squeeze(self.trainData[0, :, :]),
                                     self.params)
    if np.ndim(tmpVec) == 3:
        fVecs = np.zeros((numSamples, tmpVec.shape[0], tmpVec.shape[1],
                          tmpVec.shape[2]))
    if np.ndim(tmpVec) == 2:
        fVecs = np.zeros((numSamples, tmpVec.shape[0], tmpVec.shape[1]))

    # Classify each validation sample with the trained model.
    result = np.zeros((numSamples))
    for k in range(numSamples):
        # print('Validate Sample ', k, ' of ', numSamples)
        curFvec = utilities.get2DFeatures(
            np.squeeze(validateData[k, :, :]), self.params)
        result[k] = self.trainResult.model.testModel(curFvec)

    classRates, confMat = utilities.calcStats(validateLabels, result)
    # print('Class Rates:\n', classRates)
    # print('Confusion Matrix: \n', confMat)
    return classRates, confMat
def validate(self, reader):
    validateFiles = reader.testdata
    triggers = reader.classtestTriggers
    onsets = reader.classtestOnsets
    finOnsets = reader.classtestFinOnsets
    [validateData, validateLabels] = utilities.dataLoadFromEDF(
        self, validateFiles, triggers, onsets, finOnsets, self.params)

    # Project the validation epochs with the CSP operator fitted on the training data.
    fVecs = self.trainResult.cspOp.transform(validateData)

    # Norm! (use the training mean / std)
    for i in range(fVecs.shape[0]):
        fVecs[i, :] = (fVecs[i, :] - self.trainResult.mean) / self.trainResult.std

    fTransformed = fVecs
    if self.params.finalClassifier is not None:
        fTransformed = utilities.applyClassifier(
            self.params.finalClassifier, self.trainResult.finalOp, fTransformed)

    # Assign a class to every transformed vector.
    result = np.zeros((fTransformed.shape[0]))
    for i, fvec in enumerate(fTransformed):
        if self.params.distanceFun is not None:
            result[i] = self.params.distanceFun(
                self.trainResult.trainTransformedVecs,
                self.trainResult.trainLabels, fvec, self.params)
        else:
            result[i] = fTransformed[i]

    classRates, confMat = utilities.calcStats(validateLabels, result)
    # print('Class Rates:\n', classRates)
    # print('Confusion Matrix: \n', confMat)
    return classRates, confMat
def validate(self, reader):
    validateFiles = reader.testdata
    triggers = reader.classtestTriggers
    onsets = reader.classtestOnsets
    finOnsets = reader.classtestFinOnsets
    [validateData, validateLabels] = utilities.dataLoadFromEDF(
        self, validateFiles, triggers, onsets, finOnsets, self.params)

    numChannels = validateData.shape[1]
    numSamples = validateData.shape[0]

    # Work out the length of the concatenated feature vector over all channels.
    sumFvecLen = 0
    for ch in range(numChannels):
        if self.params.fDiaps is not None:
            self.params.lowFreq = self.params.fDiaps[ch][0]
            self.params.highFreq = self.params.fDiaps[ch][1]
        tmpVec = utilities.get1DFeatures(
            np.squeeze(self.trainData[0, ch, :]), self.params)
        sumFvecLen = sumFvecLen + len(tmpVec)

    # Build one concatenated feature vector per validation sample.
    fVecs = np.zeros((numSamples, sumFvecLen))
    for k in range(numSamples):
        # print('Validate Sample ', k, ' of ', numSamples)
        curFvec = np.zeros((0))
        for i in range(numChannels):
            if self.params.fDiaps is not None:
                self.params.lowFreq = self.params.fDiaps[i][0]
                self.params.highFreq = self.params.fDiaps[i][1]
            chVec = utilities.get1DFeatures(
                np.squeeze(validateData[k, i, :]), self.params)
            curFvec = np.concatenate((curFvec, chVec), axis=0)
        fVecs[k, :] = curFvec

    # Norm! (use the training mean / std)
    for i in range(fVecs.shape[0]):
        fVecs[i, :] = (fVecs[i, :] - self.trainResult.mean) / self.trainResult.std

    fTransformed = fVecs
    if self.params.usePCA:
        fTransformed = self.trainResult.pcaOp.transform(fVecs)
    if self.params.finalClassifier is not None:
        fTransformed = utilities.applyClassifier(
            self.params.finalClassifier, self.trainResult.finalOp, fTransformed)

    result = np.zeros((fTransformed.shape[0]))
    for i, fvec in enumerate(fTransformed):
        if self.params.distanceFun is not None:
            result[i] = self.params.distanceFun(
                self.trainResult.trainTransformedVecs,
                self.trainResult.trainLabels, fvec, self.params)
        else:
            result[i] = fTransformed[i]

    classRates, confMat = utilities.calcStats(validateLabels, result)
    # print('Class Rates:\n', classRates)
    # print('Confusion Matrix: \n', confMat)
    return classRates, confMat
def validate(self, reader):
    validateFiles = reader.testdata
    triggers = reader.classtestTriggers
    onsets = reader.classtestOnsets
    finOnsets = reader.classtestFinOnsets
    [validateData, validateLabels] = utilities.dataLoadFromEDF(
        self, validateFiles, triggers, onsets, finOnsets, self.params)

    numChannels = validateData.shape[1]
    numSamples = validateData.shape[0]

    # Probe the per-channel feature length on the first sample and channel.
    tmpVec = utilities.get1DFeatures(np.squeeze(validateData[0, 0, :]),
                                     self.params)
    fvecLen = len(tmpVec)

    # One feature vector per channel and sample.
    fVecs = np.zeros((numSamples, numChannels, fvecLen))
    for k in range(numSamples):
        # print("Fvec: " + str(k) + " of " + str(numSamples))
        for i in range(numChannels):
            fVecs[k, i, :] = utilities.get1DFeatures(
                np.squeeze(validateData[k, i, :]), self.params)

    # Norm! Normalize each channel with the training statistics, optionally
    # project it with its own PCA operator, and stack channels side by side.
    fTransformed = np.zeros((fVecs.shape[0], fVecs.shape[1] * fVecs.shape[2]))
    if self.params.usePCA:
        fTransformed = np.zeros(
            (fVecs.shape[0], fVecs.shape[1] * self.params.numPC))
    for i in range(numChannels):
        chanVecs = np.squeeze(fVecs[:, i, :])
        for k in range(chanVecs.shape[0]):
            chanVecs[k, :] = (chanVecs[k, :] - self.trainResult.mean[i]
                              ) / self.trainResult.std[i]
        if self.params.usePCA:
            fTransformed[:, i * self.params.numPC:(i + 1) * self.params.numPC] = \
                self.trainResult.pcaOp[i].transform(chanVecs)
        else:
            fTransformed[:, i * fVecs.shape[2]:(i + 1) * fVecs.shape[2]] = chanVecs

    if self.params.finalClassifier is not None:
        fTransformed = utilities.applyClassifier(
            self.params.finalClassifier, self.trainResult.finalOp, fTransformed)

    result = np.zeros((fTransformed.shape[0]))
    for i, fvec in enumerate(fTransformed):
        if self.params.distanceFun is not None:
            result[i] = self.params.distanceFun(
                self.trainResult.trainTransformedVecs,
                self.trainResult.trainLabels, fvec, self.params)
        else:
            result[i] = fTransformed[i]

    classRates, confMat = utilities.calcStats(validateLabels, result)
    # print('Class Rates:\n', classRates)
    # print('Confusion Matrix: \n', confMat)
    return classRates, confMat
def validate(self, reader):
    validateFiles = reader.testdata
    triggers = reader.classtestTriggers
    onsets = reader.classtestOnsets
    finOnsets = reader.classtestFinOnsets
    [validateData, validateLabels] = utilities.dataLoadFromEDF(
        self, validateFiles, triggers, onsets, finOnsets, self.params)

    numChannels = validateData.shape[1]
    numSamples = validateData.shape[0]

    # One mean spectral value per selected channel and frequency band.
    fvecLen = len(self.params.channelSelect)
    fVecs = np.zeros((numSamples, fvecLen))
    for k in range(numSamples):
        # print("Fvec: " + str(k) + " of " + str(numSamples))
        for i in range(fvecLen):
            sample = np.squeeze(validateData[k, i, :])
            if self.params.fDiaps is not None:
                self.params.lowFreq = self.params.fDiaps[i][0]
                self.params.highFreq = self.params.fDiaps[i][1]
            specVal = np.mean(utilities.get1DFeatures(sample, self.params))
            fVecs[k, i] = specVal

    # Norm! (use the training mean / std)
    for i in range(fVecs.shape[0]):
        fVecs[i, :] = (fVecs[i, :] - self.trainResult.mean) / self.trainResult.std

    fTransformed = fVecs
    if self.params.usePCA:
        fTransformed = self.trainResult.pcaOp.transform(fVecs)
    if self.params.finalClassifier is not None:
        fTransformed = utilities.applyClassifier(
            self.params.finalClassifier, self.trainResult.finalOp, fTransformed)

    result = np.zeros((fTransformed.shape[0]))
    for i, fvec in enumerate(fTransformed):
        if self.params.distanceFun is not None:
            result[i] = self.params.distanceFun(
                self.trainResult.trainTransformedVecs,
                self.trainResult.trainLabels, fvec, self.params)
        else:
            result[i] = fTransformed[i]

    classRates, confMat = utilities.calcStats(validateLabels, result)
    # print('Class Rates:\n', classRates)
    # print('Confusion Matrix: \n', confMat)
    return classRates, confMat
def validate(self, validateFiles, triggers, onsets, finOnsets):
    [validateData, validateLabels] = utilities.dataLoadFromEDF(
        self, validateFiles, triggers, onsets, finOnsets, self.params)

    # Concatenate the outputs of every trained CSP operator into one feature
    # vector per validation sample.
    fVecs = np.zeros((validateLabels.shape[0],
                      self.numCSP * 2 * len(self.trainResult.cspOp)))
    for i in range(validateLabels.shape[0]):
        fvec = []
        for j in range(len(self.trainResult.cspOp)):
            fvec = np.concatenate(
                (fvec, self.trainResult.cspOp[j].process(validateData[i, :])))
        fVecs[i, :] = fvec

    # Norm! (use the training mean / std)
    for i in range(fVecs.shape[0]):
        fVecs[i, :] = (fVecs[i, :] - self.trainResult.mean) / self.trainResult.std

    fTransformed = fVecs
    if self.params.finalClassifier is not None:
        fTransformed = utilities.applyClassifier(
            self.params.finalClassifier, self.trainResult.finalOp, fTransformed)

    result = np.zeros((fTransformed.shape[0]))
    for i, fvec in enumerate(fTransformed):
        if self.params.distanceFun is not None:
            result[i] = self.params.distanceFun(
                self.trainResult.trainTransformedVecs,
                self.trainResult.trainLabels, fvec, self.params)
        else:
            result[i] = fTransformed[i]

    classRates, confMat = utilities.calcStats(validateLabels, result)
    print('Class Rates:\n', classRates)
    print('Confusion Matrix: \n', confMat)
    return classRates, confMat
def validate(self, reader):
    validateFiles = reader.testdata
    triggers = reader.classtestTriggers
    onsets = reader.classtestOnsets
    finOnsets = reader.classtestFinOnsets
    [validateData, validateLabels] = utilities.dataLoadFromEDF(
        self, validateFiles, triggers, onsets, finOnsets, self.params)

    numChannels = validateData.shape[1]
    numSamples = validateData.shape[0]

    # Work out the per-channel feature length for the selected mode.
    if self.mode == 'IndepChan':
        tmpVec = utilities.get1DFeatures(np.squeeze(validateData[0, 0, :]),
                                         self.params)
        sumFvecLen = len(tmpVec)
    if self.mode == 'CustomFreqs':
        sumFvecLen = len(self.params.channelSelect)
    if self.mode == 'Classic':
        sumFvecLen = 0
        for i in range(numChannels):
            if self.params.fDiaps is not None:
                self.params.lowFreq = self.params.fDiaps[i][0]
                self.params.highFreq = self.params.fDiaps[i][1]
            tmpVec = utilities.get1DFeatures(
                np.squeeze(validateData[0, i, :]), self.params)
            sumFvecLen = sumFvecLen + len(tmpVec)

    # Extract features for every validation sample and channel.
    fVecs = np.zeros((numSamples, numChannels, sumFvecLen))
    for k in range(numSamples):
        # cust classic = 1
        curFvec = np.zeros((0))
        # print("Fvec: " + str(k) + " of " + str(numSamples))
        for i in range(numChannels):
            if self.params.fDiaps is not None:
                self.params.lowFreq = self.params.fDiaps[i][0]
                self.params.highFreq = self.params.fDiaps[i][1]
            tmpVec = utilities.get1DFeatures(
                np.squeeze(validateData[k, i, :]), self.params)
            if self.mode == 'IndepChan':
                # HOW ABOUT RESPECTING THE DIMENSIONS OF FVECS? [k, i, :]
                fVecs[k, i, :] = tmpVec
            if self.mode == 'CustomFreqs':
                # !!!!!!! was not averaged while sumFvecLen was counted!
                specVal = np.mean(tmpVec)
                fVecs[k, i, 0] = specVal  # [k, i]
            if self.mode == 'Classic':
                curFvec = np.concatenate((curFvec, tmpVec), axis=0)
        if self.mode == 'Classic':
            fVecs[k, i, :] = curFvec  # [k, :]

    # Norm!
    if self.mode == 'Classic' or self.mode == 'CustomFreqs':
        # Note: recomputes mean/std from the validation features, unlike the
        # other validate() variants, which reuse the training statistics.
        self.trainResult.mean = np.mean(fVecs, 0)
        self.trainResult.std = np.std(fVecs, 0)
        for i in range(fVecs.shape[0]):
            fVecs[i, :, :] = (fVecs[i, :, :] -
                              self.trainResult.mean) / self.trainResult.std
        fTransformed = fVecs
        # PCA
        if self.params.usePCA:
            fTransformed = self.trainResult.pcaOp.transform(fVecs)

    # Norm!
    if self.mode == 'IndepChan':
        fTransformed = np.zeros((fVecs.shape[0], fVecs.shape[1] * fVecs.shape[2]))
        # PCA
        if self.params.usePCA:
            fTransformed = np.zeros(
                (fVecs.shape[0], fVecs.shape[1] * self.params.numPC))
        for i in range(numChannels):
            chanVecs = np.squeeze(fVecs[:, i, :])
            for k in range(chanVecs.shape[0]):
                chanVecs[k, :] = (chanVecs[k, :] - self.trainResult.mean[i]
                                  ) / self.trainResult.std[i]
            if self.params.usePCA:
                fTransformed[:, i * self.params.numPC:(i + 1) * self.params.numPC] = \
                    self.trainResult.pcaOp[i].transform(chanVecs)
            else:
                fTransformed[:, i * fVecs.shape[2]:(i + 1) * fVecs.shape[2]] = chanVecs

    if self.params.finalClassifier is not None:
        fTransformed = utilities.applyClassifier(
            self.params.finalClassifier, self.trainResult.finalOp, fTransformed)

    result = np.zeros((fTransformed.shape[0]))
    for i, fvec in enumerate(fTransformed):
        if self.params.distanceFun is not None:
            result[i] = self.params.distanceFun(
                self.trainResult.trainTransformedVecs,
                self.trainResult.trainLabels, fvec, self.params)
        else:
            result[i] = fTransformed[i]

    classRates, confMat = utilities.calcStats(validateLabels, result)
    # print('Class Rates:\n', classRates)
    # print('Confusion Matrix: \n', confMat)
    return classRates, confMat
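
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only; not taken from the sources above).
# It shows how one of the validate() variants is expected to be driven after
# training: the reader must expose testdata, classtestTriggers, classtestOnsets
# and classtestFinOnsets (attribute names taken from the code above), while the
# classifier class name, its constructor, and its train() entry point are
# assumptions here.
#
#     classifier = SomeClassifier(params)               # hypothetical class
#     classifier.train(reader)                          # assumed training step
#     classRates, confMat = classifier.validate(reader)
#     print('Class Rates:\n', classRates)
#     print('Confusion Matrix:\n', confMat)
# ---------------------------------------------------------------------------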