def train(self, reader, doBalanceLabels):
    trainFiles = reader.traindata
    triggers = reader.classtrainTriggers
    onsets = reader.classtrainOnsets
    finOnsets = reader.classtrainFinOnsets
    [self.trainData, self.labels] = utilities.dataLoadFromEDF(self, trainFiles, triggers, onsets, finOnsets, self.params)
    if doBalanceLabels:
        [self.trainData, self.labels] = utilities.balance_labels(self.trainData, self.labels)

    numChannels = self.trainData.shape[1]
    numSamples = self.trainData.shape[0]
    fvecLen = len(self.params.channelSelect)

    fVecs = np.zeros((numSamples, fvecLen))
    for k in range(numSamples):
        for i in range(fvecLen):
            sample = np.squeeze(self.trainData[k, i, :])
            if self.params.fDiaps is not None:
                self.params.lowFreq = self.params.fDiaps[i][0]
                self.params.highFreq = self.params.fDiaps[i][1]
            # Collapse the 1D features of this channel into a single mean value
            specVal = np.mean(utilities.get1DFeatures(sample, self.params))
            fVecs[k, i] = specVal

    #Shuffle!
    inds = np.random.permutation(fVecs.shape[0])
    fVecs = fVecs[inds, :]
    labels = self.labels[inds]

    self.trainResult.mean = np.mean(fVecs, 0)
    self.trainResult.std = np.std(fVecs, 0)
    #Norm!
    for i in range(fVecs.shape[0]):
        fVecs[i, :] = (fVecs[i, :] - self.trainResult.mean) / self.trainResult.std

    fTransformed = fVecs
    # PCA!
    if self.params.usePCA:
        pcaTransform = PCA(self.params.numPC)
        pcaTransform.fit(fVecs)
        self.trainResult.pcaOp = pcaTransform
        fTransformed = pcaTransform.transform(fVecs)

    #LDA!
    if self.params.finalClassifier is not None:
        [Op, fTransformed] = utilities.trainClassifier(self.params.finalClassifier, fTransformed, labels)
        self.trainResult.finalOp = Op

    if self.params.distanceFun is not None:
        self.trainResult.trainTransformedVecs = fTransformed
        self.trainResult.trainLabels = labels
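# --- Sketch (not part of the pipeline above): the same z-score -> optional PCA -> LDA
# backbone on synthetic feature vectors, using plain sklearn. All shapes, data and the
# binary labels here are made up for illustration.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 16))        # 200 epochs x 16 band-power features
y = rng.integers(0, 2, size=200)      # synthetic binary labels

mean, std = X.mean(axis=0), X.std(axis=0)
Xn = (X - mean) / std                 # z-score with the training statistics

pca = PCA(n_components=8).fit(Xn)     # optional dimensionality reduction
Xp = pca.transform(Xn)

lda = LinearDiscriminantAnalysis().fit(Xp, y)
print("train accuracy:", lda.score(Xp, y))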
def train(self, trainFiles, triggers, onsets, finOnsets, doBalanceLabels):
    [self.trainData, self.labels] = utilities.dataLoadFromEDF(self, trainFiles, triggers, onsets, finOnsets, self.params)
    self.numClasses = len(set(self.labels))
    if doBalanceLabels:
        [self.trainData, self.labels] = utilities.balance_labels(self.trainData, self.labels)

    # Train one pairwise SWCSP model per class pair (one-vs-one scheme)
    cspModels = []
    for i in range(self.numClasses):
        for j in range(self.numClasses):
            if i < j:
                print("Training SWCSP Model for {} vs {}".format(i, j))
                classFts1 = self.trainData[self.labels == i, :, :]
                classFts2 = self.trainData[self.labels == j, :, :]
                S = [classFts1, classFts2]
                cspWorker = SWCSP.SWCSP(self.params.Fs, self.numCSP)
                cspWorker.train(S)
                cspModels.append(cspWorker)

    # Concatenate the features of all pairwise models into one vector per epoch
    fVecs = np.zeros((self.labels.shape[0], self.numCSP * 2 * len(cspModels)))
    for i in range(self.labels.shape[0]):
        fvec = []
        for j in range(len(cspModels)):
            fvec = np.concatenate((fvec, cspModels[j].process(self.trainData[i, :])))
        fVecs[i, :] = fvec
    self.trainResult.cspOp = cspModels

    #Shuffle!
    inds = np.random.permutation(fVecs.shape[0])
    fVecs = fVecs[inds, :]
    labels = self.labels[inds]

    self.trainResult.mean = np.mean(fVecs, 0)
    self.trainResult.std = np.std(fVecs, 0)
    #Norm!
    for i in range(fVecs.shape[0]):
        fVecs[i, :] = (fVecs[i, :] - self.trainResult.mean) / self.trainResult.std

    fTransformed = fVecs
    # LDA!
    if self.params.finalClassifier is not None:
        [Op, fTransformed] = utilities.trainClassifier(self.params.finalClassifier, fTransformed, labels)
        self.trainResult.finalOp = Op

    if self.params.distanceFun is not None:
        self.trainResult.trainTransformedVecs = fTransformed
        self.trainResult.trainLabels = labels
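# --- Sketch (illustration only): the i < j double loop above enumerates unordered class
# pairs for the one-vs-one SWCSP scheme; itertools.combinations expresses the same thing.
# The "models" below are stand-in tuples, not real SWCSP workers, and the shapes are made up.
import numpy as np
from itertools import combinations

numClasses, numCSP = 3, 2
epochs = np.random.randn(30, 8, 128)               # 30 epochs x 8 channels x 128 samples
labels = np.random.randint(0, numClasses, 30)

pair_models = []
for i, j in combinations(range(numClasses), 2):    # (0, 1), (0, 2), (1, 2)
    # a real implementation would fit SWCSP on epochs[labels == i] vs epochs[labels == j]
    pair_models.append((i, j))

# each pair contributes 2 * numCSP features, concatenated per epoch
fvec_len = 2 * numCSP * len(pair_models)
print(len(pair_models), "pairwise models,", fvec_len, "features per epoch")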
def train(self, reader, doBalanceLabels):
    trainFiles = reader.traindata
    triggers = reader.classtrainTriggers
    onsets = reader.classtrainOnsets
    finOnsets = reader.classtrainFinOnsets
    [self.trainData, self.labels] = utilities.dataLoadFromEDF(self, trainFiles, triggers, onsets, finOnsets, self.params)
    self.numClasses = len(set(self.labels))
    if doBalanceLabels:
        [self.trainData, self.labels] = utilities.balance_labels(self.trainData, self.labels)

    numSamples = self.trainData.shape[0]
    # Probe one sample to determine the shape of the 2D feature maps
    tmpVec = utilities.get2DFeatures(np.squeeze(self.trainData[0, :, :]), self.params)
    if np.ndim(tmpVec) == 3:
        fVecs = np.zeros((numSamples, tmpVec.shape[0], tmpVec.shape[1], tmpVec.shape[2]))
    if np.ndim(tmpVec) == 2:
        fVecs = np.zeros((numSamples, tmpVec.shape[0], tmpVec.shape[1]))

    for k in range(numSamples):
        curFvec = utilities.get2DFeatures(np.squeeze(self.trainData[k, :, :]), self.params)
        if np.ndim(tmpVec) == 3:
            fVecs[k, :, :, :] = curFvec
        if np.ndim(tmpVec) == 2:
            fVecs[k, :, :] = curFvec

    #Shuffle!
    inds = np.random.permutation(fVecs.shape[0])
    if np.ndim(tmpVec) == 3:
        fVecs = fVecs[inds, :, :, :]
    if np.ndim(tmpVec) == 2:
        fVecs = fVecs[inds, :, :]
    labels = self.labels[inds]

    # ConvNet!
    model = self.neuralFun(self.numClasses)
    model.trainModel(fVecs, labels, self.batchSize, self.numEpochs)
    self.trainResult.model = model
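# --- Sketch of what a `neuralFun`-style model *could* look like; this is an assumption,
# not the project's actual network or its trainModel() interface. It builds a tiny Keras
# CNN over 2D feature maps shaped (height, width, 1) and trains it on synthetic data.
import numpy as np
import tensorflow as tf

def make_model(num_classes, input_shape=(32, 32, 1)):
    return tf.keras.Sequential([
        tf.keras.layers.Conv2D(8, (3, 3), activation="relu", input_shape=input_shape),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(num_classes, activation="softmax"),
    ])

fVecs = np.random.randn(64, 32, 32, 1)            # synthetic 2D feature maps
labels = np.random.randint(0, 3, 64)              # synthetic integer class labels
model = make_model(num_classes=3)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.fit(fVecs, labels, batch_size=16, epochs=2, verbose=0)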
def train(self, reader, doBalanceLabels):
    trainFiles = reader.traindata
    triggers = reader.classtrainTriggers
    onsets = reader.classtrainOnsets
    finOnsets = reader.classtrainFinOnsets
    [self.trainData, self.labels] = utilities.dataLoadFromEDF(self, trainFiles, triggers, onsets, finOnsets, self.params)
    if doBalanceLabels:
        [self.trainData, self.labels] = utilities.balance_labels(self.trainData, self.labels)

    # Fit CSP and use its log-variance projections as features
    csp = CSP(n_components=self.numCSP, reg=None, log=True, norm_trace=False)
    csp.fit(self.trainData, self.labels)
    fVecs = csp.transform(self.trainData)
    self.trainResult.cspOp = csp

    #Shuffle!
    inds = np.random.permutation(fVecs.shape[0])
    fVecs = fVecs[inds, :]
    labels = self.labels[inds]

    self.trainResult.mean = np.mean(fVecs, 0)
    self.trainResult.std = np.std(fVecs, 0)
    #Norm!
    for i in range(fVecs.shape[0]):
        fVecs[i, :] = (fVecs[i, :] - self.trainResult.mean) / self.trainResult.std

    fTransformed = fVecs
    #LDA!
    if self.params.finalClassifier is not None:
        [Op, fTransformed] = utilities.trainClassifier(self.params.finalClassifier, fTransformed, labels)
        self.trainResult.finalOp = Op

    self.trainResult.trainTransformedVecs = fTransformed
    self.trainResult.trainLabels = labels
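# --- Standalone sketch of the mne.decoding.CSP call used above, on synthetic epochs;
# the shapes and class count are made up. fit() expects (n_epochs, n_channels, n_times)
# and, with log=True, transform() returns log-variance features of shape (n_epochs, n_components).
import numpy as np
from mne.decoding import CSP

epochs = np.random.randn(40, 8, 250)      # 40 epochs x 8 channels x 250 samples
labels = np.random.randint(0, 2, 40)      # synthetic binary labels

csp = CSP(n_components=4, reg=None, log=True, norm_trace=False)
features = csp.fit_transform(epochs, labels)
print(features.shape)                     # (40, 4)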
def train(self, reader, doBalanceLabels):
    trainFiles = reader.traindata
    triggers = reader.classtrainTriggers
    onsets = reader.classtrainOnsets
    finOnsets = reader.classtrainFinOnsets
    [self.trainData, self.labels] = utilities.dataLoadFromEDF(self, trainFiles, triggers, onsets, finOnsets, self.params)
    if doBalanceLabels:
        [self.trainData, self.labels] = utilities.balance_labels(self.trainData, self.labels)

    numChannels = self.trainData.shape[1]
    numSamples = self.trainData.shape[0]

    # Probe every channel once to find the total length of the concatenated feature vector
    sumFvecLen = 0
    for ch in range(numChannels):
        if self.params.fDiaps is not None:
            self.params.lowFreq = self.params.fDiaps[ch][0]
            self.params.highFreq = self.params.fDiaps[ch][1]
        tmpVec = utilities.get1DFeatures(np.squeeze(self.trainData[0, ch, :]), self.params)
        sumFvecLen = sumFvecLen + len(tmpVec)

    fVecs = np.zeros((numSamples, sumFvecLen))
    for k in range(numSamples):
        curFvec = np.zeros((0))
        for i in range(numChannels):
            if self.params.fDiaps is not None:
                self.params.lowFreq = self.params.fDiaps[i][0]
                self.params.highFreq = self.params.fDiaps[i][1]
            chVec = utilities.get1DFeatures(np.squeeze(self.trainData[k, i, :]), self.params)
            curFvec = np.concatenate((curFvec, chVec), axis=0)
        fVecs[k, :] = curFvec

    #Shuffle!
    inds = np.random.permutation(fVecs.shape[0])
    fVecs = fVecs[inds, :]
    labels = self.labels[inds]

    self.trainResult.mean = np.mean(fVecs, 0)
    self.trainResult.std = np.std(fVecs, 0)
    #Norm!
    for i in range(fVecs.shape[0]):
        fVecs[i, :] = (fVecs[i, :] - self.trainResult.mean) / self.trainResult.std

    # PCA!
    fTransformed = fVecs
    if self.params.usePCA:
        pcaTransform = PCA(self.params.numPC)
        pcaTransform.fit(fVecs)
        self.trainResult.pcaOp = pcaTransform
        fTransformed = pcaTransform.transform(fVecs)

    #LDA!
    if self.params.finalClassifier is not None:
        [Op, fTransformed] = utilities.trainClassifier(self.params.finalClassifier, fTransformed, labels)
        self.trainResult.finalOp = Op

    self.trainResult.trainLabels = labels
    if self.params.distanceFun is not None:
        self.trainResult.trainTransformedVecs = fTransformed
def train(self, reader, doBalanceLabels):
    trainFiles = reader.traindata
    triggers = reader.classtrainTriggers
    onsets = reader.classtrainOnsets
    finOnsets = reader.classtrainFinOnsets
    [self.trainData, self.labels] = utilities.dataLoadFromEDF(self, trainFiles, triggers, onsets, finOnsets, self.params)
    if doBalanceLabels:
        [self.trainData, self.labels] = utilities.balance_labels(self.trainData, self.labels)

    numChannels = self.trainData.shape[1]
    numSamples = self.trainData.shape[0]

    # Probe one channel to find the per-channel feature length
    tmpVec = utilities.get1DFeatures(np.squeeze(self.trainData[0, 0, :]), self.params)
    fvecLen = len(tmpVec)

    fVecs = np.zeros((numSamples, numChannels, fvecLen))
    for k in range(numSamples):
        for i in range(numChannels):
            fVecs[k, i, :] = utilities.get1DFeatures(np.squeeze(self.trainData[k, i, :]), self.params)

    #Shuffle!
    inds = np.random.permutation(fVecs.shape[0])
    fVecs = fVecs[inds, :, :]
    labels = self.labels[inds]

    #Norm! (each channel is z-scored with its own statistics)
    for i in range(fVecs.shape[1]):
        chanVecs = np.squeeze(fVecs[:, i, :])
        self.trainResult.mean.append(np.mean(chanVecs, 0))
        self.trainResult.std.append(np.std(chanVecs, 0))
        for k in range(chanVecs.shape[0]):
            chanVecs[k, :] = (chanVecs[k, :] - self.trainResult.mean[i]) / self.trainResult.std[i]
        fVecs[:, i, :] = chanVecs

    fTransformed = np.zeros((fVecs.shape[0], fVecs.shape[1] * fVecs.shape[2]))
    if self.params.usePCA:
        fTransformed = np.zeros((fVecs.shape[0], fVecs.shape[1] * self.params.numPC))

    # PCA or reshaping: each channel block is either PCA-reduced or copied as-is
    for i in range(numChannels):
        chanVecs = np.squeeze(fVecs[:, i, :])
        if self.params.usePCA:
            curPcaTransform = PCA(self.params.numPC)
            curPcaTransform.fit(chanVecs)
            self.trainResult.pcaOp.append(curPcaTransform)
            fTransformed[:, i * self.params.numPC:(i + 1) * self.params.numPC] = curPcaTransform.transform(chanVecs)
        else:
            fTransformed[:, i * fVecs.shape[2]:(i + 1) * fVecs.shape[2]] = chanVecs

    #LDA!
    if self.params.finalClassifier is not None:
        [Op, fTransformed] = utilities.trainClassifier(self.params.finalClassifier, fTransformed, labels)
        self.trainResult.finalOp = Op

    if self.params.distanceFun is not None:
        self.trainResult.trainTransformedVecs = fTransformed
        self.trainResult.trainLabels = labels
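# --- Sketch (synthetic data) of the per-channel variant above: each channel's feature
# block is z-scored and reduced with its own PCA, then the reduced blocks are concatenated
# into one flat vector per epoch. Shapes and component counts are made up for illustration.
import numpy as np
from sklearn.decomposition import PCA

numSamples, numChannels, fvecLen, numPC = 100, 4, 20, 5
fVecs = np.random.randn(numSamples, numChannels, fvecLen)

means, stds, pcas = [], [], []
fTransformed = np.zeros((numSamples, numChannels * numPC))
for i in range(numChannels):
    chan = fVecs[:, i, :]
    means.append(chan.mean(axis=0))
    stds.append(chan.std(axis=0))
    chan = (chan - means[i]) / stds[i]              # per-channel z-score
    pca = PCA(numPC).fit(chan)                      # per-channel PCA
    pcas.append(pca)
    fTransformed[:, i * numPC:(i + 1) * numPC] = pca.transform(chan)
print(fTransformed.shape)                           # (100, 20)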
def train(self, reader, doBalanceLabels):
    trainFiles = reader.traindata
    triggers = reader.classtrainTriggers
    onsets = reader.classtrainOnsets
    finOnsets = reader.classtrainFinOnsets
    [self.trainData, self.labels] = utilities.dataLoadFromEDF(self, trainFiles, triggers, onsets, finOnsets, self.params)
    if doBalanceLabels:
        [self.trainData, self.labels] = utilities.balance_labels(self.trainData, self.labels)

    numChannels = self.trainData.shape[1]
    numSamples = self.trainData.shape[0]

    # Determine the per-channel feature length for the selected mode
    if self.mode == 'IndepChan':
        tmpVec = utilities.get1DFeatures(np.squeeze(self.trainData[0, 0, :]), self.params)
        sumFvecLen = len(tmpVec)
        print(sumFvecLen, self.trainData.shape)
        print(self.trainData[0, 0, :])
    if self.mode == 'CustomFreqs':
        sumFvecLen = len(self.params.channelSelect)
        print(sumFvecLen, '\n', self.params.channelSelect)
    if self.mode == 'Classic':
        sumFvecLen = 0
        for i in range(numChannels):
            if self.params.fDiaps is not None:
                self.params.lowFreq = self.params.fDiaps[i][0]
                self.params.highFreq = self.params.fDiaps[i][1]
            tmpVec = utilities.get1DFeatures(np.squeeze(self.trainData[0, i, :]), self.params)
            sumFvecLen = sumFvecLen + len(tmpVec)

    fVecs = np.zeros((numSamples, numChannels, sumFvecLen))
    # Expected shapes (Fastovets):
    #   IndepChan   [nchan = 75 x nvecs = 335 x vecLen = 49]   sumFvecLen = 139
    #   Classic     [nchan = 75 x nvecs = 335 x vecLen = 49]   sumFvecLen = 2293
    #   CustomFreqs [nchan = 75 x nvecs = 335 x vecLen = 29]   tmpVec length = 16

    for k in range(numSamples):
        curFvec = np.zeros((0))
        for i in range(numChannels):
            if self.params.fDiaps is not None:
                self.params.lowFreq = self.params.fDiaps[i][0]
                self.params.highFreq = self.params.fDiaps[i][1]
            tmpVec = utilities.get1DFeatures(np.squeeze(self.trainData[k, i, :]), self.params)
            if self.mode == 'IndepChan':
                fVecs[k, i, :] = tmpVec  # must respect the dimensionality of fVecs: [k, i, :]
            if self.mode == 'CustomFreqs':
                specVal = np.mean(tmpVec)  # NOTE: the mean was not taken when sumFvecLen was counted!
                fVecs[k, i, 0] = specVal  # intended indexing: [k, i]
            if self.mode == 'Classic':
                curFvec = np.concatenate((curFvec, tmpVec), axis=0)
        if self.mode == 'Classic':
            fVecs[k, i, :] = curFvec  # intended indexing: [k, :]

    # Shuffle!
    inds = np.random.permutation(fVecs.shape[0])
    fVecs = fVecs[inds, :, :]
    labels = self.labels[inds]

    # Norm!
    if self.mode == 'Classic' or self.mode == 'CustomFreqs':
        self.trainResult.mean = np.mean(fVecs, 0)
        self.trainResult.std = np.std(fVecs, 0)
        for i in range(fVecs.shape[0]):
            fVecs[i, :, :] = (fVecs[i, :, :] - self.trainResult.mean) / self.trainResult.std

    fTransformed = fVecs
    # PCA (per channel, following the original "cycle! as in INDEPCHAN" note)
    if self.params.usePCA:
        fTransformed = np.zeros((fVecs.shape[0], fVecs.shape[1] * self.params.numPC))
        for i in range(fVecs.shape[1]):
            tmp = fVecs[:, i, :]
            pcaTransform = PCA(self.params.numPC)
            pcaTransform.fit(tmp)
            self.trainResult.pcaOp.append(pcaTransform)
            fTransformed[:, i * self.params.numPC:(i + 1) * self.params.numPC] = pcaTransform.transform(tmp)