# -*- coding: utf-8 -*-
# P300 analysis: CSP spatial filtering + Fisher's discriminant (FDA)
# classification for an online P300 speller (Python 2).
import os
import inspect

import numpy as np
import pylab as py
import scipy.stats as st
from scipy.linalg import eig

# DataAnalysis provides the per-epoch preprocessing (filtering and
# moving-average downsampling). The module path below is an assumption;
# adjust it to wherever DataAnalysis lives in this codebase.
from signalAnalysis import DataAnalysis
class P300_analysis(object):
    def __init__(self, sampling, cfg={}, fields=8):
        self.fs = sampling
        self.fields = fields

        self.defineConst(cfg)
        self.defineMethods()

    def defineConst(self, cfg):
        # Moving average & resample factor
        self.avrM = int(cfg['avrM'])

        # Number of CSP vectors to concatenate
        self.conN = int(cfg['conN'])

        # Analysis time window [s]
        self.csp_time = np.array(cfg['csp_time'])
        self.tInit, self.tFin = self.csp_time
        self.iInit, self.iFin = np.floor(self.csp_time*self.fs)

        self.chL = len(cfg['use_channels'].split(';'))
        self.arrL = self.avrM

        self.nLast = int(cfg['nLast'])
        self.nMin = int(cfg['nMin'])
        self.nMax = int(cfg['nMax'])

        self.dec = -1

        # Arrays
        self.flashCount = np.zeros(self.fields)  # flash counter per field
        self.dArr = np.zeros(self.fields)        # accumulated d values
        self.dArrTotal = {}
        for i in range(self.fields):
            self.dArrTotal[i] = np.array([])

        self.sAnalArr = {}
        for i in range(self.fields):
            self.sAnalArr[i] = np.zeros(self.arrL*self.conN)

        # For statistical analysis
        self.per = np.zeros(self.fields)
        self.pPer = float(cfg['pPercent'])
        self.pdf = np.array(cfg['pdf'])

    def newEpoch(self):
        self.flashCount = np.zeros(self.fields)  # flash counter per field
        self.dArr = np.zeros(self.fields)        # accumulated d values
        for i in range(self.fields):
            self.dArrTotal[i] = np.array([])

    def defineMethods(self):
        # Declare helper objects
        self.sp = DataAnalysis(self.fs)
        self.sp.initConst(self.avrM, self.conN, self.csp_time)
        #~ self.sp.set_lowPass_filter_ds(self.avrM/(self.csp_time[1]-self.csp_time[0]))

    def prepareSignal(self, signal):
        return self.sp.prepareSignal(signal)

    def testData(self, signal, blink):
        # Analyze the signal once data for that flash is needed
        s = np.empty(self.avrM*self.conN)
        for con in range(self.conN):
            s[con*self.avrM:(con+1)*self.avrM] = self.prepareSignal(np.dot(self.P[:, con], signal))

        # Data projection onto Fisher's discriminant
        self.d = np.dot(s, self.w) - self.c

        self.dArr[blink] += self.d
        self.dArrTotal[blink] = np.append(self.d, self.dArrTotal[blink])
        self.dArrTotal[blink] = self.dArrTotal[blink][:self.nMax]

        self.flashCount[blink] += 1

    def isItEnought(self):
        print "self.flashCount: ", self.flashCount
        if (self.flashCount <= self.nMin).any():
            return -1
        elif (self.flashCount >= self.nMax).all():
            # The original fell through without returning here; the
            # decision has to be passed back to the caller.
            return self.forceDecision()
        else:
            return self.testSignificances()

    def testSignificances(self):
        """
        Tests the significance of the d values. For each field the mean d
        over the last nLast flashes is scored against the nontarget pdf.
        If exactly one field exceeds the pPer percentile, its index is
        returned as the P300 target; otherwise -1 is returned.
        """
        print "++ testSignificances ++"
        dMean = np.zeros(self.fields)
        for i in range(self.fields):
            dMean[i] = self.dArrTotal[i][:self.nLast].mean()

        self.per = [st.percentileofscore(self.pdf, x) for x in dMean]
        self.per = np.array(self.per)
        print "percentile: ", self.per

        # If exactly one value is significantly distant
        if np.sum(self.per > self.pPer) == 1:
            self.dec = np.arange(self.fields)[self.per == self.per.max()]
            self.dec = np.int(self.dec[0])
            print "selected -- {0}".format(self.dec)
            return self.dec
        else:
            return -1

    def forceDecision(self):
        print " ++ forceDecision ++"
        self.testSignificances()

        # The decision is the field with the largest percentile
        self.dec = np.arange(self.per.shape[0])[self.per == np.max(self.per)]
        self.dec = np.int(self.dec[0])

        # Return int value
        return self.dec

    def setPWC(self, P, w, c):
        self.P = P
        self.w = w
        self.c = c

    def setPdf(self, pdf):
        self.pdf = pdf

    def getDecision(self):
        return self.dec

    def getArrTotalD(self):
        return self.dArrTotal

    def getRecentD(self):
        return self.d

    def getArrD(self):
        return self.dArr

    def getProbabiltyDensity(self):
        """
        Returns percentiles of the mean d values with respect to the
        nontarget probability density function.
        """
        dMean = np.zeros(self.fields)
        nMin = int(self.flashCount.min())
        for i in range(self.fields):
            dMean[i] = self.dArrTotal[i][:nMin].mean()

        p = map(lambda score: st.percentileofscore(self.pdf, score), dMean)
        return p
class P300_train:
    def __init__(self, channels, Fs, avrM, conN, csp_time=[0.2, 0.6], pPer=90):
        self.fs = Fs
        self.csp_time = np.array(csp_time)
        self.channels = channels

        self.defineConst(avrM, conN, pPer)
        self.defineMethods()

    def defineConst(self, avrM, conN, pPer):
        print "*"*5 + inspect.getframeinfo(inspect.currentframe())[2]

        # Define constants
        print "self.csp_time: ", self.csp_time
        self.tInit, self.tFin = self.csp_time
        self.iInit, self.iFin = np.floor(self.csp_time*self.fs)

        self.avrM = avrM  # moving average window length
        self.conN = conN  # number of CSP vectors to concatenate
        self.pPer = pPer  # nontarget percentile threshold

        self.chL = len(self.channels.split(';'))
        self.arrL = self.avrM  # length of data per channel

        # Flags
        self.classifiedFDA = False  # True once trainFDA has run

    def defineMethods(self):
        print "*"*5 + inspect.getframeinfo(inspect.currentframe())[2]
        # Declare helper objects
        self.sp = DataAnalysis(self.fs)
        self.sp.initConst(self.avrM, self.conN, self.csp_time)
        self.sp.set_lowPass_filter_ds(self.avrM/(self.csp_time[1]-self.csp_time[0]))
        #~ self.sp.set_highPass_filter(wp=2., ws=1., gpass=1., gstop=25.)

    def getTargetNontarget(self, signal, targetTags, nontargetTags):
        """
        Divides the signal into two arrays of one-second epochs
        (TRIALS x CHANNELS x DATA): target and nontarget.
        """
        print "*"*5 + inspect.getframeinfo(inspect.currentframe())[2]

        epochLen = int(self.fs)

        # Convert targets
        idxTarget = int(targetTags[0])
        target = signal[np.newaxis, :, idxTarget:idxTarget+epochLen]
        for tag in targetTags[1:]:
            index = int(tag)
            s = signal[np.newaxis, :, index:index+epochLen]
            target = np.concatenate((target, s), axis=0)

        # Convert nontargets (the original sliced with a stale index
        # and a malformed np.newaxis expression; fixed here)
        idxNontarget = int(nontargetTags[0])
        nontarget = signal[np.newaxis, :, idxNontarget:idxNontarget+epochLen]
        for tag in nontargetTags[1:]:
            index = int(tag)
            s = signal[np.newaxis, :, index:index+epochLen]
            nontarget = np.concatenate((nontarget, s), axis=0)

        return target, nontarget

    def prepareSignal(self, s):
        return self.sp.prepareSignal(s)

    def get_filter(self, c_max, c_min):
        """Returns CSP filters.

        Each column of the returned matrix is a filter, sorted in
        descending order, i.e. the first column is the filter that
        explains the most energy, the second the second most, etc.

        Parameters:
        -----------
        c_max : ndarray
            covariance matrix of the signal to maximise
        c_min : ndarray
            covariance matrix of the signal to minimise

        Returns:
        --------
        P : ndarray
            each column is a CSP filter, sorted in descending order
        vals : array-like
            corresponding eigenvalues
        """
        print "*"*5 + inspect.getframeinfo(inspect.currentframe())[2]

        vals, vects = eig(c_max, c_min)
        vals = vals.real
        vals_idx = np.argsort(vals)[::-1]
        P = np.zeros([len(vals), len(vals)])
        for i in xrange(len(vals)):
            #~ P[:,i] = vects[:,vals_idx[i]] / np.sqrt(vals[vals_idx[i]])
            P[:, i] = vects[:, vals_idx[i]]
        return P, vals[vals_idx]

    def train_csp(self, target, nontarget):
        """Creates covariance matrices to be filtered with CSP.

        Parameters:
        -----------
        target : ndarray
            target signal matrix of shape TRIALS x CHANNELS x DATA
        nontarget : ndarray
            nontarget signal matrix of shape TRIALS x CHANNELS x DATA

        Returns:
        --------
        P : ndarray
            each column is a CSP filter, sorted in descending order
        vals : array-like
            corresponding eigenvalues
        """
        print "*"*5 + inspect.getframeinfo(inspect.currentframe())[2]

        # Calculate covariance matrices
        covTarget = np.zeros((self.chL, self.chL))
        covNontarget = np.zeros((self.chL, self.chL))

        trgTrialsNo = target.shape[0]
        ntrgTrialsNo = nontarget.shape[0]

        # Target
        for idx in range(trgTrialsNo):
            covTarget += np.cov(np.matrix(target[idx]))

        # Nontarget
        for idx in range(ntrgTrialsNo):
            covNontarget += np.cov(np.matrix(nontarget[idx]))

        covTarget /= trgTrialsNo
        covNontarget /= ntrgTrialsNo

        return self.get_filter(covTarget, covNontarget + covTarget)

    def crossCheck(self, signal, target, nontarget):
        """
        Cross validation over target and nontarget (2 groups).
        """
        print "*"*5 + inspect.getframeinfo(inspect.currentframe())[2]
        return self.valid_kGroups(signal, target, nontarget, 2)

    def valid_kGroups(self, signal, target, nontarget, K):
        """
        Validates the classifier: divides the data into K groups, then
        trains on K-1 groups and tests on the remaining one.
        """
        print "*"*5 + inspect.getframeinfo(inspect.currentframe())[2]

        # Determine the CSP filter matrix
        self.P, vals = self.train_csp(target, nontarget)

        # Divide data into K groups
        target_kGroups = self.divideData(target, K)
        nontarget_kGroups = self.divideData(nontarget, K)

        # Determine shapes of the training matrices
        tShape = target_kGroups.shape
        train_tShape = ((tShape[0]-1)*tShape[1], tShape[2], tShape[3])
        ntShape = nontarget_kGroups.shape
        train_ntShape = ((ntShape[0]-1)*ntShape[1], ntShape[2], ntShape[3])

        # Buffers for d values, used to estimate their distributions
        dWholeTarget = np.zeros(tShape[0]*tShape[1])
        dWholeNontarget = np.zeros(ntShape[0]*ntShape[1])

        # For each group
        for i in range(K):
            # One group is the test group
            target_test = target_kGroups[i]
            nontarget_test = nontarget_kGroups[i]

            # The remaining groups form the training set
            target_train = np.delete(target_kGroups, i, axis=0)
            target_train = target_train.reshape(train_tShape)
            nontarget_train = np.delete(nontarget_kGroups, i, axis=0)
            nontarget_train = nontarget_train.reshape(train_ntShape)

            # Determine the FDA classifier values
            w, c = self.trainFDA(target_train, nontarget_train)

            # Test data
            dTarget, dNontarget = self.analyseData(target_test, nontarget_test, w, c)

            # Save d values
            dWholeTarget[i*len(dTarget):(i+1)*len(dTarget)] = dTarget
            dWholeNontarget[i*len(dNontarget):(i+1)*len(dNontarget)] = dNontarget

        self.saveDisributions(dWholeTarget, dWholeNontarget)
        meanDiff = self.compareDistributions(dWholeTarget, dWholeNontarget)
        return meanDiff

    def compareDistributions(self, target, nontarget):
        # Mann-Whitney U statistic of the two d-value distributions
        result = st.mannwhitneyu(nontarget, target)[0]
        return result

    def analyseData(self, target, nontarget, w, c):
        """
        Calculates d values for all given targets and nontargets.
        Used by the K-fold validation tests.
        """
        print "*"*5 + inspect.getframeinfo(inspect.currentframe())[2]

        sTargetAnal = np.zeros((target.shape[0], w.shape[0]))
        sNontargetAnal = np.zeros((nontarget.shape[0], w.shape[0]))

        # Length of one concatenated block (integer division in Python 2)
        step = w.shape[0]/self.conN

        # Depending on how many CSP eigenvectors are considered...
        for idx in range(self.conN):
            # Dot product over all channels (CSP filter); returns an
            # array of shape (trials, data).
            productTrg = np.dot(self.P[:, idx], target)
            productNtrg = np.dot(self.P[:, idx], nontarget)

            # Each epoch is analysed: filtered, downsampled...
            analProductTrg = map(lambda sig: self.prepareSignal(sig), productTrg)
            analProductNtrg = map(lambda sig: self.prepareSignal(sig), productNtrg)

            # Fill the prepared buffers
            sTargetAnal[:, idx*step:(idx+1)*step] = analProductTrg
            sNontargetAnal[:, idx*step:(idx+1)*step] = analProductNtrg

        # Calculate d values as: d = s*w - c
        dTarget = np.dot(sTargetAnal, w) - c
        dNontarget = np.dot(sNontargetAnal, w) - c

        return dTarget, dNontarget

    def divideData(self, D, K):
        """
        Separates the given data into K groups for K-fold validation.

        Takes:
            D -- numpy 3D matrix: EPOCH x CHANNEL x DATA
            K -- number of groups to divide into
        Returns:
            kGroups -- numpy 4D matrix: GROUP_NO x EPOCH x CHANNEL x DATA
        """
        print "*"*5 + inspect.getframeinfo(inspect.currentframe())[2]

        L = D.shape[0]
        keys = np.arange(L)

        # Pad the key list with random trials until it is divisible into K groups
        while len(keys) % K != 0:
            keys = np.append(keys, np.random.randint(L))

        # Shuffle the list
        np.random.shuffle(keys)

        # Divide the matrix into K groups
        step = len(keys)/K
        kGroups = np.empty((K, step, D.shape[1], D.shape[2]))
        for idx in range(K):
            kGroups[idx] = D[keys[idx*step:(idx+1)*step]]

        return kGroups

    def trainClassifier(self, signal, trgTags, ntrgTags):
        print "*"*5 + inspect.getframeinfo(inspect.currentframe())[2]
        target, nontarget = self.getTargetNontarget(signal, trgTags, ntrgTags)
        self.P, vals = self.train_csp(target, nontarget)
        self.trainFDA(target, nontarget)

    def trainFDA(self, target, nontarget):
        print "*"*5 + inspect.getframeinfo(inspect.currentframe())[2]

        goodAnal = np.empty((target.shape[0], self.conN*self.avrM))
        badAnal = np.empty((nontarget.shape[0], self.conN*self.avrM))

        for con in range(self.conN):
            productCSPTrg = np.dot(self.P[:, con], target)
            productCSPNtrg = np.dot(self.P[:, con], nontarget)

            # Each epoch is analysed: filtered, downsampled...
            analProductCSPTrg = np.array(map(lambda sig: self.prepareSignal(sig), productCSPTrg))
            analProductCSPNtrg = np.array(map(lambda sig: self.prepareSignal(sig), productCSPNtrg))

            goodAnal[:, con*self.avrM:(con+1)*self.avrM] = analProductCSPTrg
            badAnal[:, con*self.avrM:(con+1)*self.avrM] = analProductCSPNtrg

        # Class means and covariances
        gAnalMean = goodAnal.mean(axis=0)
        gAnalCov = np.cov(goodAnal, rowvar=0)
        bAnalMean = badAnal.mean(axis=0)
        bAnalCov = np.cov(badAnal, rowvar=0)

        # Mean difference
        meanAnalDiff = gAnalMean - bAnalMean

        A = gAnalCov + bAnalCov
        invertCovariance = np.linalg.inv(A)

        # w - normal vector of the separating hyperplane
        # c - threshold for the data projection
        self.w = np.dot(invertCovariance, meanAnalDiff)
        self.c = st.scoreatpercentile(np.dot(self.w, badAnal.T), self.pPer)

        np.save('w', self.w)
        np.save('c', self.c)

        self.classifiedFDA = True
        return self.w, self.c

    def isClassifiedFDA(self):
        """
        Returns True if the classifier has already been trained.
        """
        print "*"*5 + inspect.getframeinfo(inspect.currentframe())[2]
        return self.classifiedFDA

    def getPWC(self):
        """
        If the classifier was trained, returns the CSP matrix [P], the
        separating vector [w] and the separation threshold [c].
        Otherwise returns -1.
        """
        print "*"*5 + inspect.getframeinfo(inspect.currentframe())[2]
        if self.classifiedFDA:
            return (self.P, self.w, self.c)
        else:
            return -1

    def saveDisributions(self, dTarget, dNontarget):
        self.dTarget = dTarget
        self.dNontarget = dNontarget

    def getDValDistribution(self):
        return self.dTarget, self.dNontarget

    def saveDist2File(self, targetName, nontargetName):
        np.save(targetName, self.dTarget)
        np.save(nontargetName, self.dNontarget)
class P300_draw(object):
    def __init__(self, fs=128.):
        self.fs = fs
        self.globalNCount = 0
        self.P = None  # CSP matrix; a flat fallback is used if never set

        self.defineMethods()

    def defineMethods(self):
        # Declare helper objects
        self.sp = DataAnalysis(self.fs)
        self.sp.initConst(avrM=self.fs)
        self.sp_ds = DataAnalysis(self.fs)

    def setCalibration(self, target, nontarget):
        # Setting arrays/dicts
        self.target = target
        self.nontarget = nontarget
        self.trgTrials = target.shape[0]
        self.ntrgTrials = nontarget.shape[0]

    def setTimeLine(self, conN, avrM=None, csp_time=[0, 1]):
        self.conN = conN
        self.csp_time = csp_time
        if avrM is None:
            self.avrM = self.fs
        else:
            self.avrM = avrM

    def setCSP(self, P):
        self.P = P

    def plotSignal(self, savefile=None):
        py.clf()
        py.cla()

        # Determine size
        tmp = self.target[0]
        L = self.sp.prepareSignal(tmp[0]).shape[0]

        # P matrix
        if self.P is None:
            self.P = np.ones(tmp.shape)

        # Set time limits
        self.simple_time = np.linspace(0, 1, L)

        # Create buffers
        allCSPTrg = np.empty((self.conN, self.target.shape[0], L))
        allCSPNtrg = np.empty((self.conN, self.nontarget.shape[0], L))
        allMeanCSPTrg = np.empty((self.conN, L))
        allMeanCSPNtrg = np.empty((self.conN, L))

        # Plot simple means
        productMeanTrg = self.target.mean(axis=1)
        productMeanNtrg = self.nontarget.mean(axis=1)

        analProductMeanTrg = np.array(map(lambda sig: self.sp.prepareSignal(sig), productMeanTrg))
        analProductMeanNtrg = np.array(map(lambda sig: self.sp.prepareSignal(sig), productMeanNtrg))

        py.subplot(2, 1+self.conN, 1)  # 1st row
        py.title("Mean target")
        timeArr = np.array(self.simple_time.tolist()*analProductMeanTrg.shape[0]).reshape((-1, L))
        py.plot(timeArr, analProductMeanTrg, 'r.')
        py.plot(self.simple_time, analProductMeanTrg.mean(axis=0), 'g-')

        py.subplot(2, 1+self.conN, 2+self.conN)  # 2nd row
        py.title("Mean nontarget")
        timeArr = np.array(self.simple_time.tolist()*analProductMeanNtrg.shape[0]).reshape((-1, L))
        py.plot(timeArr, analProductMeanNtrg, 'r.')
        # The original plotted the target mean here; the nontarget mean
        # matches the subplot title.
        py.plot(self.simple_time, analProductMeanNtrg.mean(axis=0), 'g-')

        #######################
        ##  Plotting target  ##
        # Depending on how many CSP eigenvectors are considered...
        for con in range(self.conN):
            # Dot product over all channels (CSP filter); returns an
            # array of shape (trials, data).
            productCSPTrg = np.dot(self.P[:, con], self.target)
            productCSPNtrg = np.dot(self.P[:, con], self.nontarget)

            # Each epoch is analysed: filtered, downsampled...
            analProductCSPTrg = np.array(map(lambda sig: self.sp.prepareSignal(sig), productCSPTrg))
            analProductCSPNtrg = np.array(map(lambda sig: self.sp.prepareSignal(sig), productCSPNtrg))

            allCSPTrg[con] = analProductCSPTrg
            allCSPNtrg[con] = analProductCSPNtrg
            allMeanCSPTrg[con] = analProductCSPTrg.mean(axis=0)
            allMeanCSPNtrg[con] = analProductCSPNtrg.mean(axis=0)

            py.subplot(2, 1+self.conN, 2+con)
            py.title("CSP mean target vec" + str(con+1))
            timeArr = np.array(self.simple_time.tolist()*analProductCSPTrg.shape[0]).reshape((-1, L))
            py.plot(timeArr, analProductCSPTrg, 'r-')
            py.plot(self.simple_time, allMeanCSPTrg[con], 'g-')

            py.subplot(2, 1+self.conN, self.conN+1+2+con)
            py.title("CSP mean nontarget vec" + str(con+1))
            timeArr = np.array(self.simple_time.tolist()*analProductCSPNtrg.shape[0]).reshape((-1, L))
            py.plot(timeArr, analProductCSPNtrg, 'r-')
            py.plot(self.simple_time, allMeanCSPNtrg[con], 'g-')

        if savefile:
            py.savefig(savefile, dpi=150)

        ##  Plotting diff  ##
        py.figure()
        meanTarget = analProductMeanTrg.mean(axis=0)
        meanNontarget = analProductMeanNtrg.mean(axis=0)

        py.subplot(1, 1+self.conN, 1)
        py.title("Diff simple mean")
        py.plot(self.simple_time, meanTarget - meanNontarget, 'r-')

        print "self.simple_time.shape: ", self.simple_time.shape
        print "allMeanCSPTrg[0].shape: ", allMeanCSPTrg[0].shape
        print "allMeanCSPNtrg[0].shape: ", allMeanCSPNtrg[0].shape

        for con in range(self.conN):
            py.subplot(1, 1+self.conN, 2+con)
            py.title("Diff CSP mean vec " + str(con+1))
            py.plot(self.simple_time, allMeanCSPTrg[con] - allMeanCSPNtrg[con], 'r-')

        if savefile:
            py.savefig(savefile + '_diff', dpi=150)
        else:
            py.show()

        py.clf()
        py.cla()

    def plotSignal_ds(self, savefile=None):
        py.clf()
        py.cla()

        L = self.avrM

        # P matrix (the original referenced an undefined `tmp` here)
        if self.P is None:
            self.P = np.ones(self.target[0].shape)

        # Create buffers
        allCSPTrg = np.empty((self.conN, self.target.shape[0], self.avrM))
        allCSPNtrg = np.empty((self.conN, self.nontarget.shape[0], self.avrM))
        allMeanCSPTrg = np.empty((self.conN, self.avrM))
        allMeanCSPNtrg = np.empty((self.conN, self.avrM))

        # Time
        tMin, tMax = self.csp_time[0], self.csp_time[1]
        self.sp_ds.initConst(self.avrM, self.conN, self.csp_time)
        self.simple_time_ds = np.linspace(tMin, tMax, self.avrM)

        # X axis limits
        dx = self.csp_time[1] - self.csp_time[0]
        xMin = self.csp_time[0] - 0.25*dx
        xMax = self.csp_time[1] + 0.25*dx

        # Plot simple means
        productMeanTrg = self.target.mean(axis=1)
        productMeanNtrg = self.nontarget.mean(axis=1)

        analProductMeanTrg = np.array(map(lambda sig: self.sp_ds.prepareSignal(sig), productMeanTrg))
        analProductMeanNtrg = np.array(map(lambda sig: self.sp_ds.prepareSignal(sig), productMeanNtrg))

        py.subplot(2, 1+self.conN, 1)  # 1st row
        py.title("Mean target")
        timeArr = np.array(self.simple_time_ds.tolist()*analProductMeanTrg.shape[0]).reshape((-1, self.avrM))
        py.plot(timeArr, analProductMeanTrg, 'r.')
        py.plot(self.simple_time_ds, analProductMeanTrg.mean(axis=0), 'go')
        py.xlim(xMin, xMax)

        py.subplot(2, 1+self.conN, 2+self.conN)  # 2nd row
        py.title("Mean nontarget")
        timeArr = np.array(self.simple_time_ds.tolist()*analProductMeanNtrg.shape[0]).reshape((-1, self.avrM))
        py.plot(timeArr, analProductMeanNtrg, 'r.')
        # As above, plot the nontarget mean to match the title
        py.plot(self.simple_time_ds, analProductMeanNtrg.mean(axis=0), 'g')
        py.xlim(xMin, xMax)

        #######################
        ##  Plotting target  ##
        # Depending on how many CSP eigenvectors are considered...
        for con in range(self.conN):
            # Dot product over all channels (CSP filter); returns an
            # array of shape (trials, data).
            productCSPTrg = np.dot(self.P[:, con], self.target)
            productCSPNtrg = np.dot(self.P[:, con], self.nontarget)

            # Each epoch is analysed: filtered, downsampled...
            analProductCSPTrg = np.array(map(lambda sig: self.sp_ds.prepareSignal(sig), productCSPTrg))
            analProductCSPNtrg = np.array(map(lambda sig: self.sp_ds.prepareSignal(sig), productCSPNtrg))

            allCSPTrg[con] = analProductCSPTrg
            allCSPNtrg[con] = analProductCSPNtrg
            allMeanCSPTrg[con] = analProductCSPTrg.mean(axis=0)
            allMeanCSPNtrg[con] = analProductCSPNtrg.mean(axis=0)

            py.subplot(2, 1+self.conN, 2+con)
            py.title("CSP mean target vec" + str(con+1))
            timeArr = np.array(self.simple_time_ds.tolist()*analProductCSPTrg.shape[0]).reshape((-1, self.avrM))
            py.plot(timeArr, analProductCSPTrg, 'r.')
            py.plot(self.simple_time_ds, allMeanCSPTrg[con], 'g-')
            py.xlim(xMin, xMax)

            py.subplot(2, 1+self.conN, self.conN+1+2+con)
            py.title("CSP mean nontarget vec" + str(con+1))
            timeArr = np.array(self.simple_time_ds.tolist()*analProductCSPNtrg.shape[0]).reshape((-1, self.avrM))
            py.plot(timeArr, analProductCSPNtrg, 'r.')
            py.plot(self.simple_time_ds, allMeanCSPNtrg[con], 'g-')
            py.xlim(xMin, xMax)

        if savefile:
            py.savefig(savefile, dpi=150)

        ##  Plotting diff  ##
        py.figure()
        meanTarget = analProductMeanTrg.mean(axis=0)
        meanNontarget = analProductMeanNtrg.mean(axis=0)

        py.subplot(1, 1+self.conN, 1)
        py.title("Diff simple mean")
        py.plot(self.simple_time_ds, meanTarget - meanNontarget, 'r-')
        py.xlim(xMin, xMax)

        for con in range(self.conN):
            py.subplot(1, 1+self.conN, 2+con)
            py.title("Diff CSP mean vec " + str(con+1))
            py.plot(self.simple_time_ds, allMeanCSPTrg[con] - allMeanCSPNtrg[con], 'r-')
            py.xlim(xMin, xMax)

        if savefile:
            py.savefig(savefile + '_diff', dpi=150)
        else:
            py.show()

        py.clf()
        py.cla()

    def savePlotsD(self, dArrTotal, pVal, savefile=None):
        # Quick type check
        assert isinstance(pVal, float)
        assert isinstance(dArrTotal, dict)

        # Determine the least number of blinks over all fields
        nBlink = 100
        for i in range(8):
            if nBlink > len(dArrTotal[i]):
                nBlink = len(dArrTotal[i])
        print "nBlink: ", nBlink

        # dMean has 8 columns; row n holds the mean values after n blinks
        dMean = np.zeros((nBlink, 8))
        for n in range(nBlink)[::-1]:
            for i in range(8):
                dMean[n][i] = dArrTotal[i][nBlink-n-1:].mean()
        print "dMean: ", dMean

        # "z" is the threshold for the difference between d and the mean;
        # it is calculated from the pVal percentile
        z = st.norm.ppf(pVal)
        print "z: ", z

        for n in range(nBlink):
            py.subplot(2, (nBlink+1)/2, n+1)

            # This block calculates:
            #  - the mean over all fields except one
            #  - the std over all fields except one
            #  - threshold boundaries for pVal significance
            m = []
            yMaxArr = []
            for i in range(8):
                tmpArr = np.delete(dMean[n], i)
                mVal = np.mean(tmpArr)
                stdVal = np.std(tmpArr)
                m.append(mVal)
                yMaxArr.append(mVal + stdVal*z)

            # Label only the leftmost block
            if n == 0:
                py.ylabel('d')

            # Plot the mean for each field and for all fields without it
            py.plot(dMean[n], 'ro')
            py.plot(m, 'bo')

            yMin, yMax = dMean[n].min(), dMean.max()
            ySpan = yMax - yMin
            yMin, yMax = yMin - ySpan*0.1, yMax + ySpan*0.1
            ySpan = yMax - yMin

            py.title(n)
            py.ylim((yMin, yMax))
            py.xlim((-1, 8))
            for idx in range(8):
                py.axvline(x=idx, ymin=(m[idx]-yMin)/ySpan, ymax=(yMaxArr[idx]-yMin)/ySpan)

        # Save all blinks into one plot, then to disk
        self.globalNCount += 1
        if savefile is None:
            savefile = "online_{0}.png".format(self.globalNCount)
        py.savefig(savefile, dpi=150)
        py.cla()
        py.clf()
        print "Saved image '{0}' in directory: {1}".format(savefile, os.getcwd())

    def plotDistribution(self, dTarget, dNontarget, savefile=None):
        """
        Takes 1D numpy arrays of d values for target and nontarget.
        Plots their histograms and saves them to a file.
        """
        bins = 20

        py.subplot(2, 2, 1)
        py.title("Target distribution")
        py.hist(dTarget, bins)
        py.axvline(dTarget.mean(), color='r')
        py.xlabel("d values")
        py.ylabel("Quantity")

        py.subplot(2, 2, 2)
        py.title("Nontarget distribution")
        py.hist(dNontarget, bins)
        py.axvline(dNontarget.mean(), color='r')
        py.xlabel("d values")
        py.ylabel("Quantity")

        py.subplot(2, 1, 2)
        py.title("Target/Nontarget distribution")
        py.hist(np.append(dTarget, dNontarget), bins)
        py.axvline(dTarget.mean(), color='r')
        py.axvline(dNontarget.mean(), color='r')
        py.xlabel("d values")
        py.ylabel("Quantity")

        if savefile is None:
            savefile = "distrib_{0}.png".format(self.globalNCount)
            self.globalNCount += 1
        py.savefig(savefile, dpi=150)
        py.show()
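# --- Usage sketch (not part of the module) ----------------------------------
# A hypothetical plotting flow after calibration; `target`/`nontarget` come
# from P300_train.getTargetNontarget() and P from the trained CSP.
#
#   drawer = P300_draw(fs=128.)
#   drawer.setCalibration(target, nontarget)
#   drawer.setTimeLine(conN=2, avrM=16, csp_time=[0.2, 0.6])
#   drawer.setCSP(P)
#   drawer.plotSignal(savefile='calibration')        # full-rate overview
#   drawer.plotSignal_ds(savefile='calibration_ds')  # downsampled view
#   drawer.plotDistribution(dTarget, dNontarget)     # d-value histograms
# -----------------------------------------------------------------------------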
# NOTE: an alternative P300_analysis variant follows (t-distribution
# thresholding instead of a precomputed nontarget pdf). As the module
# stands, each later definition overrides the previous one at import time.
class P300_analysis(object):
    def __init__(self, sampling, cfg={}, fields=8):
        self.fs = sampling
        self.fields = fields

        self.defineConst(cfg)
        self.defineMethods()

    def defineConst(self, cfg):
        # Moving average & resample factor
        self.avrM = int(cfg['avrM'])

        # Number of CSP vectors to concatenate
        self.conN = int(cfg['conN'])

        # Analysis time window [s]
        self.csp_time = np.array(cfg['csp_time'])
        self.tInit, self.tFin = self.csp_time
        self.iInit, self.iFin = np.floor(self.csp_time*self.fs)

        self.chL = len(cfg['use_channels'].split(';'))
        #~ self.arrL = np.floor((self.iFin-self.iInit)/self.avrM)
        self.arrL = self.avrM

        self.nRepeat = int(cfg['nRepeat'])
        self.nMin = 3
        self.nMax = 6

        self.dec = -1

        # Arrays
        self.flashCount = np.zeros(self.fields)  # flash counter per field
        self.dArr = np.zeros(self.fields)        # accumulated d values
        self.dArrTotal = {}
        for i in range(self.fields):
            self.dArrTotal[i] = np.array([])

        self.sAnalArr = {}
        for i in range(self.fields):
            self.sAnalArr[i] = np.zeros(self.arrL*self.conN)

        # For statistical analysis
        p = float(cfg['pVal'])
        self.pVal = p
        self.z = st.norm.ppf(p)

        # diffV - differences between each dVal and the significance level
        self.diffV = np.zeros(self.fields)

    def defineMethods(self):
        # Declare helper objects
        self.sp = DataAnalysis(self.fs)
        self.sp.initConst(self.avrM, self.conN, self.csp_time)
        #~ self.sp.set_lowPass_filter_ds(self.avrM/(self.csp_time[1]-self.csp_time[0]))

    def prepareSignal(self, signal):
        return self.sp.prepareSignal(signal)

    def testData(self, signal, blink):
        # Analyze the signal once data for that flash is needed
        s = np.array([])
        for con in range(self.conN):
            tmp = self.prepareSignal(np.dot(self.P[:, con], signal))
            s = np.append(s, tmp)

        # Data projection onto Fisher's discriminant
        self.d = np.dot(s, self.w) - self.c

        self.dArr[blink] += self.d
        self.dArrTotal[blink] = np.append(self.d, self.dArrTotal[blink])
        self.dArrTotal[blink] = self.dArrTotal[blink][:self.nMax]

        self.flashCount[blink] += 1

    def isItEnought(self):
        if (self.flashCount < self.nMin).any():
            return -1
        return self.testSignificances()

    def testSignificances(self):
        """
        Tests the significance of the d values: each field's mean d is
        scored (via a t-distribution CDF) against the distribution of the
        remaining fields. If exactly one field exceeds pVal, its index is
        returned as the P300 target; otherwise -1 is returned.
        """
        dMean = np.zeros(self.fields)
        nMin = int(self.flashCount.min())
        for i in range(self.fields):
            dMean[i] = self.dArrTotal[i][:nMin].mean()

        self.diffV = np.zeros(self.diffV.shape)
        for sq in range(self.fields):
            # Distribution of the remaining fields
            tmp = np.delete(dMean, sq)
            mean, std = tmp.mean(), tmp.std()

            # Earlier normal-boundary approach, kept for reference:
            #~ v = mean + self.z*std
            #~ self.diffV[sq] = dMean[sq]-v

            # Assuming the d values follow a t distribution
            self.diffV[sq] = st.t.cdf(dMean[sq], self.fields, loc=mean, scale=std)

        # If exactly one value is significantly distant
        if np.sum(self.diffV > self.pVal) == 1:
            # The original indexed with diffV > 0, which can match several
            # fields; the guard above tests diffV > pVal.
            self.dec = np.arange(self.diffV.shape[0])[self.diffV > self.pVal]
            self.dec = np.int(self.dec[0])
            return self.dec
        else:
            return -1

    def forceDecision(self):
        self.testSignificances()

        # The decision is the field with the largest diffV value
        self.dec = np.arange(self.diffV.shape[0])[self.diffV == np.max(self.diffV)]
        self.dec = np.int(self.dec[0])

        # Return int value
        return self.dec

    def setPWC(self, P, w, c):
        self.P = P
        self.w = w
        self.c = c

    def getDecision(self):
        return self.dec

    def newEpoch(self):
        self.flashCount = self.flashCount*0  # flash counter per field
        self.dArr = self.dArr*0              # accumulated d values
        for i in range(self.fields):
            self.dArrTotal[i] = np.array([])

    def getArrTotalD(self):
        return self.dArrTotal

    def getRecentD(self):
        return self.d

    def getArrD(self):
        return self.dArr

    def getProbabiltyDensity(self):
        dMean = np.zeros(self.fields)
        nMin = int(self.flashCount.min())
        for i in range(self.fields):
            dMean[i] = self.dArrTotal[i][:nMin].mean()

        # Assuming the d values follow a t distribution
        p = st.t.cdf(dMean, self.fields, loc=dMean.mean(), scale=dMean.std())
        return p
# NOTE: row/column speller variant of P300_analysis (6x6 matrix); it
# overrides the definitions above when the module is imported as-is.
class P300_analysis(object):
    def __init__(self, sampling, cfg={}, rows=6, cols=6):
        self.fs = sampling
        self.rows = rows
        self.cols = cols

        self.defineConst(cfg)
        self.defineMethods()

    def defineConst(self, cfg):
        # Moving average & resample factor
        self.avrM = int(cfg['avrM'])

        # Number of CSP vectors to concatenate
        self.conN = int(cfg['conN'])

        # Analysis time window [s]
        self.csp_time = np.array(cfg['csp_time'])
        self.tInit, self.tFin = self.csp_time
        self.iInit, self.iFin = np.floor(self.csp_time*self.fs)

        self.chL = len(cfg['use_channels'].split(';'))
        self.arrL = self.avrM

        self.nLast = int(cfg['nLast'])
        self.nMin = int(cfg['nMin'])
        self.nMax = int(cfg['nMax'])

        self.dec = -1

        # Arrays
        self.flashCount = {}
        self.flashCount['r'] = np.zeros(self.rows)
        self.flashCount['c'] = np.zeros(self.cols)

        self.dArr = {}
        self.dArr['r'] = np.zeros(self.rows)
        self.dArr['c'] = np.zeros(self.cols)

        self.dArrTotal = {}
        self.dArrTotal['r'] = [np.array([]) for x in xrange(self.rows)]
        self.dArrTotal['c'] = [np.array([]) for x in xrange(self.cols)]

        self.sAnalArr = {}
        self.sAnalArr['r'] = [np.zeros(self.arrL*self.conN) for x in xrange(self.rows)]
        self.sAnalArr['c'] = [np.zeros(self.arrL*self.conN) for x in xrange(self.cols)]

        # For statistical analysis
        self.pdf = np.array(cfg['pdf'])
        #~ self.pPer = float(cfg['pPercent'])
        self.pPer = 95.

        # diffV - differences between each dVal and the significance level
        self.diffV = {}
        self.diffV['r'] = np.zeros(self.rows)
        self.diffV['c'] = np.zeros(self.cols)

    def newEpoch(self):
        """
        Clears all buffer arrays.
        """
        self.flashCount['r'] = np.zeros(self.rows)
        self.flashCount['c'] = np.zeros(self.cols)

        self.dArr['r'] = np.zeros(self.rows)
        self.dArr['c'] = np.zeros(self.cols)

        self.dArrTotal['r'] = [np.array([]) for x in xrange(self.rows)]
        self.dArrTotal['c'] = [np.array([]) for x in xrange(self.cols)]

    def defineMethods(self):
        # Declare helper objects
        self.sp = DataAnalysis(self.fs)
        self.sp.initConst(self.avrM, self.conN, self.csp_time)
        #~ self.sp.set_lowPass_filter_ds(self.avrM/(self.csp_time[1]-self.csp_time[0]))

    def prepareSignal(self, signal):
        return self.sp.prepareSignal(signal)

    def testData(self, signal, lineFlag, blink):
        """
        Analyses the given data and stores its classifier value.

        Takes:
            signal   -- CHANNEL x DATA signal;
            lineFlag -- which line blinked: 'c' (column) or 'r' (row);
            blink    -- position of the blink (0 <= blink < max(lineFlag));
        """
        # Analyze the signal once data for that flash is needed
        s = np.empty(self.avrM*self.conN)
        for con in range(self.conN):
            s[con*self.avrM:(con+1)*self.avrM] = self.prepareSignal(np.dot(self.P[:, con], signal))

        # Data projection onto Fisher's discriminant
        self.d = np.dot(s, self.w) - self.c

        self.dArr[lineFlag][blink] += self.d
        self.dArrTotal[lineFlag][blink] = np.append(self.d, self.dArrTotal[lineFlag][blink])
        self.dArrTotal[lineFlag][blink] = self.dArrTotal[lineFlag][blink][:self.nMax]

        self.flashCount[lineFlag][blink] += 1

    def isItEnought(self):
        print "self.flashCount['c']: ", self.flashCount['c']
        print "self.flashCount['r']: ", self.flashCount['r']
        for flag in ['r', 'c']:
            if (self.flashCount[flag] <= self.nMin).any():
                return -1

        if (self.flashCount['c'] >= self.nMax).all() and \
           (self.flashCount['r'] >= self.nMax).all():
            return self.forceDecision()
        else:
            return self.testSignificances()

    def testSignificances(self):
        """
        Tests the significance of the d values: for every (row, column)
        pair the mean of the two d values is scored against the nontarget
        pdf. If exactly one field exceeds the pPer percentile, its index
        is returned as the P300 target; otherwise -1 is returned.
        """
        print "++ testSignificances ++"
        nLast = self.nLast
        dMeanR = np.array([np.mean(self.dArrTotal['r'][i][:nLast]) for i in range(self.rows)])
        dMeanC = np.array([np.mean(self.dArrTotal['c'][i][:nLast]) for i in range(self.cols)])

        # This substitution avoids changing much of the old code.
        # In the future, switch the code over to the new variables.
        self.diffV['r'] = self.diffR = dMeanR
        self.diffV['c'] = self.diffC = dMeanC

        self.per = np.zeros(self.rows*self.cols)
        for r in range(self.rows):
            for c in range(self.cols):
                d = 0.5*(dMeanR[r] + dMeanC[c])
                self.per[c + r*self.cols] = st.percentileofscore(self.pdf, d)
        print self.per

        if np.sum(self.per > self.pPer) == 1:
            print "per: ", self.per
            self.dec = np.arange(self.rows*self.cols)[self.per == self.per.max()]
            self.dec = np.int(self.dec[0])
            print "selected -- {0}".format(self.dec)
            return self.dec
        else:
            return -1

    def forceDecision(self):
        print " ++ forceDecision ++"
        self.testSignificances()
        print "per: ", self.per

        # The decision is the field with the largest percentile
        self.dec = int(np.arange(self.cols*self.rows)[self.per == self.per.max()][0])

        # Return int value
        return self.dec

    def setPWC(self, P, w, c):
        self.P = P
        self.w = w
        self.c = c

    def setPdf(self, pdf):
        self.pdf = pdf

    def getDecision(self):
        return self.dec

    def getArrTotalD(self):
        return self.dArrTotal

    def getRecentD(self):
        return self.d

    def getArrD(self):
        return self.dArr

    def getProbabiltyDensity(self):
        """
        Returns percentiles of the (row, column) mean d values with
        respect to the nontarget probability density function.
        """
        nMinR = int(self.flashCount['r'].min())
        nMinC = int(self.flashCount['c'].min())

        dMeanR = np.array([np.mean(self.dArrTotal['r'][i][:nMinR]) for i in range(self.rows)])
        dMeanC = np.array([np.mean(self.dArrTotal['c'][i][:nMinC]) for i in range(self.cols)])

        self.per = np.zeros(self.rows*self.cols)
        for r in range(self.rows):
            for c in range(self.cols):
                d = 0.5*(dMeanR[r] + dMeanC[c])
                self.per[c + r*self.cols] = st.percentileofscore(self.pdf, d)
        return self.per