Code Example #1
 def set_dfa(self):
     # Compute DFA per channel; channels whose first 1000 samples are all zero are skipped.
     for e in range(self.data.shape[0]):
         if np.all(self.data[e][:1000] == 0):
             continue
         name = 'dfa_e' + str(e)
         value = pyeeg.dfa(self.data[e], Ave=self.features['mean_e' + str(e)])
         self.features[name] = value
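The fragments in this collection assume module-level imports such as "import numpy as np" and "import pyeeg". As a point of reference, here is a minimal, self-contained sketch (not taken from any of the projects shown here) of how pyeeg.dfa is typically called on a 1-D signal; the white-noise input and the Ave/L keyword values are illustrative only.

import numpy as np
import pyeeg

signal = np.random.randn(4096)  # synthetic white noise; its DFA exponent should come out near 0.5
alpha = pyeeg.dfa(signal)  # default behaviour: mean and box sizes chosen internally
alpha_custom = pyeeg.dfa(signal, Ave=signal.mean(), L=[4, 8, 16, 32, 64])  # optional Ave and L arguments, as used in other fragments in this collection
print(alpha, alpha_custom)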
Code Example #2
def features(mat):
    Kmax = 5
    Tau = 4
    DE = 10
    M = 10
    R = 0.3
    Band = np.arange(1, 86)
    Fs = 173
    DFA = pyeeg.dfa(mat)
    HFD = pyeeg.hfd(mat, Kmax)
    SVD_Entropy = pyeeg.svd_entropy(mat, Tau, DE)
    Fisher_Information = pyeeg.fisher_info(mat, Tau, DE)
    PFD = pyeeg.pfd(mat)
    sleep(0.01)

    return (DFA, HFD, SVD_Entropy, Fisher_Information, PFD)
Code Example #3
File: oscserver.py  Project: Toruitas/thinkingcap
def handler(unused_addr, args, ch1, ch2, ch3, ch4):
    baseline = args[1]  # retrieve the baseline
    baselineSet = len(baseline) == 0  # True while the baseline is still empty (not yet established)
    L = [ch1, ch2, ch3, ch4]
    if baselineSet:
        args[0] = compute_baseline(args[0], 10, L)

    if not baselineSet:
        args[0] = np.vstack(
            (args[0],
             normalize(L, baseline)))  # append the new EEG values
        data = args[0]
        buf = np.mean(data[-3 * 220:], axis=1)
        print(data.shape)
        value = pyeeg.dfa(buf.ravel())
        client.send_message("/puredata/dfa", value)
Code Example #4
def features(mat):
    Kmax = 5
    Tau = 4
    DE = 10
    M = 10
    R = 0.3
    Band = np.arange(1, 86)
    Fs = 173
    DFA = pyeeg.dfa(mat)
    HFD = pyeeg.hfd(mat, Kmax)
    SVD_Entropy = pyeeg.svd_entropy(mat, Tau, DE)
    Fisher_Information = pyeeg.fisher_info(mat, Tau, DE)
    #ApEn               = pyeeg.ap_entropy(mat, M, R)      # very slow
    PFD = pyeeg.pfd(mat)
    Spectral_Entropy = pyeeg.spectral_entropy(mat, Band, Fs, Power_Ratio=None)
    sleep(0.01)

    return (DFA, HFD, SVD_Entropy, Fisher_Information, PFD, Spectral_Entropy)
Code Example #5
    def StatisticalFeatures(self, data):

        mean = np.mean(data)  # Mean of data
        std = np.std(data)  # std of data
        pfd = pyeeg.pfd(data)  # Petrosian Fractal Dimension
        hurst = pyeeg.hurst(data)  # Hurst Exponent Feature
        dfa = pyeeg.dfa(data)  # Detrended Fluctuation Analysis
        hjorth = pyeeg.hjorth(data)  # Hjorth (mobility, complexity) tuple
        corr = nolds.corr_dim(data, 1)  # Correlation Dimension Feature
        power = np.sum(np.abs(data)**2) / len(data)  # Power feature
        FD = hfda(data, 5)  # fractal dimension (computed but not returned below)

        statistics = {
            "mean": mean,
            "std": std,
            "pfd": pfd,
            "hurst": hurst,
            "hjorth": hjorth,
            "dfa": dfa,
            "corr": corr,
            "power": power
        }

        return (statistics)
Code Example #6
 def extract_features(couple_data):
     pca1 = pca_project_data(couple_data, 1)  # take the 1st PCA component
     pca1_mean = np.mean(pca1, axis=0)
     pca1_std = np.std(pca1, axis=0)
     pca1_med = np.median(pca1, axis=0)
     features = []
     def sinuosity_deviation_features(seq, mean, std):
         sinuosity_dict = {"A":0, "B":0, "C":0}
         deviation_dict = {"I":0, "II":0, "III":0}
         sinuosity_deviation_dict = {"A-I":0,"A-II":0,"A-III":0,"B-I":0,"B-II":0,"B-III":0,"C-I":0,"C-II":0,"C-III":0}
         n = len(seq)
         for i in range(1,n-1):
             current_af = seq[i]
             prev_af = seq[i-1]
             next_af = seq[i+1]
             sinu = abs((next_af - current_af) + (current_af - prev_af))
             if sinu == 0:      label1 = "A"
             elif 0< sinu <= 1: label1 = "B"
             else:              label1 = 'C'
             sinuosity_dict[label1] += 1
             devi = abs(current_af - mean)
             close =  std / 2
             if devi <= close: label2 = "I"
             elif devi <= std: label2 = "II" 
             elif devi > std:  label2 = "III" 
             deviation_dict[label2] += 1
             sinuosity_deviation_dict["%s-%s"%(label1,label2)] += 1
          return list(sinuosity_deviation_dict.values())
     n = len(pca1)
     pca1_sinuosity_deviation = sinuosity_deviation_features( pca1, pca1_mean,  pca1_std )
     features += list(np.array(pca1_sinuosity_deviation)/float(n-2))
     seq =  pca1
     dfa = eg.dfa(seq)  # "eg" is presumably the pyeeg module imported under an alias
     pfd = eg.pfd(seq)
     apen = eg.ap_entropy(seq, 1, np.std(seq) * 0.2)
     svden = eg.svd_entropy(seq, 2, 2)
     features += [pca1_mean, pca1_med, pca1_std, dfa, pfd, apen, svden]
     return features
Code Example #7
File: utils.py  Project: Grometton/Machine-Learning-
def advanced_statistics(signal, fs=128):
    K_boundary = 10  # to be tuned
    t_fisher = 12  # to be tuned
    d_fisher = 40  # to be tuned
    features_num = 11
    threshold = 0.0009
    advanced_stats = np.zeros((signal.shape[0], features_num))
    print("Gathering advanced statistics...")
    for i in tqdm((np.arange(signal.shape[0]))):
        feat_array = np.array([
            pyeeg.fisher_info(signal[i, :], t_fisher, d_fisher),
            pyeeg.pfd(signal[i, :]),
            pyeeg.dfa(signal[i, :]),
            pyeeg.hfd(signal[i, :], K_boundary),
            np.sum((abs(signal[i, :])**(-0.3)) > 20),
            np.sum((abs(signal[i, :])) > threshold),
            np.std(abs(signal[i, :])**(0.05)),
            np.sqrt(np.mean(np.power(np.diff(signal[i, :]), 2))),
            np.mean(np.abs(np.diff(signal[i, :]))),
            np.mean(signal[i, :]**5),
            np.sum(signal[i, :]**2)
        ])
        advanced_stats[i, :] = feat_array
    return advanced_stats
Code Example #8
File: fractal.py  Project: nishanthprakash/ARMiVE
import csv

import pylab
import pyeeg
from numpy.random import randn  # unused in this snippet

# output = subprocess.Popen(["xentop"], stdout=subprocess.PIPE).communicate()[0]

# Read the second column of usage.csv as integers.
with open('usage.csv', 'r', newline='') as csvfile:
    usage = csv.reader(csvfile, delimiter=',', quotechar='|')
    dat = []
    for a, b in usage:
        dat.append(int(b))

print(len(dat))
data = []

numsteps = len(dat)
for i in range(numsteps):
    data.append(dat[i])

# print(data)

print(pyeeg.dfa(data))
pylab.plot(range(numsteps), data, 'b')
pylab.show()
Code Example #9
fisher_info_features_train = []
for i in range(X_train.shape[0]):
    ##print i
    h = fisher_info(X_train[i, ], 4, 10, W=None)
    fisher_info_features_train.append(h)

fisher_info_features_test = []
for i in range(X_test.shape[0]):
    ##print i
    h = fisher_info(X_test[i, ], 4, 10, W=None)
    fisher_info_features_test.append(h)
''' Detrended Fluctuation Analysis'''  ###Okay
dfa_features_train = []
for i in range(X_train.shape[0]):
    ##print i
    h = dfa(X_train[i, ], Ave=None, L=None)
    dfa_features_train.append(h)

dfa_features_test = []
for i in range(X_test.shape[0]):
    ##print i
    h = dfa(X_test[i, ], Ave=None, L=None)
    dfa_features_test.append(h)

#''' bin_power(), Power Spectral Density (PSD), spectrum power in a set of frequency bins, and, Relative Intensity Ratio (RIR)'''
#bin_power_features_train=[]  ###Okay
#for i in range(X_train.shape[0]):
#    ##print i
#    h=bin_power(X_train[i,],[0.54,5,7,12,50],173)
#    bin_power_features_train.append(h)
#
Code Example #10
fisher_info_features_train = []
for i in range(X_train.shape[0]):
    ##print i
    h = fisher_info(X_train[i,], 4, 10, W=None)
    fisher_info_features_train.append(h)


fisher_info_features_test = []
for i in range(X_test.shape[0]):
    ##print i
    h = fisher_info(X_test[i,], 4, 10, W=None)
    fisher_info_features_test.append(h)


""" Detrended Fluctuation Analysis"""  ###Okay
dfa_features_train = []
for i in range(X_train.shape[0]):
    ##print i
    h = dfa(X_train[i,], Ave=None, L=None)
    dfa_features_train.append(h)


dfa_features_test = []
for i in range(X_test.shape[0]):
    ##print i
    h = dfa(X_test[i,], Ave=None, L=None)
    dfa_features_test.append(h)

#''' bin_power(), Power Spectral Density (PSD), spectrum power in a set of frequency bins, and, Relative Intensity Ratio (RIR)'''
# bin_power_features_train=[]  ###Okay
# for i in range(X_train.shape[0]):
#    ##print i
#    h=bin_power(X_train[i,],[0.54,5,7,12,50],173)
#    bin_power_features_train.append(h)
Code Example #11
	def DFA(self):
		resp = pyeeg.dfa(self.channel_data)
		return [np.array([resp]),['DFA']]
Code Example #12
def myFeaturesExtractor(
        X, myM, myV):  # X has to be a matrix where each row is a channel
    N = len(X)  # number of channels
    L = len(X[0])
    maxtLyap = min(500, L // 2 + L // 4)
    lyapLags = np.arange(maxtLyap) / Fs

    # get number of features
    nFeatures = nMono * N + N * (N - 1) / 2
    # here we initialize the list of features // We will transform it to an array later
    featList = np.zeros((int(nFeatures)))
    # deal with monovariate features first
    for kChan in range(N):
        kFeat = 0
        mySig = X[kChan, :]
        #========== Stats ========================
        myMean = myM[kChan]
        featList[nMono * kChan + kFeat] = myMean
        kFeat += 1
        myMax = max(mySig)
        featList[nMono * kChan + kFeat] = myMax
        kFeat += 1
        myMin = min(mySig)
        featList[nMono * kChan + kFeat] = myMin
        kFeat += 1
        peak = max(abs(np.array([myMin, myMax])))
        featList[nMono * kChan + kFeat] = peak
        kFeat += 1
        myVar = myV[kChan]
        featList[nMono * kChan + kFeat] = myVar
        kFeat += 1
        featList[nMono * kChan + kFeat] = sp.skew(mySig)
        kFeat += 1
        featList[nMono * kChan + kFeat] = sp.kurtosis(mySig)
        kFeat += 1
        myRMS = rms(mySig)
        featList[nMono * kChan + kFeat] = myRMS
        kFeat += 1
        featList[nMono * kChan + kFeat] = peak / myRMS
        kFeat += 1

        featList[nMono * kChan + kFeat] = totVar(mySig)
        kFeat += 1
        featList[nMono * kChan + kFeat] = pyeeg.dfa(mySig)
        kFeat += 1
        featList[nMono * kChan + kFeat] = pyeeg.hurst(mySig)
        kFeat += 1
        hMob, hComp = pyeeg.hjorth(mySig)
        featList[nMono * kChan + kFeat] = hMob
        kFeat += 1
        featList[nMono * kChan + kFeat] = hComp
        kFeat += 1
        ## ======== fractal ========================
        # Now we need to get the embedding time lag Tau and the embedding dimension
        ac = delay.acorr(mySig, maxtau=maxTauLag, norm=True, detrend=True)
        Tau = firstTrue(ac < corrThresh)  # embedding delay

        f1 , f2 , f3 = dimension.fnn(mySig, dim=dim, tau=Tau, R=10.0, A=2.0, metric='euclidean',\
                                     window=10,maxnum=None, parallel=True)
        myEmDim = firstTrue(f3 < fracThresh)
        # Here we construct the embedding matrix Em
        Em = pyeeg.embed_seq(mySig, Tau, myEmDim)
        U, s, Vh = linalg.svd(Em)
        W = s / np.sum(s)  # list of singular values in decreasing order
        FInfo = pyeeg.fisher_info(X, Tau, myEmDim, W=W)  # note: the full matrix X is passed here; the single channel mySig may have been intended
        featList[nMono * kChan + kFeat] = FInfo
        kFeat += 1
        featList[nMono * kChan + kFeat] = Tau
        kFeat += 1
        featList[nMono * kChan + kFeat] = myEmDim
        kFeat += 1
        #========================================
        PFD = pyeeg.pfd(mySig, D=None)
        hfd6 = pyeeg.hfd(mySig, 6)
        hfd10 = pyeeg.hfd(mySig, 10)
        # Now we fit a line and use its slope as the Lyapunov exponent
        divAvg = lyapunov.mle(Em,
                              maxt=maxtLyap,
                              window=3 * Tau,
                              metric='euclidean',
                              maxnum=None)
        poly = np.polyfit(lyapLags,
                          divAvg,
                          1,
                          rcond=None,
                          full=False,
                          w=None,
                          cov=False)
        LyapExp = poly[0]

        featList[nMono * kChan + kFeat] = PFD
        kFeat += 1
        featList[nMono * kChan + kFeat] = hfd6
        kFeat += 1
        featList[nMono * kChan + kFeat] = hfd10
        kFeat += 1
        featList[nMono * kChan + kFeat] = LyapExp
        kFeat += 1

        ## ======== Entropy ========================
        tolerance = 1 / 4
        entropyDim = max([myEmDim, PFD])

        featList[nMono * kChan + kFeat] = pyeeg.samp_entropy(
            mySig, entropyDim, tolerance)
        kFeat += 1
        featList[nMono * kChan + kFeat] = pyeeg.svd_entropy(mySig,
                                                            Tau,
                                                            myEmDim,
                                                            W=W)
        kFeat += 1

        # here we compute bin power
        power, power_Ratio = pyeeg.bin_power(mySig, freqBins, Fs)
        featList[nMono * kChan + kFeat] = pyeeg.spectral_entropy(
            mySig, freqBins, Fs, Power_Ratio=power_Ratio)
        kFeat += 1
        ## ======== Spectral ========================
        for kBin in range(len(freqBins) - 1):
            featList[nMono * kChan + kFeat] = power[kBin]
            kFeat += 1
            featList[nMono * kChan + kFeat] = power_Ratio[kBin]
            kFeat += 1

    # now deal with the multivariate (connectivity) features
    #============ connectivity ==================
    corrList = connectome(X)
    nConnect = len(corrList)
    if N * (N - 1) / 2 != nConnect:
        raise ValueError('incorrect number of correlation coeffs')

    for kC in range(nConnect):
        featList[-nConnect + kC] = corrList[kC]

    return featList
Code Example #13
File: overlapping epochs.py  Project: zty0312/Project
S = skew(epoch_overlap,1)

activity = np.zeros((n,4))
morbidity = np.zeros((n,4))
complexity = np.zeros((n,4))
zc = np.zeros((n,4))
d_f_a = np.zeros((n,4))
for ii in range(4):
    for k in range(n):
        a = epoch_overlap[k][:,ii]
        # Hjorth parameters
        activity[k,ii],morbidity[k,ii],complexity[k,ii] = Hjorth(a)
        # zero crossings
        zc[k,ii] = ((a[:-1] * a[1:]) < 0).sum()
        # DFA
        d_f_a[k,ii] = pyeeg.dfa(a)
feature_matrix_time = np.hstack((K,S,activity,morbidity,complexity,zc,d_f_a))

####################    Freq ###############################
bandpower = []
for row in range(n):
    bandpower.append([])
    for col in range(4):
        a = epoch_overlap[row][:,col]
        Power = power(a,fs)
        bandpower[row].append(Power.copy())
# normalised band power
delta_power = np.zeros((n,4))
theta_power = np.zeros((n,4))
alpha_power = np.zeros((n,4))
beta_power = np.zeros((n,4))
Code Example #14
 def find_dfa(self):
     self.dfa = pyeeg.dfa(self.filtered_signal, self.mean)  # second positional argument is Ave (the signal mean)
Code Example #15
 def DFA(self):
     resp = pyeeg.dfa(self.channel_data)
     return [np.array([resp]), ['DFA']]
Code Example #16
def myFeaturesExtractor(X): # X has to be a matrix where each row is a channel
    N = len(X)
    # L = len(X[0])
    # here we initialize the list of features // We will transform it to an array later
    featList = list()
    timeList = list()
    featName = list()
    for kChan in range(1):  # note: only the first channel is processed in this timing-oriented variant
        mySig = X[kChan, :]
        if kChan == 0:
            start=time.perf_counter_ns()
            
        #========== Stats ========================
        myMean = np.mean(mySig)
        featList.append(myMean)
        if kChan == 0:
            end=time.perf_counter_ns()
            timeList.append(end -start)
            featName.append("mean")
            start=end
        featList.append(max(mySig))
        
        if kChan == 0:
            end=time.perf_counter_ns()
            timeList.append(end -start)
            featName.append(" max")
            start=end
        featList.append(min(mySig))
        if kChan == 0:
            end=time.perf_counter_ns()
            timeList.append(end -start)
            featName.append(" min")
            start=end            
        peak =max(abs(mySig))
        featList.append(peak)
        if kChan == 0:
            end=time.perf_counter_ns()
            timeList.append(end -start)
            featName.append(" peak")
            start=end            
        myVar = np.var(mySig)
        featList.append(myVar)
        if kChan == 0:
            end=time.perf_counter_ns()
            timeList.append(end -start)
            featName.append(" var")
            start=end
        myStd = np.sqrt(myVar)
        featList.append(myStd)
        if kChan == 0:
            end=time.perf_counter_ns()
            timeList.append(end -start)
            featName.append(" std")
            start=end             
        featList.append(sp.skew(mySig))
        if kChan == 0:
            end=time.perf_counter_ns()
            timeList.append(end -start)
            featName.append(" skew")
            start=end

        featList.append(sp.kurtosis(mySig))
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" kurt")
            start=end
        myRMS = rms(mySig)
        featList.append(myRMS)
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" rms")
            start=end
        featList.append(peak / myRMS)
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" fact")
            start=end
        featList.append(myRMS / myMean)
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" papr")
            start=end
        featList.append(totVar(mySig))
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" totVar")
            start=end
            
        featList.append(pyeeg.dfa(mySig))
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" dfa")
            start=end
        featList.append(pyeeg.hurst(mySig))
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" hurst")
            start=end
        hMob, hComp = pyeeg.hjorth(mySig)
        featList.append(hMob)
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" Hmob")
            timeList.append(end -start)
            featName.append(" Hcomp")
            start=end
        
        featList.append(hComp)
            
            
        
#        ## ======== fractal ========================
#        # Now we need to get the embedding time lag Tau and the embedding dimension
#        ac=delay.acorr(mySig, maxtau=maxTauLag, norm=True, detrend=True)
#        Tau = firstTrue(ac < corrThresh) # embeding delay
#        featList.append(Tau)
#        if kChan == 0:
#            end=time.perf_counter_ns()
#            
#            timeList.append(end -start)
#            featName.append(" dCorrTime")
#            start=end
#        f1 , f2 , f3 = dimension.fnn(mySig, dim=dim, tau=Tau, R=10.0, A=2.0, metric='chebyshev', window=10,maxnum=None, parallel=True)
#        myEmDim = firstTrue(f3 < fracThresh)
##        if kChan == 0:
##            end=time.perf_counter_ns()
##            timeList.append(end -start)
##            featName.append(" embDim")
##            start=end
#        # Here we construct the embedding matrix Em
#        Em = pyeeg.embed_seq(mySig, Tau, myEmDim)
#        U, s, Vh = linalg.svd(Em)
#        W = s/np.sum(s)  # list of singular values in decreasing order 
#        
#        FInfo = pyeeg.fisher_info(X, Tau, myEmDim , W=W)
#        featList.append(FInfo)
#        if kChan == 0:
#            end=time.perf_counter_ns()
#            
#            timeList.append(end -start)
#            featName.append(" FInfo")
#            start=end
#
#        featList.append(myEmDim)
        
        
        PFD = pyeeg.pfd(mySig, D=None)
        featList.append(PFD)
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" pfd")
            start=end
            
        hfd6 = pyeeg.hfd(mySig , 6)
        featList.append(hfd6)
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" hfd6")
            start=end
        hfd10 = pyeeg.hfd(mySig , 10)
        featList.append(hfd10)
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" hfd10")
            start=end
        # Now we fit a line and use its slope as the Lyapunov exponent
#        divAvg = lyapunov.mle(Em, maxt=maxtLyap, window= 3 * Tau, metric='euclidean', maxnum=None)
#        poly = np.polyfit(lyapLags, divAvg, 1, rcond=None, full=False, w=None, cov=False)
#        LyapExp = poly[0]
#        featList.append(np.mean(LyapExp)) 
#        if kChan == 0:
#            end=time.perf_counter_ns()
#            
#            timeList.append(end -start)
#            featName.append("Lyapunov")
#            start=end
               
        ## ======== Entropy ========================
        
        # here we compute bin power 
        power, power_Ratio = pyeeg.bin_power(mySig , freqBins , Fs )
        
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append("Spectral")
            start=end
        featList.append( pyeeg.spectral_entropy(mySig, freqBins, Fs, Power_Ratio=power_Ratio))
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" specEn")
            start=end
            
#        tolerance = myStd / 4
#        entropyDim = max([myEmDim , PFD])
#        featList.append( pyeeg.samp_entropy(mySig , entropyDim , tolerance ) )
#        if kChan == 0:
#            end=time.perf_counter_ns()
#            
#            timeList.append(end -start)
#            featName.append(" sampEn")
#            start=end
#        featList.append( pyeeg.svd_entropy(mySig, Tau, myEmDim , W=W) )
#        if kChan == 0:
#            end=time.perf_counter_ns()
#            
#            timeList.append(end -start)
#            featName.append(" svdEn")
#            start=end
            
        ## ======== Spectral ========================
        appendArray2List(featList , power )
        appendArray2List(featList , power_Ratio )
    
    start=time.perf_counter_ns()
    connectome(X , featList)
    end=time.perf_counter_ns()
    timeList.append((end -start)/N/(N-1)*2)
    featName.append("connectivity")
            
    ll=list()
    ll.append(featName)
    ll.append(timeList)    
    return np.asarray(featList) , ll
Code Example #17
File: myAnalysis.py  Project: celiacintas/fractaloso
import pylab
from numpy import arange, loadtxt, random, log10
from random import randrange

def randomize(data):
	randomData = []
	for i in range(len(data)-1):
		randomData.append(data[randrange(0, len(data)-1)])
	
	return randomData

if __name__ == "__main__":
	
	originalData = loadtxt('F/F003.txt').T
	randomData = randomize(originalData)
	# get the DFA for both sets of values (dfa here appears to be a project-specific variant that also returns the values needed for plotting)
	alphaO, forplotO, forplotNO = dfa(originalData)
	alphaR, forplotR, forplotNR = dfa(randomData)

	#plot the results
	fig = pylab.figure()
	ax = fig.add_subplot(3, 1, 1)
	ori = ax.plot(log10(forplotNO), forplotO, 'b-', label = "Original Data")
	ran = ax.plot(log10(forplotNR), forplotR, 'g-', label = "Random Data")

	pylab.title("DFA")
	pylab.legend([ori[0], ran[0]], ['Original Data','Random Data'])
	pylab.text(2,3, r'$\alpha Original = %f $'%(alphaO), multialignment = 'center')
	pylab.text(2,2, r'$\alpha Random  = %f $'%(alphaR), multialignment = 'center')

	#plot the original and randomize data
	bx = fig.add_subplot(3, 1, 2)
Code Example #18
    def feature_wave(self, toolName=None, Fs=256):
        if toolName is None:
            print('please select a tool')
            return

        if toolName in self.FeatureSet.dict[0]:
            index = self.FeatureSet.dict[0][toolName]
        else:
            index = -1
        print(toolName)
        if toolName == 'DWT':
            answer_train = DWT(self.DataSet.trainSet_data[0], 'db4')
            answer_test = DWT(self.DataSet.testSet_data[0], 'db4')
            print('DWT feature extraction succeed db4')
        elif toolName == 'hurst':
            answer_train = [
                pyeeg.hurst(i) for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.hurst(i) for i in self.DataSet.testSet_data[0]
            ]
            print('hurst feature extraction succeed')
        elif toolName == 'dfa':
            answer_train = [
                pyeeg.dfa(i, L=[4, 8, 16, 32, 64])
                for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.dfa(i, L=[4, 8, 16, 32, 64])
                for i in self.DataSet.testSet_data[0]
            ]
            print('dfa feature extraction succeed')
        elif toolName == 'fisher_info':
            answer_train = [
                pyeeg.fisher_info(i, 2, 20)
                for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.fisher_info(i, 2, 20)
                for i in self.DataSet.testSet_data[0]
            ]
            print('fisher_info feature extraction succeed')
        elif toolName == 'svd_entropy':
            answer_train = [
                pyeeg.svd_entropy(i, 2, 20)
                for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.svd_entropy(i, 2, 20)
                for i in self.DataSet.testSet_data[0]
            ]
            print('svd_entropy feature extraction succeed')
        elif toolName == 'spectral_entropy':
            bandlist = [0.5, 4, 7, 12, 30, 100]
            answer_train = [
                pyeeg.spectral_entropy(i, bandlist, Fs)
                for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.spectral_entropy(i, bandlist, Fs)
                for i in self.DataSet.testSet_data[0]
            ]
            print('spectral_entropy feature extraction succeed')
        elif toolName == 'hjorth':
            # returns two values: the first is mobility, the second is complexity
            answer_train = [
                pyeeg.hjorth(i) for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.hjorth(i) for i in self.DataSet.testSet_data[0]
            ]
            answer_train = np.array(answer_train)
            answer_test = np.array(answer_test)

            for i in answer_train:
                i[1] = i[1] / 100
            for i in answer_test:
                i[1] = i[1] / 100

            # keep only the mobility component
            answer_train = np.array(answer_train[:, 0])
            answer_test = np.array(answer_test[:, 0])
            print('hjorth feature extraction succeed')
        elif toolName == 'hfd':
            answer_train = [
                pyeeg.hfd(i, 8) for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.hfd(i, 8) for i in self.DataSet.testSet_data[0]
            ]
            print('hfd feature extraction succeed')
        elif toolName == 'pfd':
            answer_train = [
                pyeeg.pfd(i) for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [pyeeg.pfd(i) for i in self.DataSet.testSet_data[0]]
            print('pfd feature extraction succeed')
        elif toolName == 'bin_power':
            bandlist = [0.5, 4, 7, 12]  #,30,100]
            answer_train = [
                pyeeg.bin_power(i, bandlist, Fs)
                for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.bin_power(i, bandlist, Fs)
                for i in self.DataSet.testSet_data[0]
            ]
            print('bin_power feature extraction succeed')

        else:
            print('does not have this kind of mode')
            return

        answer_train = np.array(answer_train)
        answer_train = answer_train.reshape(len(answer_train), -1)
        answer_test = np.array(answer_test)
        answer_test = answer_test.reshape(len(answer_test), -1)
        if index == -1:
            #print(len(self.FeatureSet.feature.trainSet_feat[0]),len(answer_train))
            self.FeatureSet.feature.trainSet_feat[0] = np.column_stack(
                (self.FeatureSet.feature.trainSet_feat[0], answer_train))
            self.FeatureSet.feature.testSet_feat[0] = np.column_stack(
                (self.FeatureSet.feature.testSet_feat[0], answer_test))
            self.FeatureSet.dict[0][toolName] = [
                self.FeatureSet.size[0],
                self.FeatureSet.size[0] + len(answer_train[0])
            ]
            self.FeatureSet.size[0] += len(answer_train[0])
        else:
            self.FeatureSet.feature.trainSet_feat[0][:, index[0]:index[1]] = [
                i for i in answer_train
            ]
            self.FeatureSet.feature.testSet_feat[0][:, index[0]:index[1]] = [
                i for i in answer_test
            ]
Code Example #19
        epoch = data_epoch[:, 1:]
        hypnogram = data_epoch[5, 0]  # take the fifth hypnogram value as the true state

        K = kurtosis(epoch)
        S = skew(epoch)
        activity = np.zeros(4)
        morbidity = np.zeros(4)
        complexity = np.zeros(4)
        zc = np.zeros(4)
        d_f_a = np.zeros(4)
        for k in range(4):
            activity[k], morbidity[k], complexity[k] = Hjorth(epoch[:, k])
            zc[k] = ((epoch[:, k][:-1] * epoch[:, k][1:]) < 0).sum()
            d_f_a[k] = pyeeg.dfa(epoch[:, k])
        feature_time = np.hstack(
            (K, S, activity, morbidity, complexity, zc, d_f_a))

        ############## freq ################################
        bandpower = []
        for col in range(4):
            Power = power(epoch[:, col], fs)
            bandpower.append(Power.copy())
        delta_power = np.zeros(4)
        theta_power = np.zeros(4)
        alpha_power = np.zeros(4)
        beta_power = np.zeros(4)
        gamma_power = np.zeros(4)
        for ii in range(4):
            delta_power[ii] = bandpower[ii]['Delta']
Code Example #20
def DFA(x):

    resp = pyeeg.dfa(x)

    return resp
Code Example #21
def myFeaturesExtractor1(
        X, myM, myV, myMin,
        myMax):  # X has to be a matrix where each row is a channel
    N = len(X)  # number of channels
    #    L = len(X[0])

    # get number of features
    nFeatures = nMono * N + N * (N - 1) / 2
    # here we initialize the list of features // We will transform it to an array later
    featList = np.zeros((int(nFeatures)))
    # deal with monovariate features first
    for kChan in range(N):
        kFeat = 0
        mySig = X[kChan, :]
        #========== Stats ========================
        myMean = myM[kChan]
        featList[nMono * kChan + kFeat] = myMean
        kFeat += 1

        featList[nMono * kChan + kFeat] = myMax[kChan]
        kFeat += 1

        featList[nMono * kChan + kFeat] = myMin[kChan]
        kFeat += 1

        myVar = myV[kChan]
        featList[nMono * kChan + kFeat] = myVar
        kFeat += 1
        featList[nMono * kChan + kFeat] = sp.skew(mySig)
        kFeat += 1
        featList[nMono * kChan + kFeat] = sp.kurtosis(mySig)
        kFeat += 1

        featList[nMono * kChan + kFeat] = pyeeg.dfa(mySig)
        kFeat += 1

        hMob, hComp = pyeeg.hjorth(mySig)
        featList[nMono * kChan + kFeat] = hMob
        kFeat += 1
        featList[nMono * kChan + kFeat] = hComp
        kFeat += 1
        ## ======== fractal ========================
        # Now we need to get the embedding time lag Tau and the embedding dimension
        ac = delay.acorr(mySig, maxtau=maxTauLag, norm=True, detrend=True)
        Tau = firstTrue(ac < corrThresh)  # embedding delay

        featList[nMono * kChan + kFeat] = Tau
        kFeat += 1

        PFD = pyeeg.pfd(mySig, D=None)
        hfd10 = pyeeg.hfd(mySig, 10)

        featList[nMono * kChan + kFeat] = PFD
        kFeat += 1

        featList[nMono * kChan + kFeat] = hfd10
        kFeat += 1

        ## ======== Entropy ========================
        # here we compute bin power
        power, power_Ratio = pyeeg.bin_power(mySig, freqBins, Fs)
        featList[nMono * kChan + kFeat] = pyeeg.spectral_entropy(
            mySig, freqBins, Fs, Power_Ratio=power_Ratio)
        kFeat += 1
        ## ======== Spectral ========================
        for kBin in range(len(freqBins) - 1):
            featList[nMono * kChan + kFeat] = power[kBin]
            kFeat += 1
            featList[nMono * kChan + kFeat] = power_Ratio[kBin]
            kFeat += 1

    # now deal with the multivariate (connectivity) features
    #============ connectivity ==================
    corrList = connectome(X)
    nConnect = len(corrList)
    if N * (N - 1) / 2 != nConnect:
        raise ValueError('incorrect number of correlation coeffs')

    for kC in range(nConnect):
        featList[-nConnect + kC] = corrList[kC]

    return featList
Code Example #22
def calculate_features(samples):
    data = samples
    if not samples:
        print("no samples")
        return []

    band = [0.5, 4, 7, 12, 30]
    a = randn(4097)
    # approx = pyeeg.ap_entropy(data, 5, 1)
    approx = 0
    DFA = pyeeg.dfa(data)
    first_order_diff = [data[i] - data[i - 1] for i in range(1, len(data))]
    fisher_info = pyeeg.fisher_info(data, 1, 1, W=None)
    embed_seq = pyeeg.embed_seq(data, 1, 1)
    hfd = pyeeg.hfd(data, 6)
    hjorth = pyeeg.hjorth(data, D=None)
    hurst = pyeeg.hurst(data)
    PFD = pyeeg.pfd(data)
    sam_ent = pyeeg.samp_entropy(data, 1, 2)
    spectral_entropy = pyeeg.spectral_entropy(data,
                                              band,
                                              256,
                                              Power_Ratio=None)
    svd = pyeeg.svd_entropy(data, 6, 4, W=None)
    PSI = pyeeg.bin_power(data, band, 256)

    # # Power Spectral Intensity (PSI) and Relative Intensity Ratio (RIR): two 1-D vectors
    # # print("bin_power = ", PSI)
    # # Petrosian Fractal Dimension (PFD): a scalar
    # print("PFD = ", PFD)
    # # Higuchi Fractal Dimension (HFD): a scalar
    # print("hfd = ", hfd)
    # # Hjorth mobility and complexity: two scalars
    # print("hjorth = ", hjorth)
    # # Spectral Entropy (Shannon's entropy of RIRs): a scalar
    # print("spectral_entropy = ", spectral_entropy)
    # # SVD Entropy: a scalar
    # print("svd = ", svd)
    # # Fisher Information: a scalar
    # print("fisher_info = ", fisher_info)
    # # Approximate Entropy (ApEn): a scalar
    # print("approx_entropy = ", approx)
    # # Detrended Fluctuation Analysis (DFA): a scalar
    # print("DFA = ", DFA)
    # # Hurst Exponent (Hurst): a scalar
    # print("Hurst_Exponent = ", hurst)
    # # Build a set of embedding sequences from the time series X with lag Tau and the given embedding dimension
    # print("embed_seq = ", embed_seq)
    # # Compute the first-order difference of a time series.
    # print("first_order_diff = ", first_order_diff)

    return {
        'approximate': approx,
        'DFA': DFA,
        'fisher_info': fisher_info,
        'embed_seq': embed_seq,
        'hfd': hfd,
        'hjorth': hjorth,
        'hurst': hurst,
        'PFD': PFD,
        'sam_ent': sam_ent,
        'spectral_entropy': spectral_entropy,
        'svd': svd,
        'PSI': PSI,
        'first_order_diff': first_order_diff
    }