Example #1
def get_state_features(state):

    nof = len(state)
    po = 600

    pfds = np.zeros((4, int(nof / po)))
    ap_entropy = np.zeros((4, int(nof / po)))
    hursts = np.zeros((4, int(nof / po)))
    hfd = np.zeros((4, int(nof / po)))
    bins = np.zeros(((int(nof / po), 4, 2, 5)))

    lastnum = 0

    for i in range(0, (int(nof / po))):
        channels = np.zeros((4, po))

        for x in range(0, po):
            for y in range(0, 4):

                channels[y, x] = float(state[lastnum + x, y])

        for x in range(0, 4):
            channels[x] = scipy.signal.savgol_filter(channels[x],
                                                     11,
                                                     3,
                                                     deriv=0,
                                                     delta=1.0,
                                                     axis=-1,
                                                     mode='interp',
                                                     cval=0.0)

        #alpha=[]
        if ((nof - lastnum) != 0):
            for x in range(0, 4):
                hursts[x, i] = pyeeg.hurst(channels[x])
                pfds[x, i] = pyeeg.pfd(channels[x])
                #ap_entropy[x,i] = pyeeg.ap_entropy(X, M, R)
                hfd[x, i] = pyeeg.hfd(channels[x], 15)
                bins[i, x] = pyeeg.bin_power(channels[x],
                                             [0.5, 4, 7, 12, 15, 18], 200)
                k = 1
        lastnum = lastnum + po

    return pfds, hursts, bins, hfd
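A minimal, hypothetical driver for the function above (not part of the original example): it assumes `state` is an (N, 4) samples-by-channels array recorded at 200 Hz, which the function splits into 600-sample windows.

import numpy as np
import scipy.signal
import pyeeg

# Synthetic stand-in for real EEG: 10 windows of 600 samples, 4 channels.
state = np.random.randn(6000, 4)
pfds, hursts, bins, hfd = get_state_features(state)
print(pfds.shape, hursts.shape, bins.shape, hfd.shape)  # (4, 10) (4, 10) (10, 4, 2, 5) (4, 10)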
def getdata(num):
	global lastnum
	global pfds
	global dfas
	global hursts
	global bins
	global nof
	global po
	#file = 'C:\\Users\\Ammar Raufi\\Desktop\\openbci\\software\\application.windows64\\SavedData\\OpenBCI-RAW-2017-03-18_18-46-49.txt' 

	#fid = open(file, 'r')
	
	#lines = fid.readlines()

	#numberOfFrames = len(lines)-6
	#print(numberOfFrames-lastnum)
	
	
	# NOTE: this snippet also relies on a module-level `lines` list holding the
	# raw OpenBCI text rows (see the commented-out file reading above).
	channels = np.zeros((4,po))
	
	#alpha = np.zeros(4)
	for x in range(0,po): #numberOfFrames-lastnum-6
		
		for y in range(0,4):
			
			channels[y,x] = float(lines[lastnum+x+6].split(',')[y+1])
	
	#alpha=[]
	if((nof-lastnum)!=0):
		for x in range(0,4):
			hursts[x,num] = pyeeg.hurst(channels[x])
			#pfds[x,num] = pyeeg.pfd(channels[x])
			#dfas[x,num] = pyeeg.dfa(channels[x])			
			bins[num,x] = pyeeg.bin_power(channels[x], [0.5,4,7,12,30], 200)	
			k=1

	print (lastnum)
		#print (alpha)
		
	lastnum=lastnum+po
	return channels[0]
Example #3
def get_features(signal):
    #print(signal)

    freq_cutoffs = [3, 8, 12, 27, 50]

    features = []

    features.append(rms(signal))

    s = lpf(signal, SAMPLING_RATE, freq_cutoffs[0])
    features.append(rms(s))

    for i in range(len(freq_cutoffs) - 1):
        s = bp(signal, SAMPLING_RATE, freq_cutoffs[i], freq_cutoffs[i + 1])
        features.append(rms(s))

    fourier = np.fft.rfft(signal * np.hamming(signal.size))
    features.extend(abs(fourier))

    wsize = 64
    X = mne.time_frequency.stft(signal, wsize, verbose=False)
    freqs = np.reshape(abs(X), X.size)
    features.extend(freqs)

    features.append(pyeeg.hurst(signal))
    features.append(pyeeg.hfd(signal, 10))
    e = pyeeg.spectral_entropy(signal, np.append(0.5, freq_cutoffs),
                               SAMPLING_RATE)
    features.append(e)

    features.extend(pyeeg.hjorth(signal))
    features.append(pyeeg.pfd(signal))
    features.append(pyeeg.mean(signal))

    features.append(scipy.stats.skew(signal))
    features.append(scipy.stats.kurtosis(signal))

    #features.extend(signal)

    return features
Example #5
def eeg_features(data):
    data = np.asarray(data)
    res = np.zeros([22])
    Kmax = 5
    # M    = 10
    # R    = 0.3
    Band = [1, 5, 10, 15, 20, 25]
    Fs = 256
    power, power_ratio = pyeeg.bin_power(data, Band, Fs)
    f, P = welch(data, fs=Fs, window='hanning', noverlap=0,
                 nfft=int(256.))  # Signal power spectrum
    area_freq = cumtrapz(P, f, initial=0)
    res[0] = np.sqrt(np.sum(np.power(data, 2)) /
                     data.shape[0])  # amplitude RMS
    res[1] = statistics.stdev(data)**2  # variance
    res[2] = kurtosis(data)  # kurtosis
    res[3] = skew(data)  # skewness
    res[4] = max(data)  # max amplitude
    res[5] = min(data)  # min amplitude
    res[6] = len(argrelextrema(
        data, np.greater)[0])  # number of local extrema or peaks
    res[7] = ((data[:-1] * data[1:]) < 0).sum()  # number of zero crossings
    res[8] = pyeeg.hfd(data, Kmax)  # Higuchi Fractal Dimension
    res[9] = pyeeg.pfd(data)  # Petrosian Fractal Dimension
    res[10] = pyeeg.hurst(data)  # Hurst exponent
    res[11] = pyeeg.spectral_entropy(
        data, Band, Fs, Power_Ratio=power_ratio)  # spectral entropy (1.21s)
    res[12] = area_freq[-1]  # total power
    res[13] = f[np.where(area_freq >= res[12] / 2)[0][0]]  # median frequency
    res[14] = f[np.argmax(P)]  # peak frequency
    res[15], res[16] = pyeeg.hjorth(data)  # Hjorth mobility and complexity
    res[17] = power_ratio[0]
    res[18] = power_ratio[1]
    res[19] = power_ratio[2]
    res[20] = power_ratio[3]
    res[21] = power_ratio[4]
    # res[22] = pyeeg.samp_entropy(data, M, R)             # sample entropy
    # res[23] = pyeeg.ap_entropy(data, M, R)             # approximate entropy (1.14s)
    return (res)
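A hypothetical one-off call for the function above (it assumes the snippet's own imports: numpy, pyeeg, scipy's welch, cumtrapz, argrelextrema, kurtosis, skew, and the statistics module):

import numpy as np

# One second of synthetic data at Fs = 256 Hz; the result is the 22-element
# feature vector laid out by the res[...] assignments above.
epoch = np.random.randn(256)
res = eeg_features(epoch)
print(res.shape)  # (22,)
print(res[8:11])  # HFD, PFD, Hurst exponent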
Example #6
    def StatisticalFeatures(self, data):

        mean = np.mean(data)  # Mean of data
        std = np.std(data)  # std of data
        pfd = pyeeg.pfd(data)  # Petrosian Fractal Dimension
        hurst = pyeeg.hurst(data)  # Hurst Exponent Feature
        hjorth = pyeeg.hjorth(data)  # Hjorth mobility and complexity
        dfa = pyeeg.dfa(data)  # Detrended Fluctuation Analysis
        corr = nolds.corr_dim(data, 1)  # Correlation Dimension Feature
        power = np.sum(np.abs(data)**2) / len(data)  # Power feature
        FD = hfda(data, 5)  # fractal dimension

        statistics = {
            "mean": mean,
            "std": std,
            "pfd": pfd,
            "hurst": hurst,
            "hjorth": hjorth,
            "dfa": dfa,
            "corr": corr,
            "power": power
        }

        return (statistics)
Example #7
def get_state_features(channel):

    nof = len(channel)
    pfds = np.zeros((4))
    ap_entropy = np.zeros((4))
    hursts = np.zeros((4))
    hfd = np.zeros((4))
    bins = np.zeros(((4, 2, 5)))

    lastnum = 0

    #alpha=[]
    if ((nof - lastnum) != 0):
        for x in range(0, 4):
            hursts[x] = pyeeg.hurst(channel[x])
            pfds[x] = pyeeg.pfd(channel[x])
            #ap_entropy[x,i] = pyeeg.ap_entropy(X, M, R)
            hfd[x] = pyeeg.hfd(channel[x], 15)
            bins[x] = pyeeg.bin_power(channel[x], [0.5, 4, 7, 12, 15, 18], 200)

    delta = np.zeros((4))
    beta = np.zeros((4))
    alpha = np.zeros((4))
    theta = np.zeros((4))
    dfas = np.zeros((4))
    bt = np.zeros((4))

    for y in range(0, 4):
        delta[y] = bins[y, 0, 0]
        theta[y] = bins[y, 0, 1]
        alpha[y] = bins[y, 0, 2]
        beta[y] = bins[y, 0, 4]
        bt[y] = theta[y] / beta[y]

    lastnum = lastnum + nof

    return pfds, dfas, hursts, bins, bt, hfd
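A hedged usage sketch for the function above (an assumption, since the original does not show the caller): despite the parameter name, `channel` is indexed as four separate channels, so a (4, N) array sampled at 200 Hz fits the code.

import numpy as np
import pyeeg

# Hypothetical input: 4 channels x 600 samples of synthetic data.
channels = np.random.randn(4, 600)
pfds, dfas, hursts, bins, bt, hfd = get_state_features(channels)
print(bt)  # per-channel theta/beta ratio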
    def Hurst(self):
        resp = pyeeg.hurst(self.channel_data)
        return [np.array([resp]), ['hurst']]
    def feature_wave(self, toolName=None, Fs=256):
        if toolName is None:
            print('please select a tool')
            return

        if toolName in self.FeatureSet.dict[0]:
            index = self.FeatureSet.dict[0][toolName]
        else:
            index = -1
        print(toolName)
        if toolName == 'DWT':
            answer_train = DWT(self.DataSet.trainSet_data[0], 'db4')
            answer_test = DWT(self.DataSet.testSet_data[0], 'db4')
            print('DWT feature extraction succeed db4')
        elif toolName == 'hurst':
            answer_train = [
                pyeeg.hurst(i) for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.hurst(i) for i in self.DataSet.testSet_data[0]
            ]
            print('hurst feature extraction succeed')
        elif toolName == 'dfa':
            answer_train = [
                pyeeg.dfa(i, L=[4, 8, 16, 32, 64])
                for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.dfa(i, L=[4, 8, 16, 32, 64])
                for i in self.DataSet.testSet_data[0]
            ]
            print('dfa feature extraction succeed')
        elif toolName == 'fisher_info':
            answer_train = [
                pyeeg.fisher_info(i, 2, 20)
                for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.fisher_info(i, 2, 20)
                for i in self.DataSet.testSet_data[0]
            ]
            print('fisher_info feature extraction succeed')
        elif toolName == 'svd_entropy':
            answer_train = [
                pyeeg.svd_entropy(i, 2, 20)
                for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.svd_entropy(i, 2, 20)
                for i in self.DataSet.testSet_data[0]
            ]
            print('svd_entropy feature extraction succeed')
        elif toolName == 'spectral_entropy':
            bandlist = [0.5, 4, 7, 12, 30, 100]
            answer_train = [
                pyeeg.spectral_entropy(i, bandlist, Fs)
                for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.spectral_entropy(i, bandlist, Fs)
                for i in self.DataSet.testSet_data[0]
            ]
            print('spectral_entropy feature extraction succeed')
        elif toolName == 'hjorth':
            # hjorth returns two values: the first is mobility, the second is complexity
            answer_train = [
                pyeeg.hjorth(i) for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.hjorth(i) for i in self.DataSet.testSet_data[0]
            ]
            answer_train = np.array(answer_train)
            answer_test = np.array(answer_test)

            for i in answer_train:
                i[1] = i[1] / 100
            for i in answer_test:
                i[1] = i[1] / 100

            # keep only the mobility term
            answer_train = np.array(answer_train[:, 0])
            answer_test = np.array(answer_test[:, 0])
            print('hjorth feature extraction succeed')
        elif toolName == 'hfd':
            answer_train = [
                pyeeg.hfd(i, 8) for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.hfd(i, 8) for i in self.DataSet.testSet_data[0]
            ]
            print('hfd feature extraction succeed')
        elif toolName == 'pfd':
            answer_train = [
                pyeeg.pfd(i) for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [pyeeg.pfd(i) for i in self.DataSet.testSet_data[0]]
            print('pfd feature extraction succeed')
        elif toolName == 'bin_power':
            bandlist = [0.5, 4, 7, 12]  #,30,100]
            answer_train = [
                pyeeg.bin_power(i, bandlist, Fs)
                for i in self.DataSet.trainSet_data[0]
            ]
            answer_test = [
                pyeeg.bin_power(i, bandlist, Fs)
                for i in self.DataSet.testSet_data[0]
            ]
            print('bin_power feature extraction succeed')

        else:
            print('no such feature extraction tool:', toolName)
            return

        answer_train = np.array(answer_train)
        answer_train = answer_train.reshape(len(answer_train), -1)
        answer_test = np.array(answer_test)
        answer_test = answer_test.reshape(len(answer_test), -1)
        if index == -1:
            #print(len(self.FeatureSet.feature.trainSet_feat[0]),len(answer_train))
            self.FeatureSet.feature.trainSet_feat[0] = np.column_stack(
                (self.FeatureSet.feature.trainSet_feat[0], answer_train))
            self.FeatureSet.feature.testSet_feat[0] = np.column_stack(
                (self.FeatureSet.feature.testSet_feat[0], answer_test))
            self.FeatureSet.dict[0][toolName] = [
                self.FeatureSet.size[0],
                self.FeatureSet.size[0] + len(answer_train[0])
            ]
            self.FeatureSet.size[0] += len(answer_train[0])
        else:
            self.FeatureSet.feature.trainSet_feat[0][:, index[0]:index[1]] = [
                i for i in answer_train
            ]
            self.FeatureSet.feature.testSet_feat[0][:, index[0]:index[1]] = [
                i for i in answer_test
            ]
Example #10
def Hurst(x):

    resp = pyeeg.hurst(x)

    return resp
Example #11
# (fragment: `reqs`, `indice` and `escala` are assumed to be defined earlier)
reqs_escala=[]
soma = 0
while (indice < len(reqs)):
    soma = 0
    for i in range(indice, indice + escala):
        try:
          soma += reqs[i]
        except IndexError:
          pass
    reqs_escala.append(soma)
    indice += escala




H=pyeeg.hurst(reqs_escala)
print(escala, H)



#compute the accumulated requests (A_t) between t1 and t2
#for the 1 s scale, t2 - t1 = 86400

A_t = []
A_t_acumulado = 0

for i in reqs_escala:
  A_t_acumulado += i
  A_t.append(A_t_acumulado)

E = 0.9999
Example #12
import pyeeg as pyeeg
from numpy.random import randn

f = open('array_dia2','r')

lines=f.readlines()
reqs = []
for i in lines:
    reqs.append(float(i.split(" ")[1]))
pyeeg.hurst(reqs)
	while len(vibration_list) != 90:
		vibration_list.append(0)
	#print vibration_list
	#vibration_array = np.zeros(fft_size)
	#for i in range(len(vibration_list)):
	#	index = (vibration_list[i]-1) * sampling_rate
	#	vibration_array[index:index+sampling_rate] = 1

	(power_xf, power_xf_filtered, freqs, xfp) = return_filtered_epoch(time_series)

	dominant_f = return_dominant_freq(freqs, power_xf_filtered)
	(delta_ratio, theta_ratio, alpha_ratio, sigma_ratio, beta_ratio) = return_power_ratio(freqs, power_xf_filtered)
	(A5_mean, D5_mean, D4_mean, D3_mean, A5_std, D5_std, D4_std, D3_std, A5_pm, D5_pm, D4_pm, D3_pm, \
			A5_ratio_mean, D5_ratio_mean, D4_ratio_mean, D3_ratio_mean) = return_DWT_feature(time_series)

	hurst_index = pyeeg.hurst(time_series)
	pfd_index = pyeeg.pfd(time_series)
	sp_entropy = pyeeg.spectral_entropy(time_series, [0.5, 3, 8, 12, 16, 30], sampling_rate, Power_Ratio = None)
	hj_activity, hj_mobility, hj_complexity = pyeeg.hjorth(time_series)

	fmax=getfmax(time_series)
	fmin=getfmin(time_series)
	fmean=getfmean(time_series)
	fstd=getfstd(time_series)
	fvar=getfvar(time_series)
	fskew=getfskew(time_series)
	fkur=getfkur(time_series)
	fmd=getfmd(time_series)
	zcnum=getzcnum(time_series)

	print [fmax, fmin, fmean, fstd, fvar, fskew, fkur, fmd, zcnum, dominant_f[0], delta_ratio, \
def myFeaturesExtractor(
        X, myM, myV):  # X has to be a matrix where each row is a channel
    N = len(X)  # number of channels
    L = len(X[0])
    maxtLyap = min(500, L // 2 + L // 4)
    lyapLags = np.arange(maxtLyap) / Fs

    # get number of features
    nFeatures = nMono * N + N * (N - 1) / 2
    # here we initialize the list of features // We will transform it to an array later
    featList = np.zeros((int(nFeatures)))
    # deal with monovariate features first
    for kChan in range(N):
        kFeat = 0
        mySig = X[kChan, :]
        #========== Stats ========================
        myMean = myM[kChan]
        featList[nMono * kChan + kFeat] = myMean
        kFeat += 1
        myMax = max(mySig)
        featList[nMono * kChan + kFeat] = myMax
        kFeat += 1
        myMin = min(mySig)
        featList[nMono * kChan + kFeat] = myMin
        kFeat += 1
        peak = max(abs(np.array([myMin, myMax])))
        featList[nMono * kChan + kFeat] = peak
        kFeat += 1
        myVar = myV[kChan]
        featList[nMono * kChan + kFeat] = myVar
        kFeat += 1
        featList[nMono * kChan + kFeat] = sp.skew(mySig)
        kFeat += 1
        featList[nMono * kChan + kFeat] = sp.kurtosis(mySig)
        kFeat += 1
        myRMS = rms(mySig)
        featList[nMono * kChan + kFeat] = myRMS
        kFeat += 1
        featList[nMono * kChan + kFeat] = peak / myRMS
        kFeat += 1

        featList[nMono * kChan + kFeat] = totVar(mySig)
        kFeat += 1
        featList[nMono * kChan + kFeat] = pyeeg.dfa(mySig)
        kFeat += 1
        featList[nMono * kChan + kFeat] = pyeeg.hurst(mySig)
        kFeat += 1
        hMob, hComp = pyeeg.hjorth(mySig)
        featList[nMono * kChan + kFeat] = hMob
        kFeat += 1
        featList[nMono * kChan + kFeat] = hComp
        kFeat += 1
        ## ======== fractal ========================
        # Now we need to get the embeding time lag Tau and embeding dmension
        ac = delay.acorr(mySig, maxtau=maxTauLag, norm=True, detrend=True)
        Tau = firstTrue(ac < corrThresh)  # embeding delay

        f1 , f2 , f3 = dimension.fnn(mySig, dim=dim, tau=Tau, R=10.0, A=2.0, metric='euclidean',\
                                     window=10,maxnum=None, parallel=True)
        myEmDim = firstTrue(f3 < fracThresh)
        # Here we construct the Embeding Matrix Em
        Em = pyeeg.embed_seq(mySig, Tau, myEmDim)
        U, s, Vh = linalg.svd(Em)
        W = s / np.sum(s)  # list of singular values in decreasing order
        FInfo = pyeeg.fisher_info(X, Tau, myEmDim, W=W)
        featList[nMono * kChan + kFeat] = FInfo
        kFeat += 1
        featList[nMono * kChan + kFeat] = Tau
        kFeat += 1
        featList[nMono * kChan + kFeat] = myEmDim
        kFeat += 1
        #========================================
        PFD = pyeeg.pfd(mySig, D=None)
        hfd6 = pyeeg.hfd(mySig, 6)
        hfd10 = pyeeg.hfd(mySig, 10)
        # Now we fit aline and get its slope to have Lyapunov exponent
        divAvg = lyapunov.mle(Em,
                              maxt=maxtLyap,
                              window=3 * Tau,
                              metric='euclidean',
                              maxnum=None)
        poly = np.polyfit(lyapLags,
                          divAvg,
                          1,
                          rcond=None,
                          full=False,
                          w=None,
                          cov=False)
        LyapExp = poly[0]

        featList[nMono * kChan + kFeat] = PFD
        kFeat += 1
        featList[nMono * kChan + kFeat] = hfd6
        kFeat += 1
        featList[nMono * kChan + kFeat] = hfd10
        kFeat += 1
        featList[nMono * kChan + kFeat] = LyapExp
        kFeat += 1

        ## ======== Entropy ========================
        tolerance = 1 / 4
        entropyDim = max([myEmDim, PFD])

        featList[nMono * kChan + kFeat] = pyeeg.samp_entropy(
            mySig, entropyDim, tolerance)
        kFeat += 1
        featList[nMono * kChan + kFeat] = pyeeg.svd_entropy(mySig,
                                                            Tau,
                                                            myEmDim,
                                                            W=W)
        kFeat += 1

        # here we compute bin power
        power, power_Ratio = pyeeg.bin_power(mySig, freqBins, Fs)
        featList[nMono * kChan + kFeat] = pyeeg.spectral_entropy(
            mySig, freqBins, Fs, Power_Ratio=power_Ratio)
        kFeat += 1
        ## ======== Spectral ========================
        for kBin in range(len(freqBins) - 1):
            featList[nMono * kChan + kFeat] = power[kBin]
            kFeat += 1
            featList[nMono * kChan + kFeat] = power_Ratio[kBin]
            kFeat += 1

    # deal with multivariate features first
    #============ connectivity ==================
    corrList = connectome(X)
    nConnect = len(corrList)
    if N * (N - 1) / 2 != nConnect:
        raise ValueError('incorrect number of correlation coeffs')

    for kC in range(nConnect):
        featList[-nConnect + kC] = corrList[kC]

    return featList
Example #16
def myFeaturesExtractor(X): # X has to be a matrix where each row is a channel
    N = len(X)
    # L = len(X[0])
    # here we initialize the list of features // We will transform it to an array later
    featList = list()
    timeList = list()
    featName = list()
    for kChan in range(1):
        mySig = X[kChan , :]
        if kChan == 0:
            start=time.perf_counter_ns()
            
        #========== Stats ========================
        myMean = np.mean(mySig)
        featList.append(myMean)
        if kChan == 0:
            end=time.perf_counter_ns()
            timeList.append(end -start)
            featName.append("mean")
            start=end
        featList.append(max(mySig))
        
        if kChan == 0:
            end=time.perf_counter_ns()
            timeList.append(end -start)
            featName.append(" max")
            start=end
        featList.append(min(mySig))
        if kChan == 0:
            end=time.perf_counter_ns()
            timeList.append(end -start)
            featName.append(" min")
            start=end            
        peak =max(abs(mySig))
        featList.append(peak)
        if kChan == 0:
            end=time.perf_counter_ns()
            timeList.append(end -start)
            featName.append(" peak")
            start=end            
        myVar = np.var(mySig)
        featList.append(myVar)
        if kChan == 0:
            end=time.perf_counter_ns()
            timeList.append(end -start)
            featName.append(" var")
            start=end
        myVar = np.var(mySig)    
        myStd = np.sqrt(myVar)
        featList.append(myStd)
        if kChan == 0:
            end=time.perf_counter_ns()
            timeList.append(end -start)
            featName.append(" std")
            start=end             
        featList.append(sp.skew(mySig))
        if kChan == 0:
            end=time.perf_counter_ns()
            timeList.append(end -start)
            featName.append(" skew")
            start=end

        featList.append(sp.kurtosis(mySig))
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" kurt")
            start=end
        myRMS = rms(mySig)
        featList.append(myRMS)
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" rms")
            start=end
        myRMS = rms(mySig)    
        featList.append(peak/myRMS)
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" fact")
            start=end
        myRMS = rms(mySig)    
        featList.append(myRMS/myMean)
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" papr")
            start=end
        featList.append(totVar(mySig))
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" totVar")
            start=end
            
        featList.append(pyeeg.dfa(mySig))
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" dfa")
            start=end
        featList.append(pyeeg.hurst(mySig))
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" hurst")
            start=end
        hMob , hComp = pyeeg.hjorth(mySig )
        featList.append(hMob)
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" Hmob")
            timeList.append(end -start)
            featName.append(" Hcomp")
            start=end
        
        featList.append(hComp)
            
            
        
#        ## ======== fractal ========================
#        # Now we need to get the embeding time lag Tau and embeding dmension
#        ac=delay.acorr(mySig, maxtau=maxTauLag, norm=True, detrend=True)
#        Tau = firstTrue(ac < corrThresh) # embeding delay
#        featList.append(Tau)
#        if kChan == 0:
#            end=time.perf_counter_ns()
#            
#            timeList.append(end -start)
#            featName.append(" dCorrTime")
#            start=end
#        f1 , f2 , f3 = dimension.fnn(mySig, dim=dim, tau=Tau, R=10.0, A=2.0, metric='chebyshev', window=10,maxnum=None, parallel=True)
#        myEmDim = firstTrue(f3 < fracThresh)
##        if kChan == 0:
##            end=time.perf_counter_ns()
##            timeList.append(end -start)
##            featName.append(" embDim")
##            start=end
#        # Here we construct the Embeding Matrix Em
#        Em = pyeeg.embed_seq(mySig, Tau, myEmDim)
#        U, s, Vh = linalg.svd(Em)
#        W = s/np.sum(s)  # list of singular values in decreasing order 
#        
#        FInfo = pyeeg.fisher_info(X, Tau, myEmDim , W=W)
#        featList.append(FInfo)
#        if kChan == 0:
#            end=time.perf_counter_ns()
#            
#            timeList.append(end -start)
#            featName.append(" FInfo")
#            start=end
#
#        featList.append(myEmDim)
        
        
        PFD = pyeeg.pfd(mySig, D=None)
        featList.append(PFD)
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" pfd")
            start=end
            
        hfd6 = pyeeg.hfd(mySig , 6)
        featList.append(hfd6)
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" hfd6")
            start=end
        hfd10 = pyeeg.hfd(mySig , 10)
        featList.append(hfd10)
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" hfd10")
            start=end
        # Now we fit aline and get its slope to have Lyapunov exponent
#        divAvg = lyapunov.mle(Em, maxt=maxtLyap, window= 3 * Tau, metric='euclidean', maxnum=None)
#        poly = np.polyfit(lyapLags, divAvg, 1, rcond=None, full=False, w=None, cov=False)
#        LyapExp = poly[0]
#        featList.append(np.mean(LyapExp)) 
#        if kChan == 0:
#            end=time.perf_counter_ns()
#            
#            timeList.append(end -start)
#            featName.append("Lyapunov")
#            start=end
               
        ## ======== Entropy ========================
        
        # here we compute bin power 
        power, power_Ratio = pyeeg.bin_power(mySig , freqBins , Fs )
        
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append("Spectral")
            start=end
        featList.append( pyeeg.spectral_entropy(mySig, freqBins, Fs, Power_Ratio=power_Ratio))
        if kChan == 0:
            end=time.perf_counter_ns()
            
            timeList.append(end -start)
            featName.append(" specEn")
            start=end
            
#        tolerance = myStd / 4
#        entropyDim = max([myEmDim , PFD])
#        featList.append( pyeeg.samp_entropy(mySig , entropyDim , tolerance ) )
#        if kChan == 0:
#            end=time.perf_counter_ns()
#            
#            timeList.append(end -start)
#            featName.append(" sampEn")
#            start=end
#        featList.append( pyeeg.svd_entropy(mySig, Tau, myEmDim , W=W) )
#        if kChan == 0:
#            end=time.perf_counter_ns()
#            
#            timeList.append(end -start)
#            featName.append(" svdEn")
#            start=end
            
        ## ======== Spectral ========================
        appendArray2List(featList , power )
        appendArray2List(featList , power_Ratio )
    
    start=time.perf_counter_ns()
    connectome(X , featList)
    end=time.perf_counter_ns()
    timeList.append((end -start)/N/(N-1)*2)
    featName.append("connectivity")
            
    ll=list()
    ll.append(featName)
    ll.append(timeList)    
    return np.asarray(featList) , ll
def calculate_features(samples):
    data = samples
    if not samples:
        print("no samples")
        return []

    band = [0.5, 4, 7, 12, 30]
    a = randn(4097)
    # approx = pyeeg.ap_entropy(data, 5, 1)
    approx = 0
    DFA = pyeeg.dfa(data)
    first_order_diff = [data[i] - data[i - 1] for i in range(1, len(data))]
    fisher_info = pyeeg.fisher_info(data, 1, 1, W=None)
    embed_seq = pyeeg.embed_seq(data, 1, 1)
    hfd = pyeeg.hfd(data, 6)
    hjorth = pyeeg.hjorth(data, D=None)
    hurst = pyeeg.hurst(data)
    PFD = pyeeg.pfd(data)
    sam_ent = pyeeg.samp_entropy(data, 1, 2)
    spectral_entropy = pyeeg.spectral_entropy(data,
                                              band,
                                              256,
                                              Power_Ratio=None)
    svd = pyeeg.svd_entropy(data, 6, 4, W=None)
    PSI = pyeeg.bin_power(data, band, 256)

    # # Power Spectral Intensity (PSI) and Relative Intensity Ratio (RIR): two 1-D vectors
    # # print("bin_power = ", PSI)
    # # Petrosian Fractal Dimension (PFD): a scalar
    # print("PFD = ", PFD)
    # # Higuchi Fractal Dimension (HFD): a scalar
    # print("hfd = ", hfd)
    # # Hjorth mobility and complexity: two scalars
    # print("hjorth = ", hjorth)
    # # Spectral Entropy (Shannon's entropy of RIRs): a scalar
    # print("spectral_entropy = ", spectral_entropy)
    # # SVD Entropy: a scalar
    # print("svd = ", svd)
    # # Fisher Information: a scalar
    # print("fisher_info = ", fisher_info)
    # # Approximate Entropy (ApEn): a scalar
    # print("approx entropy = ", approx)
    # # Detrended Fluctuation Analysis (DFA): a scalar
    # print("DFA = ", DFA)
    # # Hurst Exponent (Hurst): a scalar
    # print("Hurst_Exponent = ", hurst)
    # # Build a set of embedding sequences from the given time series X with lag Tau and embedding dimension DE
    # print("embed_seq = ", embed_seq)
    # # Compute the first order difference of a time series.
    # print("first_order_diff = ", first_order_diff)

    return {
        'approximate': approx,
        'DFA': DFA,
        'fisher_info': fisher_info,
        'embed_seq': embed_seq,
        'hfd': hfd,
        'hjorth': hjorth,
        'hurst': hurst,
        'PFD': PFD,
        'sam_ent': sam_ent,
        'spectral_entropy': spectral_entropy,
        'svd': svd,
        'PSI': PSI,
        'first_order_diff': first_order_diff
    }
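A hypothetical driver for the function above (not from the original project); it assumes `pyeeg` and `randn` are imported as at the top of the snippet and passes a plain Python list so the `if not samples` check behaves as written.

from numpy.random import randn

# One synthetic 4097-sample segment (the same length as the unused
# `a = randn(4097)` placeholder above).
samples = list(randn(4097))
feats = calculate_features(samples)
print(feats['hurst'], feats['PFD'], feats['hfd'])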
Example #18
def get_state_features(state):
	
	nof = len(state)
	po = 600
	
	pfds = np.zeros((4,int(nof/po)))
	
	ap_entropy = np.zeros((4,int(nof/po)))
	hursts = np.zeros((4,int(nof/po)))
	hfd = np.zeros((4,int(nof/po)))
	dfas = np.zeros((4,int(nof/po)))  # never filled here, but returned at the end
	bins = np.zeros(((int(nof/po),4,2,5)))
	
	lastnum=0

	for i in range (0,(int(nof/po))):
		channels = np.zeros((4,po))		
		channels2 = np.zeros((4,po))
		channels3 = np.zeros((4,po))
		channels4 = np.zeros((4,po))
		channels5 = np.zeros((4,po))
		
		for x in range(0,po):			
			for y in range(0,4):				
				channels[y,x] = float(state[lastnum+x,y])
				
		for y in range(0,4):				
			channels[y] = scipy.signal.savgol_filter(channels[y], 11, 3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)
		
		
		#for y in range(0,4):
		
			#nyq = 0.5 * 200
			#low = 1 / nyq
			#high = 50 / nyq
			#high2 = 70 / nyq
			#high3 = 90 / nyq
			#high4 = 95 / nyq
			#b, a = butter(5, [low, high], btype='band')
			#b2, a2 = butter(5, [low, high2], btype='band')
			#b3, a3 = butter(5, [low, high3], btype='band')
			#b4, a4 = butter(5, [low, high4], btype='band')
			
			#channels2[y] = lfilter(b, a, channels[y])
			#channels3[y] = lfilter(b2, a2, channels[y])
			#channels4[y] = lfilter(b3, a3, channels[y])
			#channels5[y] = lfilter(b4, a4, channels[y])
		
		
		
		
		
		#x = np.linspace(0,len(channels[1]),len(channels[1]))
		#f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')
		#f.suptitle("Time Series")
		#ax1.set_ylabel('Amplitude (uV)')
		
		
		
		#ax1.plot(x, channels2[0],color='red')
		#ax1.plot(x, channels3[0],color='blue')
		#ax1.plot(x, channels4[0],color='blue')
		
		#ax1.plot(x, channels[0])
		#ax1.plot(x, channels5[0],color='yellow')
		#ax1.plot(x, y4)
		#ax1.plot(x, y5,color='red')
		#ax1.plot(x, y4,color='green')
		
		
		
		#ax1.set_title('Fp1')
		
		#ax2.plot(x, channels2[1],color='red')
		#ax2.plot(x, channels3[1],color='blue')
		
		#ax2.plot(x, channels4[1],color='blue')
		#ax2.plot(x, channels[1])
		#ax2.plot(x, y5)
		#ax2.set_title('Fp2')
		
		#ax3.plot(x, channels2[2],color='red')
		#ax3.plot(x, channels3[2],color='blue')
		#ax3.plot(x, channels4[2],color='blue')
		#ax3.plot(x,channels[2])
		#ax3.plot(x,y6)
		#ax3.set_title('O1')
		#ax3.set_xlabel('sample')
		#ax3.set_ylabel('Amplitude (uV)')
		
		#ax4.plot(x, channels2[3],color='red')
		#ax4.plot(x, channels3[3],color='blue')
		#ax4.plot(x, channels4[3],color='blue')
		#ax4.plot(x,channels[3])
		#ax4.plot(x,y6)
		#ax4.set_title('O2')
		#ax4.set_xlabel('sample')
		#plt.show()
		
		if((nof-lastnum)!=0):
			for x in range(0,4):
				hursts[x,i] = pyeeg.hurst(channels[x])
				pfds[x,i] = pyeeg.pfd(channels[x])	
				#ap_entropy[x,i] = pyeeg.ap_entropy(X, M, R)
				hfd[x,i] = pyeeg.hfd(channels[x],15)
				bins[i,x] = pyeeg.bin_power(channels[x], [0.5,4,7,12,15,18], 200)				
				k=1
		lastnum=lastnum+po
	
	return pfds, dfas, hursts, bins, hfd
Example #19
def feature_extraction(data):
    features = []
    for values in data.T:
        features.append(hurst(values))
    return features
Example #20
import pyeeg as pyeeg
import numpy as np

entrada=open('array_dia2','r')
lines=entrada.readlines()
reqs=[]
times=[]

tamanho=86400
dreqs={}
for i in lines:
    dreqs[float(i.split(" ")[0])]=float(i.split(" ")[1])
for i in range(tamanho):
    if i not in dreqs.keys():
        dreqs[float(i)]=0.0
reqs = list(dreqs.values())
H = pyeeg.hurst(reqs)
print(H)
Example #21
    def find_hurst_exponent(self):
        self.hurst_exponent = pyeeg.hurst(self.filtered_signal)
Example #22
# (fragment: A_t, A_t_acumulado, reqs_escala, reqs, H, div_dia, Hs, medias,
# variancias and taxa are assumed to be defined earlier)
  A_t.append(A_t_acumulado)

E = 0.1
var = np.var(reqs_escala)
media = np.mean(reqs_escala)
kapa = np.sqrt((-2) * np.log(E))

def A_ep_t(t, var, media, kapa):
  A_ep_t =  media*t + kapa*np.sqrt(var)*t**H
  return A_ep_t

inicio = 0
for i in range(div_dia):
  fim = (i+1)*(86400//4) - 1  # integer division so it can be used as a list index
  array_tmp = reqs[inicio:fim]
  Hs.append(pyeeg.hurst(array_tmp)) 
  medias.append(np.mean(array_tmp))
  variancias.append(np.var(array_tmp))
  inicio = fim + 1

A_ep_t_plot=[]
inicio = 0
for i in range(div_dia):
  fim = (i+1)*(86400//4) - 1
  for j in range(inicio,fim):
    A_ep_t_plot.append(A_ep_t(j, variancias[i], medias[i], kapa))
  taxa.append((A_ep_t_plot[len(A_ep_t_plot) - 1] - A_ep_t_plot[inicio])/(fim - inicio))
  inicio = fim + 1

#A_ep_t_plot=[]
#for i in range(86400):