def get_nonlinear(self, cols=['ENMO', 'mean_hr', 'hrv_ms']): nonlin = defaultdict(dict) for col in cols: column = defaultdict(dict) column_sleep = defaultdict(dict) column_wake = defaultdict(dict) for idx in range(len(self.sleep_windows)): params = defaultdict(dict) params['DFA'] = nolds.dfa(self.sleep_windows[idx][col], debug_data=False) params['SampEn'] = nolds.sampen(self.sleep_windows[idx][col], debug_data=False) column_sleep[idx] = params for idx in range(len(self.wake_windows)): params = defaultdict(dict) params['DFA'] = nolds.dfa(self.wake_windows[idx][col], debug_data=False) params['SampEn'] = nolds.sampen(self.wake_windows[idx][col], debug_data=False) column_wake[idx] = params column['sleep'] = column_sleep column['wake'] = column_wake nonlin[col] = column self.nonlinear = nonlin return self
def test_complexity(): signal = np.cos(np.linspace(start=0, stop=30, num=100)) # Shannon assert np.allclose(nk.entropy_shannon(signal) - pyentrp.shannon_entropy(signal), 0) # Approximate assert np.allclose(nk.entropy_approximate(signal), 0.17364897858477146) assert np.allclose(nk.entropy_approximate(signal, 2, 0.2*np.std(signal, ddof=1)) - entropy_app_entropy(signal, 2), 0) assert nk.entropy_approximate(signal, 2, 0.2*np.std(signal, ddof=1)) != pyeeg_ap_entropy(signal, 2, 0.2*np.std(signal, ddof=1)) # Sample assert np.allclose(nk.entropy_sample(signal, 2, 0.2*np.std(signal, ddof=1)) - entropy_sample_entropy(signal, 2), 0) assert np.allclose(nk.entropy_sample(signal, 2, 0.2) - nolds.sampen(signal, 2, 0.2), 0) assert np.allclose(nk.entropy_sample(signal, 2, 0.2) - entro_py_sampen(signal, 2, 0.2, scale=False), 0) assert np.allclose(nk.entropy_sample(signal, 2, 0.2) - pyeeg_samp_entropy(signal, 2, 0.2), 0) assert nk.entropy_sample(signal, 2, 0.2) != pyentrp.sample_entropy(signal, 2, 0.2)[1] assert nk.entropy_sample(signal, 2, 0.2*np.sqrt(np.var(signal))) != MultiscaleEntropy_sample_entropy(signal, 2, 0.2)[0.2][2] # MSE # assert nk.entropy_multiscale(signal, 2, 0.2*np.sqrt(np.var(signal))) != np.trapz(MultiscaleEntropy_mse(signal, [i+1 for i in range(10)], 2, 0.2, return_type="list")) # assert nk.entropy_multiscale(signal, 2, 0.2*np.std(signal, ddof=1)) != np.trapz(pyentrp.multiscale_entropy(signal, 2, 0.2, 10)) # Fuzzy assert np.allclose(nk.entropy_fuzzy(signal, 2, 0.2, 1) - entro_py_fuzzyen(signal, 2, 0.2, 1, scale=False), 0)
def get_nonlin_params(df, col): params = defaultdict(dict) #params['hurst'] = nolds.hurst_rs(df[col],debug_data=False) params['DFA'] = nolds.dfa(df[col], debug_data=False) params['sampen'] = nolds.sampen(df[col], debug_data=False) #params['lyap1'] = nolds.lyap_r(df[col],debug_data=False) return params
def test_complexity(): signal = np.cos(np.linspace(start=0, stop=30, num=100)) # Shannon assert np.allclose(nk.entropy_shannon(signal), 6.6438561897747395, atol=0.0000001) assert nk.entropy_shannon(signal) == pyentrp.shannon_entropy(signal) # Approximate assert np.allclose(nk.entropy_approximate(signal), 0.17364897858477146, atol=0.000001) assert np.allclose(nk.entropy_approximate(np.array([85, 80, 89] * 17)), 1.0996541105257052e-05, atol=0.000001) # assert nk.entropy_approximate(signal, 2, 0.2) == pyeeg.ap_entropy(signal, 2, 0.2) # Sample assert np.allclose(nk.entropy_sample(signal, order=2, r=0.2 * np.std(signal)), nolds.sampen(signal, emb_dim=2, tolerance=0.2 * np.std(signal)), atol=0.000001) # assert nk.entropy_sample(signal, 2, 0.2) == pyeeg.samp_entropy(signal, 2, 0.2) # pyentrp.sample_entropy(signal, 2, 0.2) # Gives something different # Fuzzy assert np.allclose(nk.entropy_fuzzy(signal), 0.5216395432372958, atol=0.000001)
def test_complexity_vs_Python(): signal = np.cos(np.linspace(start=0, stop=30, num=100)) # Shannon shannon = nk.entropy_shannon(signal) # assert scipy.stats.entropy(shannon, pd.Series(signal).value_counts()) assert np.allclose(shannon - pyentrp.shannon_entropy(signal), 0) # Approximate assert np.allclose(nk.entropy_approximate(signal), 0.17364897858477146) assert np.allclose( nk.entropy_approximate( signal, dimension=2, r=0.2 * np.std(signal, ddof=1)) - entropy_app_entropy(signal, 2), 0) assert nk.entropy_approximate( signal, dimension=2, r=0.2 * np.std(signal, ddof=1)) != pyeeg_ap_entropy( signal, 2, 0.2 * np.std(signal, ddof=1)) # Sample assert np.allclose( nk.entropy_sample(signal, dimension=2, r=0.2 * np.std(signal, ddof=1)) - entropy_sample_entropy(signal, 2), 0) assert np.allclose( nk.entropy_sample(signal, dimension=2, r=0.2) - nolds.sampen(signal, 2, 0.2), 0) assert np.allclose( nk.entropy_sample(signal, dimension=2, r=0.2) - entro_py_sampen(signal, 2, 0.2, scale=False), 0) assert np.allclose( nk.entropy_sample(signal, dimension=2, r=0.2) - pyeeg_samp_entropy(signal, 2, 0.2), 0) # import sampen # sampen.sampen2(signal[0:300], mm=2, r=r) assert nk.entropy_sample(signal, dimension=2, r=0.2) != pyentrp.sample_entropy( signal, 2, 0.2)[1] assert nk.entropy_sample( signal, dimension=2, r=0.2 * np.sqrt(np.var(signal))) != MultiscaleEntropy_sample_entropy( signal, 2, 0.2)[0.2][2] # MSE # assert nk.entropy_multiscale(signal, 2, 0.2*np.sqrt(np.var(signal))) != np.trapz(MultiscaleEntropy_mse(signal, [i+1 for i in range(10)], 2, 0.2, return_type="list")) # assert nk.entropy_multiscale(signal, 2, 0.2*np.std(signal, ddof=1)) != np.trapz(pyentrp.multiscale_entropy(signal, 2, 0.2, 10)) # Fuzzy assert np.allclose( nk.entropy_fuzzy(signal, dimension=2, r=0.2, delay=1) - entro_py_fuzzyen(signal, 2, 0.2, 1, scale=False), 0) # DFA assert nk.fractal_dfa(signal, windows=np.array([ 4, 8, 12, 20 ])) != nolds.dfa(signal, nvals=[4, 8, 12, 20], fit_exp="poly")
def calc(images, adversarial_images, measure='sampen'):
    """Calculate and return the nonlinear measure of both original and adversarial images.

    Set ``measure`` to what you want to calculate:
        'sampen' : Sample entropy
        'frac'   : Correlation/fractal dimension
        'hurst'  : Hurst exponent
        'lyapr'  : Largest Lyapunov exponent using the Rosenstein et al. method
    Docs: https://cschoel.github.io/nolds/

    If an adversarial image is found to be NaN, we output 0. Some adversarial images
    are NaN because adversarial generation was unsuccessful for them: there is a maximum
    iteration count one can set for adversarial generation, and the program outputs NaN
    when that maximum is reached before an adversarial perturbation is found.
    For more info look at "adversarial_gen.ipynb".
    """
    imageCalc_data = []
    advimageCalc_data = []
    for i in tqdm(range(len(images))):
        image = images[i].flatten()
        advimage = adversarial_images[i].flatten()
        if measure == 'sampen':
            imageCalc_data.append(nolds.sampen(image))
            if np.isnan(np.sum(advimage)):
                advimageCalc_data.append(0)
            else:
                advimageCalc_data.append(nolds.sampen(advimage))  # fixed typo: was nolds.samepn
        elif measure == 'frac':
            imageCalc_data.append(nolds.corr_dim(image, 1))
            if np.isnan(np.sum(advimage)):
                advimageCalc_data.append(0)
            else:
                advimageCalc_data.append(nolds.corr_dim(advimage, 1))
        elif measure == 'hurst':
            imageCalc_data.append(nolds.hurst_rs(image))
            if np.isnan(np.sum(advimage)):
                advimageCalc_data.append(0)
            else:
                advimageCalc_data.append(nolds.hurst_rs(advimage))
        elif measure == 'lyapr':
            imageCalc_data.append(nolds.lyap_r(image))
            if np.isnan(np.sum(advimage)):
                advimageCalc_data.append(0)
            else:
                advimageCalc_data.append(nolds.lyap_r(advimage))
    return imageCalc_data, advimageCalc_data
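A minimal usage sketch for calc() above (hedged: the arrays are synthetic stand-ins for real and adversarial image batches, and numpy, nolds and tqdm are assumed to be imported as in the snippet):

import numpy as np

# Two tiny synthetic "batches" of 8x8 grayscale images; real inputs would come from a dataset.
rng = np.random.default_rng(0)
originals = rng.random((3, 8, 8))
adversarials = originals + 0.05 * rng.standard_normal((3, 8, 8))

orig_sampen, adv_sampen = calc(originals, adversarials, measure='sampen')
print(orig_sampen, adv_sampen)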
def sampen(windowed_buffers, **kwargs): """Find the sample entropy of the buffers""" T, C, _ = windowed_buffers.size() sampen_feature = torch.zeros((T, C, 1), dtype=torch.float32) for tt in range(T): for cc in range(C): sampen_feature[tt, cc, 0] = nolds.sampen(windowed_buffers[tt, cc, :]) return sampen_feature
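A hedged usage sketch for the torch-based sampen() feature extractor above (the shape (T, C, window_length) and the random values are illustrative assumptions; torch and nolds are assumed imported, and nolds is assumed to accept the array-like tensor slices):

import torch

windowed = torch.randn(4, 2, 128)   # 4 time steps, 2 channels, 128-sample windows
features = sampen(windowed)         # nolds.sampen applied per (time step, channel) window
print(features.shape)               # torch.Size([4, 2, 1])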
def feature_extraction_EMG(clip_data): #extract features for EMG features_list = [ 'RMS', 'range', 'mean', 'var', 'skew', 'kurt', 'Pdom_rel', 'Dom_freq', 'Sen', 'PSD_mean', 'PSD_std', 'PSD_skew', 'PSD_kurt' ] trial = list(clip_data.keys())[0] features = [] for c in range(len(clip_data[trial]['elec']['data'])): rawdata = clip_data[trial]['elec']['data'][c] rawdata_wmag = rawdata.copy() N = len(rawdata) RMS = 1 / N * np.sqrt(np.sum(rawdata**2)) r = np.max(rawdata) - np.min(rawdata) mean = np.mean(rawdata) var = np.std(rawdata) sk = skew(rawdata) kurt = kurtosis(rawdata) Pxx = power_spectra_welch(rawdata_wmag, fm=20, fM=70) domfreq = Pxx.iloc[:, -1].idxmax() Pdom_rel = Pxx.loc[domfreq] / Pxx.iloc[:, -1].sum() Pxx_moments = np.array([ np.nanmean(Pxx.values), np.nanstd(Pxx.values), skew(Pxx.values)[0], kurtosis(Pxx.values)[0] ]) x = rawdata.iloc[:, 0] x = x[::5] n = len(x) Fs = np.mean(1 / (np.diff(x.index) / 1000)) sH_raw = nolds.sampen(x) X = np.concatenate( (RMS, r, mean, var, sk, kurt, Pdom_rel, np.array([domfreq, sH_raw]))) Y = np.concatenate((X, Pxx_moments)) features.append(Y) F = np.asarray(features) clip_data[trial]['elec']['features'] = pd.DataFrame(data=F, columns=features_list, dtype='float32')
def sample(x: np.ndarray):
    """
    Sample Entropy

    :param x: a 1-d numeric vector
    :return: scalar feature
    """
    out = nolds.sampen(x)
    if np.isinf(out):
        out = np.nan
    return out
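A small usage sketch for the sample() feature above (the synthetic signal is an assumption; numpy and nolds imports as in the surrounding code):

import numpy as np

x = np.sin(np.linspace(0, 20 * np.pi, 500)) + 0.1 * np.random.randn(500)
print(sample(x))  # scalar sample entropy; NaN if nolds.sampen returned +/-inf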
def plot_entropy(df1, df2):
    # Calculate and plot the sample entropy for embedding dimensions in the range 1-10.
    cd1 = []
    cd2 = []
    n = []
    for i in range(1, 11):
        cd1.append(nolds.sampen(df1, emb_dim=i))
        cd2.append(nolds.sampen(df2, emb_dim=i))
        n.append(i)
        print(i)
    plt.grid()
    plt.plot(n, cd1, color='red', label='Model 1')
    plt.scatter(n, cd1, color='red')
    plt.plot(n, cd2, color='green', label='Model 2')
    plt.scatter(n, cd2, color='green')
    plt.xlabel('Embedding dimension')
    plt.ylabel('Sample entropy')
    plt.legend()
    plt.show()
    print('Model 1 max: ', max(cd1))
    print('Model 2 max: ', max(cd2))
def load_feature(s): rw = [lwalk(i) for i in s] sd = [np.std(i) for i in rw] dfa = [nolds.dfa(i) for i in rw] hurst = [nolds.hurst_rs(i) for i in rw] sampen = [nolds.sampen(i) for i in rw] ac = [autocorrelation(i, 100) for i in rw] rvntsl = [ratio_value_number_to_time_series_length(i) for i in rw] ac_200 = [autocorrelation(i, 200) for i in rw] ac_300 = [autocorrelation(i, 300) for i in rw] lyapr = [nolds.lyap_r(i) for i in rw] inpv = pd.DataFrame( [sd, dfa, hurst, sampen, ac, rvntsl, ac_200, ac_300, lyapr]) return inpv.transpose()
def sample_entropy(nni=None, rpeaks=None, dim=2, tolerance=None):
    """Computes the sample entropy (sampen) of the NNI series.

    Parameters
    ----------
    nni : array
        NN intervals in [ms] or [s].
    rpeaks : array
        R-peak times in [ms] or [s].
    dim : int, optional
        Entropy embedding dimension (default: 2).
    tolerance : int, float, optional
        Tolerance distance below which two vectors are considered equal
        (default: std(NNI) * 0.2).

    Returns (biosppy.utils.ReturnTuple Object)
    ------------------------------------------
    [key : format]
        Description.
    sample_entropy : float
        Sample entropy of the NNI series.

    Raises
    ------
    TypeError
        If 'tolerance' is not a numeric value.
    """
    # Check input values
    nn = pu.check_input(nni, rpeaks)

    if tolerance is None:
        tolerance = np.std(nn, ddof=1) * 0.2  # fixed: was ddof=-1, which divides by N+1
    else:
        try:
            tolerance = float(tolerance)
        except:
            raise TypeError(
                'Tolerance level cannot be converted to float. '
                'Please verify that tolerance is a numeric (int or float).')

    # Compute Sample Entropy
    sampen = float(nolds.sampen(nn, dim, tolerance))

    # Output
    args = (sampen, )
    names = ('sampen', )
    return biosppy.utils.ReturnTuple(args, names)
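A hedged usage sketch for sample_entropy() above (the NN intervals are synthetic milliseconds; pyhrv-style helpers such as pu.check_input and biosppy are assumed available as in the function):

import numpy as np

nni = 800 + 20 * np.random.randn(300)   # synthetic NN intervals in ms
result = sample_entropy(nni=nni)
print(result['sampen'])                 # ReturnTuple supports access by key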
def extractNonLinear(self, x):
    '''
    :param x: raw respiration data
    :return: nonlinear features of the breath-to-breath interval series:
             sample entropy and the first Lyapunov exponent
    '''
    zeros = self.extracRate(x, self.fs)[0]
    zeros_diff = np.insert(np.diff(zeros), 0, zeros[0]).astype(float)  # np.float is deprecated

    # interpolate zeros_diff
    f = interpolate.interp1d(np.arange(0, len(zeros_diff)), zeros_diff)
    xnew = np.arange(0, len(zeros_diff) - 1, 0.5)
    zeros_diff_new = f(xnew)

    # nonlinear
    sample_ent = nolds.sampen(zeros_diff_new, emb_dim=1)
    lyapunov_exp = nolds.lyap_e(zeros_diff_new, emb_dim=2, matrix_dim=2)[0]
    return np.array([sample_ent, lyapunov_exp])
def extractNonLinear(self, x):
    '''
    :param x: raw PPG
    :return: nonlinear features of the inter-onset interval series:
             sample entropy and the first Lyapunov exponent
    '''
    onsets, = biosppy.signals.bvp.find_onsets(x, sampling_rate=self.fs)
    onsets_diff = np.insert(np.diff(onsets), 0, onsets[0]).astype(float)  # np.float is deprecated

    # interpolate onsets_diff
    f = interpolate.interp1d(np.arange(0, len(onsets_diff)), onsets_diff)
    xnew = np.arange(0, len(onsets_diff) - 1, 0.5)
    onsets_diff_new = f(xnew)

    # nonlinear
    sample_ent = nolds.sampen(onsets_diff_new, emb_dim=1)
    lyapunov_exp = nolds.lyap_e(onsets_diff_new, emb_dim=2, matrix_dim=2)[0]
    return np.array([sample_ent, lyapunov_exp])
def test_complexity(): signal = np.cos(np.linspace(start=0, stop=30, num=100)) # Shannon assert np.allclose( nk.entropy_shannon(signal) - pyentrp.shannon_entropy(signal), 0) # Approximate assert np.allclose(nk.entropy_approximate(signal), 0.17364897858477146) assert np.allclose( nk.entropy_approximate(signal, 2, 0.2 * np.std(signal, ddof=1)) - entropy_app_entropy(signal, 2), 0) assert nk.entropy_approximate( signal, 2, 0.2 * np.std(signal, ddof=1)) != pyeeg_ap_entropy( signal, 2, 0.2 * np.std(signal, ddof=1)) # Sample assert np.allclose( nk.entropy_sample(signal, 2, 0.2 * np.std(signal, ddof=1)) - entropy_sample_entropy(signal, 2), 0) assert np.allclose( nk.entropy_sample(signal, 2, 0.2) - nolds.sampen(signal, 2, 0.2), 0) assert np.allclose( nk.entropy_sample(signal, 2, 0.2) - entro_py_sampen(signal, 2, 0.2, scale=False), 0) assert np.allclose( nk.entropy_sample(signal, 2, 0.2) - pyeeg_samp_entropy(signal, 2, 0.2), 0) assert nk.entropy_sample(signal, 2, 0.2) != pyentrp.sample_entropy( signal, 2, 0.2)[1] # Fuzzy assert np.allclose( nk.entropy_fuzzy(signal, 2, 0.2, 1) - entro_py_fuzzyen(signal, 2, 0.2, 1, scale=False), 0)
def get_sampen(nn_intervals: List[float]) -> dict:
    """
    Function computing the sample entropy of the given data.
    This function should be used on short-term recordings, e.g. a 1-minute window.

    Parameters
    ---------
    nn_intervals : list
        Normal to Normal Intervals

    Returns
    ---------
    sampen : float
        The sample entropy of the data

    References
    ----------
    .. [5] Physiological time-series analysis using approximate entropy and sample entropy, \
    JOSHUA S. RICHMAN, J. RANDALL MOORMAN - 2000
    """
    sampen = nolds.sampen(nn_intervals, emb_dim=2)
    return {'sampen': sampen}
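A minimal usage sketch for get_sampen() above (hedged: the interval values are made up; a real one-minute window would contain the actual beat-to-beat intervals in ms):

nn_intervals = [850, 870, 860, 855, 865, 872, 858, 862, 868, 854]
print(get_sampen(nn_intervals))  # e.g. {'sampen': ...}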
def complexity(signal, shannon=True, sampen=True, multiscale=True, fractal_dim=True, hurst=True, dfa=True, lyap_r=False, lyap_e=False, emb_dim=2, tolerance="default"): """ Returns several chaos/complexity indices of a signal (including entropy, fractal dimensions, Hurst and Lyapunov exponent etc.). Parameters ---------- signal : list or array List or array of values. shannon : bool Computes Shannon entropy. sampen : bool Computes approximate sample entropy (sampen) using Chebychev and Euclidean distances. multiscale : bool Computes multiscale entropy (MSE). Note that it uses the 'euclidean' distance. fractal_dim : bool Computes the fractal (correlation) dimension. hurst : bool Computes the Hurst exponent. dfa : bool Computes DFA. lyap_r : bool Computes Positive Lyapunov exponents (Rosenstein et al. (1993) method). lyap_e : bool Computes Positive Lyapunov exponents (Eckmann et al. (1986) method). emb_dim : int The embedding dimension (*m*, the length of vectors to compare). Used in sampen and fractal_dim. tolerance : float Distance *r* threshold for two template vectors to be considered equal. Default is 0.2*std(signal). Used in sampen and fractal_dim. Returns ---------- complexity : dict Dict containing values for each indices. Example ---------- >>> import neurokit as nk >>> import numpy as np >>> >>> signal = np.sin(np.log(np.random.sample(666))) >>> complexity = nk.complexity(signal) Notes ---------- *Details* - **shannon entropy**: Entropy is a measure of unpredictability of the state, or equivalently, of its average information content. - **sample entropy (sampen)**: Measures the complexity of a time-series, based on approximate entropy. The sample entropy of a time series is defined as the negative natural logarithm of the conditional probability that two sequences similar for emb_dim points remain similar at the next point, excluding self-matches. A lower value for the sample entropy therefore corresponds to a higher probability indicating more self-similarity. - **multiscale entropy**: Multiscale entropy (MSE) analysis is a new method of measuring the complexity of finite length time series. - **fractal dimension**: A measure of the fractal (or correlation) dimension of a time series which is also related to complexity. The correlation dimension is a characteristic measure that can be used to describe the geometry of chaotic attractors. It is defined using the correlation sum C(r) which is the fraction of pairs of points X_i in the phase space whose distance is smaller than r. - **hurst**: The Hurst exponent is a measure of the "long-term memory" of a time series. It can be used to determine whether the time series is more, less, or equally likely to increase if it has increased in previous steps. This property makes the Hurst exponent especially interesting for the analysis of stock data. - **dfa**: DFA measures the Hurst parameter H, which is very similar to the Hurst exponent. The main difference is that DFA can be used for non-stationary processes (whose mean and/or variance change over time). - **lyap**: Positive Lyapunov exponents indicate chaos and unpredictability. Provides the algorithm of Rosenstein et al. (1993) to estimate the largest Lyapunov exponent and the algorithm of Eckmann et al. (1986) to estimate the whole spectrum of Lyapunov exponents. 
*Authors* - Christopher Schölzel (https://github.com/CSchoel) - tjugo (https://github.com/nikdon) - Dominique Makowski (https://github.com/DominiqueMakowski) *Dependencies* - nolds - numpy *See Also* - nolds package: https://github.com/CSchoel/nolds - pyEntropy package: https://github.com/nikdon/pyEntropy References ----------- - Richman, J. S., & Moorman, J. R. (2000). Physiological time-series analysis using approximate entropy and sample entropy. American Journal of Physiology-Heart and Circulatory Physiology, 278(6), H2039-H2049. - Costa, M., Goldberger, A. L., & Peng, C. K. (2005). Multiscale entropy analysis of biological signals. Physical review E, 71(2), 021906. """ if tolerance == "default": tolerance = 0.2 * np.std(signal) # Initialize results storing complexity = {} # Shannon if shannon is True: try: complexity["Shannon_Entropy"] = entropy_shannon(signal) except: print( "NeuroKit warning: complexity(): Failed to compute Shannon entropy." ) complexity["Shannon_Entropy"] = np.nan # Sampen if sampen is True: try: complexity["Sample_Entropy_Chebychev"] = nolds.sampen( signal, emb_dim, tolerance, dist="chebychev", debug_plot=False, plot_file=None) except: print( "NeuroKit warning: complexity(): Failed to compute sample entropy (sampen) using chebychev distance." ) complexity["Sample_Entropy_Chebychev"] = np.nan try: complexity["Sample_Entropy_Euclidean"] = nolds.sampen( signal, emb_dim, tolerance, dist="euclidean", debug_plot=False, plot_file=None) except: try: complexity["Sample_Entropy_Euclidean"] = nolds.sampen( signal, emb_dim, tolerance, dist="euler", debug_plot=False, plot_file=None) except: print( "NeuroKit warning: complexity(): Failed to compute sample entropy (sampen) using euclidean distance." ) complexity["Sample_Entropy_Euclidean"] = np.nan # multiscale if multiscale is True: try: complexity["Multiscale_Entropy"] = entropy_multiscale( signal, emb_dim, tolerance) except: print( "NeuroKit warning: complexity(): Failed to compute Multiscale Entropy (MSE)." ) complexity["Multiscale_Entropy"] = np.nan # fractal_dim if fractal_dim is True: try: complexity["Fractal_Dimension"] = nolds.corr_dim(signal, emb_dim, rvals=None, fit="RANSAC", debug_plot=False, plot_file=None) except: print( "NeuroKit warning: complexity(): Failed to compute fractal_dim." ) complexity["Fractal_Dimension"] = np.nan # Hurst if hurst is True: try: complexity["Hurst"] = nolds.hurst_rs(signal, nvals=None, fit="RANSAC", debug_plot=False, plot_file=None) except: print("NeuroKit warning: complexity(): Failed to compute hurst.") complexity["Hurst"] = np.nan # DFA if dfa is True: try: complexity["DFA"] = nolds.dfa(signal, nvals=None, overlap=True, order=1, fit_trend="poly", fit_exp="RANSAC", debug_plot=False, plot_file=None) except: print("NeuroKit warning: complexity(): Failed to compute dfa.") complexity["DFA"] = np.nan # Lyap_r if lyap_r is True: try: complexity["Lyapunov_R"] = nolds.lyap_r(signal, emb_dim=10, lag=None, min_tsep=None, tau=1, min_vectors=20, trajectory_len=20, fit="RANSAC", debug_plot=False, plot_file=None) except: print("NeuroKit warning: complexity(): Failed to compute lyap_r.") complexity["Lyapunov_R"] = np.nan # Lyap_e if lyap_e is True: try: result = nolds.lyap_e(signal, emb_dim=10, matrix_dim=4, min_nb=None, min_tsep=0, tau=1, debug_plot=False, plot_file=None) for i, value in enumerate(result): complexity["Lyapunov_E_" + str(i)] = value except: print("NeuroKit warning: complexity(): Failed to compute lyap_e.") complexity["Lyapunov_E"] = np.nan return (complexity)
# Save response time RT_series = pd.DataFrame({ "Time": response["SpawnTime"].values, "RT": response["RT"].values }) RT_series.to_csv(result_path + f_name + "_RTSeries.csv") # compute gaze velocity skip = 1 time = gaze_data["Time"].values gazex = gaze_data["GazeX"].values gazey = gaze_data["GazeY"].values gaze_avg = np.array([gazex, gazey]).transpose() velocity = gaze_data["Velocity"].values acceleration = gaze_data["Acceleration"].values sampen_velocity = sampen(velocity, 2) # computed sample entropy of gaze velocity sampen_acceleration = sampen( acceleration, 2) # compute sample entropy of gaze acceleration # compute sample entropy and angle (1e-25 to avoid NAN) dist_avg = euclidianDistT( gaze_avg, skip=2) # compute euclidian distance for consecutive gaze angle_avg = anglesEstimation( gaze_avg, skip=2) # compute angle distance for consecutive gaze # compute sample entropy of gaze distance sampen_dist = sampen(dist_avg, 2) sampen_angle = sampen(angle_avg, 2)
# FEATURE 1: MEAN
all_mean = np.mean(all_raw_pos, axis=0)
# FEATURE 2: MAX
all_max = np.max(all_raw_pos, axis=0)
# FEATURE 3: MIN
all_min = np.min(all_raw_pos, axis=0)
# FEATURE 4: VAR
all_var = np.var(all_raw_pos, axis=0)
# FEATURE 5: MEDIAN
all_med = np.median(all_raw_pos, axis=0)
# FEATURE 6: SKEW
all_skew = skew(all_raw_pos, axis=0)
# FEATURE 7: KURTOSIS
all_kuriosis = kurtosis(all_raw_pos, axis=0)
# FEATURE 8: SAMPLE ENTROPY
all_se = nolds.sampen(all_raw_pos)
# FEATURE 9: PCA
#pca = PCA(n_components=30)
#pca.fit(all_raw)
#all_pca = pca.components_[1,:]
# FEATURE 10: FFT
quat_head = np.transpose(np.array(data['quat_head']))
head_fft = np.absolute(
    np.sqrt(np.sum(np.square(np.fft.fft(quat_head, axis=1)), axis=0)))[1:6]
quat_left = np.transpose(np.array(data['quat_left']))
left_fft = np.absolute(
    np.sqrt(np.sum(np.square(np.fft.fft(quat_left, axis=1)), axis=0)))[1:6]
def sampE(y): return nolds.sampen(y)
hurst = m[0] * 2.0
hurst

# Fractal dimension (correlation dimension) = slope of the line fitted to log(r) vs log(C(r)).
# If the correlation dimension is constant for all ‘m’, the time series is deterministic;
# if the correlation exponent increases with increasing ‘m’, the time series is stochastic.
h01 = nolds.corr_dim(F, 2, debug_plot=True)
h01

# lyap_r = estimate the largest Lyapunov exponent
h1 = nolds.lyap_r(F, emb_dim=2, debug_plot=True)
h1

# lyap_e = estimate the whole spectrum of Lyapunov exponents
h2 = nolds.lyap_e(F)
h2

from pyentrp import entropy as ent

T1 = np.std(F)
T1
k = 0.2 * T1
k

# sample entropy
h = nolds.sampen(F, 3, tolerance=k)
h

# permutation entropy
h2 = ent.permutation_entropy(F, order=3, normalize=True)
h2
def complexity(signal, sampling_rate=1000, shannon=True, sampen=True, multiscale=True, spectral=True, svd=True, correlation=True, higushi=True, petrosian=True, fisher=True, hurst=True, dfa=True, lyap_r=False, lyap_e=False, emb_dim=2, tolerance="default", k_max=8, bands=None, tau=1): """ Computes several chaos/complexity indices of a signal (including entropy, fractal dimensions, Hurst and Lyapunov exponent etc.). Parameters ---------- signal : list or array List or array of values. sampling_rate : int Sampling rate (samples/second). shannon : bool Computes Shannon entropy. sampen : bool Computes approximate sample entropy (sampen) using Chebychev and Euclidean distances. multiscale : bool Computes multiscale entropy (MSE). Note that it uses the 'euclidean' distance. spectral : bool Computes Spectral Entropy. svd : bool Computes the Singular Value Decomposition (SVD) entropy. correlation : bool Computes the fractal (correlation) dimension. higushi : bool Computes the Higushi fractal dimension. petrosian : bool Computes the Petrosian fractal dimension. fisher : bool Computes the Fisher Information. hurst : bool Computes the Hurst exponent. dfa : bool Computes DFA. lyap_r : bool Computes Positive Lyapunov exponents (Rosenstein et al. (1993) method). lyap_e : bool Computes Positive Lyapunov exponents (Eckmann et al. (1986) method). emb_dim : int The embedding dimension (*m*, the length of vectors to compare). Used in sampen, fisher, svd and fractal_dim. tolerance : float Distance *r* threshold for two template vectors to be considered equal. Default is 0.2*std(signal). Used in sampen and fractal_dim. k_max : int The maximal value of k used for Higushi fractal dimension. The point at which the FD plateaus is considered a saturation point and that kmax value should be selected (Gómez, 2009). Some studies use a value of 8 or 16 for ECG signal and other 48 for MEG. bands : int Used for spectral density. A list of numbers delimiting the bins of the frequency bands. If None the entropy is computed over the whole range of the DFT (from 0 to `f_s/2`). tau : int The delay. Used for fisher, svd, lyap_e and lyap_r. Returns ---------- complexity : dict Dict containing values for each indices. Example ---------- >>> import neurokit as nk >>> import numpy as np >>> >>> signal = np.sin(np.log(np.random.sample(666))) >>> complexity = nk.complexity(signal) Notes ---------- *Details* - **Entropy**: Entropy is a measure of unpredictability of the state, or equivalently, of its average information content. - *Shannon entropy*: Shannon entropy was introduced by Claude E. Shannon in his 1948 paper "A Mathematical Theory of Communication". Shannon entropy provides an absolute limit on the best possible average length of lossless encoding or compression of an information source. - *Sample entropy (sampen)*: Measures the complexity of a time-series, based on approximate entropy. The sample entropy of a time series is defined as the negative natural logarithm of the conditional probability that two sequences similar for emb_dim points remain similar at the next point, excluding self-matches. A lower value for the sample entropy therefore corresponds to a higher probability indicating more self-similarity. - *Multiscale entropy*: Multiscale entropy (MSE) analysis is a new method of measuring the complexity of finite length time series. - *SVD Entropy*: Indicator of how many vectors are needed for an adequate explanation of the data set. 
Measures feature-richness in the sense that the higher the entropy of the set of SVD weights, the more orthogonal vectors are required to adequately explain it. - **fractal dimension**: The term *fractal* was first introduced by Mandelbrot in 1983. A fractal is a set of points that when looked at smaller scales, resembles the whole set. The concept of fractak dimension (FD) originates from fractal geometry. In traditional geometry, the topological or Euclidean dimension of an object is known as the number of directions each differential of the object occupies in space. This definition of dimension works well for geometrical objects whose level of detail, complexity or *space-filling* is the same. However, when considering two fractals of the same topological dimension, their level of *space-filling* is different, and that information is not given by the topological dimension. The FD emerges to provide a measure of how much space an object occupies between Euclidean dimensions. The FD of a waveform represents a powerful tool for transient detection. This feature has been used in the analysis of ECG and EEG to identify and distinguish specific states of physiologic function. Many algorithms are available to determine the FD of the waveform (Acharya, 2005). - *Correlation*: A measure of the fractal (or correlation) dimension of a time series which is also related to complexity. The correlation dimension is a characteristic measure that can be used to describe the geometry of chaotic attractors. It is defined using the correlation sum C(r) which is the fraction of pairs of points X_i in the phase space whose distance is smaller than r. - *Higushi*: Higuchi proposed in 1988 an efficient algorithm for measuring the FD of discrete time sequences. As the reconstruction of the attractor phase space is not necessary, this algorithm is simpler and faster than D2 and other classical measures derived from chaos theory. FD can be used to quantify the complexity and self-similarity of a signal. HFD has already been used to analyse the complexity of brain recordings and other biological signals. - *Petrosian Fractal Dimension*: Provide a fast computation of the FD of a signal by translating the series into a binary sequence. - **Other**: - *Fisher Information*: A way of measuring the amount of information that an observable random variable X carries about an unknown parameter θ of a distribution that models X. Formally, it is the variance of the score, or the expected value of the observed information. - *Hurst*: The Hurst exponent is a measure of the "long-term memory" of a time series. It can be used to determine whether the time series is more, less, or equally likely to increase if it has increased in previous steps. This property makes the Hurst exponent especially interesting for the analysis of stock data. - *DFA*: DFA measures the Hurst parameter H, which is very similar to the Hurst exponent. The main difference is that DFA can be used for non-stationary processes (whose mean and/or variance change over time). - *Lyap*: Positive Lyapunov exponents indicate chaos and unpredictability. Provides the algorithm of Rosenstein et al. (1993) to estimate the largest Lyapunov exponent and the algorithm of Eckmann et al. (1986) to estimate the whole spectrum of Lyapunov exponents. 
*Authors* - Dominique Makowski (https://github.com/DominiqueMakowski) - Christopher Schölzel (https://github.com/CSchoel) - tjugo (https://github.com/nikdon) - Quentin Geissmann (https://github.com/qgeissmann) *Dependencies* - nolds - numpy *See Also* - nolds package: https://github.com/CSchoel/nolds - pyEntropy package: https://github.com/nikdon/pyEntropy - pyrem package: https://github.com/gilestrolab/pyrem References ----------- - Accardo, A., Affinito, M., Carrozzi, M., & Bouquet, F. (1997). Use of the fractal dimension for the analysis of electroencephalographic time series. Biological cybernetics, 77(5), 339-350. - Pierzchalski, M. Application of Higuchi Fractal Dimension in Analysis of Heart Rate Variability with Artificial and Natural Noise. Recent Advances in Systems Science. - Acharya, R., Bhat, P. S., Kannathal, N., Rao, A., & Lim, C. M. (2005). Analysis of cardiac health using fractal dimension and wavelet transformation. ITBM-RBM, 26(2), 133-139. - Richman, J. S., & Moorman, J. R. (2000). Physiological time-series analysis using approximate entropy and sample entropy. American Journal of Physiology-Heart and Circulatory Physiology, 278(6), H2039-H2049. - Costa, M., Goldberger, A. L., & Peng, C. K. (2005). Multiscale entropy analysis of biological signals. Physical review E, 71(2), 021906. """ if tolerance == "default": tolerance = 0.2 * np.std(signal) # Initialize results storing complexity = {} # ------------------------------------------------------------------------------ # Shannon if shannon is True: try: complexity["Entropy_Shannon"] = entropy_shannon(signal) except: print( "NeuroKit warning: complexity(): Failed to compute Shannon entropy." ) complexity["Entropy_Shannon"] = np.nan # Sampen if sampen is True: try: complexity["Entropy_Sample"] = nolds.sampen(signal, emb_dim, tolerance, dist="chebychev", debug_plot=False, plot_file=None) except: print( "NeuroKit warning: complexity(): Failed to compute sample entropy (sampen)." ) complexity["Entropy_Sample"] = np.nan # multiscale if multiscale is True: try: complexity["Entropy_Multiscale"] = entropy_multiscale( signal, emb_dim, tolerance) except: print( "NeuroKit warning: complexity(): Failed to compute Multiscale Entropy (MSE)." ) complexity["Entropy_Multiscale"] = np.nan # spectral if spectral is True: try: complexity["Entropy_Spectral"] = entropy_spectral( signal, sampling_rate=sampling_rate, bands=bands) except: print( "NeuroKit warning: complexity(): Failed to compute Spectral Entropy." ) complexity["Entropy_Spectral"] = np.nan # SVD if svd is True: try: complexity["Entropy_SVD"] = entropy_svd(signal, tau=tau, emb_dim=emb_dim) except: print( "NeuroKit warning: complexity(): Failed to compute SVD Entropy." ) complexity["Entropy_SVD"] = np.nan # ------------------------------------------------------------------------------ # fractal_dim if correlation is True: try: complexity["Fractal_Dimension_Correlation"] = nolds.corr_dim( signal, emb_dim, rvals=None, fit="RANSAC", debug_plot=False, plot_file=None) except: print( "NeuroKit warning: complexity(): Failed to compute fractal_dim." 
) complexity["Fractal_Dimension_Correlation"] = np.nan # higushi if higushi is True: try: complexity["Fractal_Dimension_Higushi"] = fd_higushi(signal, k_max) except: print("NeuroKit warning: complexity(): Failed to compute higushi.") complexity["Fractal_Dimension_Higushi"] = np.nan # petrosian if petrosian is True: try: complexity["Fractal_Dimension_Petrosian"] = fd_petrosian(signal) except: print( "NeuroKit warning: complexity(): Failed to compute petrosian.") complexity["Fractal_Dimension_Petrosian"] = np.nan # ------------------------------------------------------------------------------ # Fisher if fisher is True: try: complexity["Fisher_Information"] = fisher_info(signal, tau=tau, emb_dim=emb_dim) except: print( "NeuroKit warning: complexity(): Failed to compute Fisher Information." ) complexity["Fisher_Information"] = np.nan # Hurst if hurst is True: try: complexity["Hurst"] = nolds.hurst_rs(signal, nvals=None, fit="RANSAC", debug_plot=False, plot_file=None) except: print("NeuroKit warning: complexity(): Failed to compute hurst.") complexity["Hurst"] = np.nan # DFA if dfa is True: try: complexity["DFA"] = nolds.dfa(signal, nvals=None, overlap=True, order=1, fit_trend="poly", fit_exp="RANSAC", debug_plot=False, plot_file=None) except: print("NeuroKit warning: complexity(): Failed to compute dfa.") complexity["DFA"] = np.nan # Lyap_r if lyap_r is True: try: complexity["Lyapunov_R"] = nolds.lyap_r(signal, emb_dim=10, lag=None, min_tsep=None, tau=tau, min_vectors=20, trajectory_len=20, fit="RANSAC", debug_plot=False, plot_file=None) except: print("NeuroKit warning: complexity(): Failed to compute lyap_r.") complexity["Lyapunov_R"] = np.nan # Lyap_e if lyap_e is True: try: result = nolds.lyap_e(signal, emb_dim=10, matrix_dim=4, min_nb=None, min_tsep=0, tau=tau, debug_plot=False, plot_file=None) for i, value in enumerate(result): complexity["Lyapunov_E_" + str(i)] = value except: print("NeuroKit warning: complexity(): Failed to compute lyap_e.") complexity["Lyapunov_E"] = np.nan return (complexity)
def feature_extraction(clip_data): features_list = [ 'RMSX', 'RMSY', 'RMSZ', 'rangeX', 'rangeY', 'rangeZ', 'meanX', 'meanY', 'meanZ', 'varX', 'varY', 'varZ', 'skewX', 'skewY', 'skewZ', 'kurtX', 'kurtY', 'kurtZ', 'xcor_peakXY', 'xcorr_peakXZ', 'xcorr_peakYZ', 'xcorr_lagXY', 'xcorr_lagXZ', 'xcorr_lagYZ', 'Dom_freq', 'Pdom_rel', 'PSD_mean', 'PSD_std', 'PSD_skew', 'PSD_kur', 'jerk_mean', 'jerk_std', 'jerk_skew', 'jerk_kur', 'Sen_X', 'Sen_Y', 'Sen_Z', 'RMS_mag', 'range_mag', 'mean_mag', 'var_mag', 'skew_mag', 'kurt_mag', 'Sen_mag' ] #cycle through all clips for current trial and save dataframe of features for current trial and sensor features = [] for c in range(len(clip_data['data'])): rawdata = clip_data['data'][c] #acceleration magnitude rawdata_wmag = rawdata.copy() rawdata_wmag['Accel_Mag'] = np.sqrt((rawdata**2).sum(axis=1)) #extract features on current clip #Root mean square of signal on each axis N = len(rawdata) RMS = 1 / N * np.sqrt(np.asarray(np.sum(rawdata**2, axis=0))) RMS_mag = 1 / N * np.sqrt(np.sum(rawdata_wmag['Accel_Mag']**2, axis=0)) #range on each axis min_xyz = np.min(rawdata, axis=0) max_xyz = np.max(rawdata, axis=0) r = np.asarray(max_xyz - min_xyz) r_mag = np.max(rawdata_wmag['Accel_Mag']) - np.min( rawdata_wmag['Accel_Mag']) #Moments on each axis mean = np.asarray(np.mean(rawdata, axis=0)) var = np.asarray(np.std(rawdata, axis=0)) sk = skew(rawdata) kurt = kurtosis(rawdata) mean_mag = np.mean(rawdata_wmag['Accel_Mag']) var_mag = np.std(rawdata_wmag['Accel_Mag']) sk_mag = skew(rawdata_wmag['Accel_Mag']) kurt_mag = kurtosis(rawdata_wmag['Accel_Mag']) #Cross-correlation between axes pairs xcorr_xy = np.correlate(rawdata.iloc[:, 0], rawdata.iloc[:, 1], mode='same') # xcorr_xy = xcorr_xy/np.abs(np.sum(xcorr_xy)) #normalize values xcorr_peak_xy = np.max(xcorr_xy) xcorr_lag_xy = (np.argmax(xcorr_xy)) / len(xcorr_xy) #normalized lag xcorr_xz = np.correlate(rawdata.iloc[:, 0], rawdata.iloc[:, 2], mode='same') # xcorr_xz = xcorr_xz/np.abs(np.sum(xcorr_xz)) #normalize values xcorr_peak_xz = np.max(xcorr_xz) xcorr_lag_xz = (np.argmax(xcorr_xz)) / len(xcorr_xz) xcorr_yz = np.correlate(rawdata.iloc[:, 1], rawdata.iloc[:, 2], mode='same') # xcorr_yz = xcorr_yz/np.abs(np.sum(xcorr_yz)) #normalize values xcorr_peak_yz = np.max(xcorr_yz) xcorr_lag_yz = (np.argmax(xcorr_yz)) / len(xcorr_yz) #pack xcorr features xcorr_peak = np.array([xcorr_peak_xy, xcorr_peak_xz, xcorr_peak_yz]) xcorr_lag = np.array([xcorr_lag_xy, xcorr_lag_xz, xcorr_lag_yz]) #Dominant freq and relative magnitude (on acc magnitude) Pxx = power_spectra_welch(rawdata_wmag, fm=0, fM=10) domfreq = np.asarray([Pxx.iloc[:, -1].idxmax()]) Pdom_rel = Pxx.loc[domfreq].iloc[:, -1].values / Pxx.iloc[:, -1].sum( ) #power at dominant freq rel to total #moments of PSD Pxx_moments = np.array([ np.nanmean(Pxx.values), np.nanstd(Pxx.values), skew(Pxx.values), kurtosis(Pxx.values) ]) #moments of jerk magnitude jerk = rawdata_wmag['Accel_Mag'].diff().values jerk_moments = np.array([ np.nanmean(jerk), np.nanstd(jerk), skew(jerk[~np.isnan(jerk)]), kurtosis(jerk[~np.isnan(jerk)]) ]) #sample entropy raw data (magnitude) and FFT sH_raw = [] sH_fft = [] for a in range(3): x = rawdata.iloc[:, a] n = len(x) #number of samples in clip Fs = np.mean(1 / (np.diff(x.index) / 1000)) #sampling rate in clip sH_raw.append(nolds.sampen(x)) #samp entr raw data #for now disable SH on fft # f,Pxx_den = welch(x,Fs,nperseg=min(256,n/4)) # sH_fft.append(nolds.sampen(Pxx_den)) #samp entr fft sH_mag = nolds.sampen(rawdata_wmag['Accel_Mag']) #Assemble features in 
array Y = np.array( [RMS_mag, r_mag, mean_mag, var_mag, sk_mag, kurt_mag, sH_mag]) X = np.concatenate( (RMS, r, mean, var, sk, kurt, xcorr_peak, xcorr_lag, domfreq, Pdom_rel, Pxx_moments, jerk_moments, sH_raw, Y)) features.append(X) F = np.asarray(features) #feature matrix for all clips from current trial clip_data['features'] = pd.DataFrame(data=F, columns=features_list, dtype='float32')
def entropy_multiscale(signal, emb_dim=2, tolerance="default"): """ Returns the Multiscale Entropy. Copied from the `pyEntropy <https://github.com/nikdon/pyEntropy>`_ repo by tjugo. Uses sample entropy with 'chebychev' distance. Parameters ---------- signal : list or array List or array of values. emb_dim : int The embedding dimension (*m*, the length of vectors to compare). tolerance : float Distance *r* threshold for two template vectors to be considered equal. Default is 0.2*std(signal). Returns ---------- multiscale_entropy : float The Multiscale Entropy as float value. Example ---------- >>> import neurokit as nk >>> >>> signal = [5, 1, 7, 2, 5, 1, 7, 4, 6, 7, 5, 4, 1, 1, 4, 4] >>> multiscale_entropy = nk.entropy_multiscale(signal) Notes ---------- *Details* - **multiscale entropy**: Entropy is a measure of unpredictability of the state, or equivalently, of its average information content. Multiscale entropy (MSE) analysis is a new method of measuring the complexity of finite length time series. *Authors* - tjugo (https://github.com/nikdon) - Dominique Makowski (https://github.com/DominiqueMakowski) *Dependencies* - numpy *See Also* - pyEntropy package: https://github.com/nikdon/pyEntropy References ----------- - Richman, J. S., & Moorman, J. R. (2000). Physiological time-series analysis using approximate entropy and sample entropy. American Journal of Physiology-Heart and Circulatory Physiology, 278(6), H2039-H2049. - Costa, M., Goldberger, A. L., & Peng, C. K. (2005). Multiscale entropy analysis of biological signals. Physical review E, 71(2), 021906. """ if tolerance == "default": tolerance = 0.2 * np.std(signal) n = len(signal) mse = np.zeros((1, emb_dim)) for i in range(emb_dim): b = int(np.fix(n / (i + 1))) temp_ts = [0] * int(b) for j in range(b): num = sum(signal[j * (i + 1):(j + 1) * (i + 1)]) den = i + 1 temp_ts[j] = float(num) / float(den) # Replaced the sample entropy computation with nolds' one... # se = sample_entropy(temp_ts, 1, tolerance) try: se = nolds.sampen(temp_ts, 1, tolerance, dist="euclidean", debug_plot=False, plot_file=None) except: se = nolds.sampen(temp_ts, 1, tolerance, dist="euler", debug_plot=False, plot_file=None) mse[0, i] = se multiscale_entropy = mse[0][emb_dim - 1] return (multiscale_entropy)
def processSingleFile(self, file): self.logger.log( "INFO", "EXTRACTING FEATURE {} FROM FILE: {}".format(self.algorithm, file)) res = self.readFile(file) if (self.algorithm == 'DfaMeanCorr'): feature = np.zeros([378, 1]) else: feature = np.zeros([26, 1]) if (res["error"]): return (feature) if (res["data"].shape[0] == 0): return (feature) if (res["data"].shape[1] < 10): return (feature) data = self.prepareMetrics(res["data"]) try: if (self.algorithm == 'correlation'): feature = self.correlation(data) elif (self.algorithm == 'dfa'): feature = np.zeros([26, 1]) for i in range(0, data.shape[1]): feature[i] = nolds.dfa(data[:, i]) elif (self.algorithm == 'sampen'): feature = np.zeros([26, 1]) for i in range(0, data.shape[1]): feature[i] = nolds.sampen(data[:,i],emb_dim=2,\ tolerance=0.2*np.std(data[:,i])) elif (self.algorithm == 'hurst'): feature = np.zeros([26, 1]) for i in range(0, data.shape[1]): feature[i] = nolds.hurst_rs(data[:, i]) elif (self.algorithm == 'DfaMeanCorr'): cache_filename = file + ".pkl" if (os.path.exists(cache_filename)): self.logger.log("INFO", "CACHE FOUND, USING IT") file = open(cache_filename, 'rb') cache = pickle.load(file) else: cache = {} if (not "corr" in cache): featureCorr = self.correlation(data) cache["corr"] = featureCorr else: featureCorr = cache["corr"] if (not "cons" in cache): featureConsump = self.consumption(res["data"]) cache["featureConsump"] = featureConsump else: featureConsump = cache["cons"] if (not "dfa" in cache): featureDfa = np.zeros([26, 1]) for i in range(0, int(data.shape[1])): featureDfa[i] = nolds.dfa(data[:, i]) cache["dfa"] = featureDfa else: featureDfa = cache["dfa"] if (not "entropy" in cache): featureEntropy = np.zeros([26, 1]) for i in range(0, int(data.shape[1])): featureEntropy[i] = nolds.sampen(data[:, i], 2) cache["entropy"] = featureEntropy with open(cache_filename, 'wb') as output: pickle.dump(cache, output, pickle.HIGHEST_PROTOCOL) feature = np.vstack( [featureDfa, featureConsump, featureEntropy, featureCorr]) except Exception as e: self.logger.log("ERROR","ERROR EXTRACTING FEATURE {} FROM FILE: {}, SKIPPING"\ .format(self.algorithm,file)) print(e) traceback.print_exc() for i in range(0, feature.shape[0]): feature[i] = 0 return (feature)
def r_features(r_peaks): # Sanity check after artifact removal if len(r_peaks) < 5: print( "NeuroKit Warning: ecg_hrv(): Not enough normal R peaks to compute HRV :/" ) r_peaks = [1000, 2000, 3000, 4000] hrv_dict = dict() RRis = np.diff(r_peaks) RRis = RRis / 500 RRis = RRis.astype(float) # Artifact detection - Statistical rr1 = 0 rr2 = 0 rr3 = 0 rr4 = 0 median_rr = np.median(RRis) for index, rr in enumerate(RRis): # Remove RR intervals that differ more than 25% from the previous one if rr < 0.6: rr1 += 1 if rr > 1.3: rr2 += 1 if rr < median_rr * 0.75: rr3 += 1 if rr > median_rr * 1.25: rr4 += 1 # Artifacts treatment hrv_dict["n_Artifacts1"] = rr1 / len(RRis) hrv_dict["n_Artifacts2"] = rr2 / len(RRis) hrv_dict["n_Artifacts3"] = rr3 / len(RRis) hrv_dict["n_Artifacts4"] = rr4 / len(RRis) hrv_dict["RMSSD"] = np.sqrt(np.mean(np.diff(RRis)**2)) hrv_dict["meanNN"] = np.mean(RRis) hrv_dict["sdNN"] = np.std(RRis, ddof=1) # make it calculate N-1 hrv_dict["cvNN"] = hrv_dict["sdNN"] / hrv_dict["meanNN"] hrv_dict["CVSD"] = hrv_dict["RMSSD"] / hrv_dict["meanNN"] hrv_dict["medianNN"] = np.median(abs(RRis)) hrv_dict["madNN"] = mad(RRis, constant=1) hrv_dict["mcvNN"] = hrv_dict["madNN"] / hrv_dict["medianNN"] nn50 = sum(abs(np.diff(RRis)) > 50) nn20 = sum(abs(np.diff(RRis)) > 20) hrv_dict["pNN50"] = nn50 / len(RRis) * 100 hrv_dict["pNN20"] = nn20 / len(RRis) * 100 hrv_dict["Shannon"] = complexity_entropy_shannon(RRis) hrv_dict["Sample_Entropy"] = nolds.sampen(RRis, emb_dim=2) #mse = complexity_entropy_multiscale(RRis, max_scale_factor=20, m=2) #hrv_dict["Entropy_Multiscale_AUC"] = mse["MSE_AUC"] hrv_dict["Entropy_SVD"] = complexity_entropy_svd(RRis, emb_dim=2) hrv_dict["Entropy_Spectral_VLF"] = complexity_entropy_spectral( RRis, 500, bands=np.arange(0.0033, 0.04, 0.001)) hrv_dict["Entropy_Spectral_LF"] = complexity_entropy_spectral( RRis, 500, bands=np.arange(0.04, 0.15, 0.001)) hrv_dict["Entropy_Spectral_HF"] = complexity_entropy_spectral( RRis, 500, bands=np.arange(0.15, 0.40, 0.001)) hrv_dict["Fisher_Info"] = complexity_fisher_info(RRis, tau=1, emb_dim=2) #hrv_dict["FD_Petrosian"] = complexity_fd_petrosian(RRis) #hrv_dict["FD_Higushi"] = complexity_fd_higushi(RRis, k_max=16) hrv_dict.update(hrv.time_domain(RRis)) hrv_dict.update(hrv.frequency_domain(RRis)) # RRI Velocity diff_rri = np.diff(RRis) hrv_dict.update(add_suffix(hrv.time_domain(diff_rri), "fil1")) hrv_dict.update(add_suffix(hrv.frequency_domain(diff_rri), "fil1")) # RRI Acceleration diff2_rri = np.diff(diff_rri) hrv_dict.update(add_suffix(hrv.time_domain(diff2_rri), "fil2")) hrv_dict.update(add_suffix(hrv.frequency_domain(diff2_rri), "fil2")) return hrv_dict
def sample_entropy(y):
    # Sample Entropy
    return nolds.sampen(y)
# 100-point sliding windows with a 10-step jump between windows to save space
window_size = 100
window_size = 2000  # overrides the previous value; only the last assignment is used
emb_dim = 4

rolling = rolling_window(df.logR_ask, window_size, 10)
rolling = rolling_window(df_std.logR_ask, window_size, window_size)
rolling = rolling_window(df_QN_laplace_std.values.transpose()[0], window_size, window_size)
rolling_ns = rolling_window(df.ask, window_size, 10)
rolling_ts = rolling_window(df.index, window_size, 10)
df_ = pd.DataFrame(rolling)
sw_1 = rolling[1]
sw_1_ns = rolling[1]

nolds.lyap_r(sw_1, emb_dim=emb_dim)
nolds.lyap_e(sw_1, emb_dim=emb_dim)
nolds.sampen(sw_1, emb_dim=emb_dim)
nolds.hurst_rs(sw_1)
nolds.corr_dim(sw_1, emb_dim=emb_dim)
nolds.dfa(sw_1)

ent.shannon_entropy(sw_1)  # is this even valid? we do not have any p_i states, and it ignores temporal order
                           # ("Practical considerations of permutation entropy")
ent.sample_entropy(sw_1, sample_length=10)  # what is sample length?
#ent.multiscale_entropy(sw_1, sample_length=10, tolerance=0.1*np.std(sw_1))  # what is tolerance?
# "Practical considerations of permutation entropy: A tutorial review - how to choose parameters in permutation entropy"
ent.permutation_entropy(sw_1, m=8, delay=emb_dim)  # reference paper above; fixed typo: was emd_dim
#ent.composite_multiscale_entropy()
lempel_ziv_complexity(sw_1)
gzip_compress_ratio(sw_1_ns, 9)
#https://www.researchgate.net/post/How_can_we_find_out_which_value_of_embedding_dimensions_is_more_accurate
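A hedged sketch of applying the same sample-entropy call to every rolling window from the snippet above (rolling and emb_dim are assumed to be defined as there):

sampen_per_window = [nolds.sampen(w, emb_dim=emb_dim) for w in rolling]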
for sheet in worksheets: # Activate worksheet to write dataframe active = workbook[sheet] # load dataset series = read_csv('../data/datasets/' + sheet + '_final_week.csv', header=0, index_col=0, parse_dates=True, squeeze=True) x = series.values x = x.astype('float32') # x = np.fromiter(series.values, dtype="float32") columns = list(range(2, 5)) emb_dim = -1 for column in columns: if column == 2: emb_dim = 3 elif column == 3: emb_dim = 5 elif column == 4: emb_dim = 4 # Do the calculation and put it on a specific cell sample_entropy = nolds.sampen(x, emb_dim=emb_dim, tolerance=None) print("Sample entropy: " + str(sample_entropy)) active.cell(row=27, column=column).value = sample_entropy # Save workbook to write workbook.save("../data/chaos_data/results_presentation.xlsx") workbook.close()
def calculate_approximate_entropy(traffic):
    # Note: despite the function name, nolds.sampen computes *sample* entropy, not approximate entropy.
    return nolds.sampen(traffic)