def calc(images, adversarial_images, measure='sampen'):
    """Calculate the nonlinear measure of original and adversarial images.

    Parameters
    ----------
    images : sequence of np.ndarray
        Original images; each is flattened before the measure is computed.
    adversarial_images : sequence of np.ndarray
        Adversarial counterparts, aligned by index with ``images``.
    measure : str
        'sampen' : Sample entropy
        'frac'   : Correlation/Fractal dimension
        'hurst'  : Hurst exponent
        'lyapr'  : Largest Lyapunov exponent (Rosenstein et al. method)
        Docs : https://cschoel.github.io/nolds/

    Returns
    -------
    tuple of (list, list)
        Measures for the original images and for the adversarial images.

    Notes
    -----
    If an adversarial image is found to be NaN, we output 0 for it. Some
    adversarial images are NaN because adversarial generation was
    unsuccessful for them: generation has a maximum iteration count and the
    program outputs NaN when that limit is reached before a perturbation is
    found. For more info look at "adversarial_gen.ipynb".
    """
    # One callable per measure replaces the four duplicated if/elif arms.
    measure_funcs = {
        'sampen': nolds.sampen,
        'frac': lambda x: nolds.corr_dim(x, 1),
        'hurst': nolds.hurst_rs,
        'lyapr': nolds.lyap_r,
    }
    if measure not in measure_funcs:
        # The original silently returned two empty lists for an unknown
        # measure; fail loudly instead.
        raise ValueError("Unknown measure: {!r}".format(measure))
    func = measure_funcs[measure]

    imageCalc_data = []
    advimageCalc_data = []
    for i in tqdm(range(len(images))):
        image = images[i].flatten()
        advimage = adversarial_images[i].flatten()
        imageCalc_data.append(func(image))
        # NaN marks a failed adversarial generation; record 0 instead.
        if np.isnan(np.sum(advimage)):
            advimageCalc_data.append(0)
        else:
            # Bug fix: the original 'sampen' branch called the misspelled
            # nolds.samepn(...), which raised AttributeError at runtime.
            advimageCalc_data.append(func(advimage))
    return imageCalc_data, advimageCalc_data
def compare_sim():
    """Load recorded torsion-pendulum angles and print the largest
    Lyapunov exponent of the first 1000 samples."""
    frame = pd.read_excel("Torsion Chaotic Data.xlsx")
    sample_limit = 1000
    angle_series = np.array(frame["Angle (rad)"])[:sample_limit]
    exponent = nolds.lyap_r(angle_series, min_tsep=10, lag=None)
    print(exponent)
def process(self, signals):
    """Largest Lyapunov exponent for each channel of ``signals``.

    Channels flagged by checkDropOutsByChannel (i.e. containing dropouts)
    get NaN instead of an estimate. Returns a float64 array with one
    entry per channel.
    """
    n_channels = self.__len__()
    features = np.empty(n_channels, dtype=np.float64)
    for ch in range(n_channels):
        channel = signals[:, ch]
        if checkDropOutsByChannel(channel):
            features[ch] = np.nan
        else:
            features[ch] = nolds.lyap_r(channel)
    return features
def lyapunov(Ω):
    '''\
    Use nolds lyap_r to find the largest Lyapunov exponent of ``Ω``.

    Returns 0.0 when the estimator fails (e.g. series too short or
    degenerate), preserving the original best-effort contract.
    '''
    try:
        return nd.lyap_r(Ω)
    except Exception:
        # Bug fix: the original bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit; catch only real errors.
        return 0.0
def get_lyapunov_exponent(signal):
    """Largest Lyapunov exponent for every channel of a multi-channel signal.

    Returns a DataFrame with one row per channel (index named "channel")
    and a single "lyapunov_exp" column.
    """
    exponents = [lyap_r(channel) for channel in signal]
    frame = pd.DataFrame(np.array([exponents]).transpose(),
                         columns=["lyapunov_exp"])
    frame.index.name = "channel"
    return frame
def chaos_report():
    """
    Calculates the maximum Lyapunov exponent of the system for a sweep of
    drive frequencies and writes a tab-separated report file.

    For each frequency in [0, 2] the pendulum is integrated under the
    'chaotic' and 'damped_driven' models and the Rosenstein largest
    Lyapunov exponent of each angle trace is estimated.
    """
    num_values = 1000  # parameter spacing
    drive_frequencies = np.linspace(0, 2, num_values)
    lines = []
    time = np.linspace(0, 100, 1000)
    for count, f in enumerate(drive_frequencies):
        print("Iteration: ", count)
        tp = TorsionPendulum()
        tp.drive_frequency = f
        init_condits = [0, 0]
        chaotic_theta, chaotic_theta_dot = tp.integrate(
            init_condits, time, model_type='chaotic')
        driven_theta, driven_theta_dot = tp.integrate(
            init_condits, time, model_type='damped_driven')
        chaotic_le = nolds.lyap_r(chaotic_theta, min_tsep=10, lag=None)
        driven_le = nolds.lyap_r(driven_theta, min_tsep=10, lag=None)
        lines.append([f, chaotic_le, driven_le])

    header = "Drive Frequency\t Chaotic Lyapunov Exponent\t Damped Driven Lyapunov Exponent\n"
    string_lines = []
    for line in lines:
        string_line = ""
        for value in line:
            string_line += (str(value) + '\t')
        string_lines.append(string_line + '\n')
    # Bug fix: the original used open()/close() (leaking the handle on any
    # write error) and shadowed the function name with the file object.
    with open("chaos_report_frequency_{}.txt".format(num_values), 'w') as report:
        report.write(header)
        report.writelines(string_lines)
def load_feature(s):
    """Build a feature matrix (one row per input series) from random walks.

    Each series in ``s`` is turned into a walk via ``lwalk``; the columns
    are, in order: std, DFA, Hurst, sample entropy, autocorrelation at lag
    100, value-number/length ratio, autocorrelation at lags 200 and 300,
    and the largest Lyapunov exponent.
    """
    walks = [lwalk(series) for series in s]
    feature_rows = [
        [np.std(w) for w in walks],
        [nolds.dfa(w) for w in walks],
        [nolds.hurst_rs(w) for w in walks],
        [nolds.sampen(w) for w in walks],
        [autocorrelation(w, 100) for w in walks],
        [ratio_value_number_to_time_series_length(w) for w in walks],
        [autocorrelation(w, 200) for w in walks],
        [autocorrelation(w, 300) for w in walks],
        [nolds.lyap_r(w) for w in walks],
    ]
    return pd.DataFrame(feature_rows).transpose()
def mle(x: np.ndarray) -> float:
    """
    Maximum Lyapunov Exponent

    :param x: 1-d numeric vector
    :return: numeric scalar (np.nan when the estimate cannot be computed)
    """
    # sqrt(len) heuristic sizes the embedding, trajectory and neighbour
    # count together.
    k = int(np.sqrt(len(x)))
    try:
        return nolds.lyap_r(data=x, emb_dim=k, trajectory_len=k,
                            min_neighbors=k)
    except (ValueError, np.linalg.LinAlgError, AssertionError):
        return np.nan
def ft_exp_max_lyap(cls,
                    ts: np.ndarray,
                    embed_dim: int = 10,
                    lag: t.Optional[int] = None) -> float:
    """Estimation of the maximum Lyapunov coefficient.

    Parameters
    ----------
    ts : :obj:`np.ndarray`
        One-dimensional time-series values.

    embed_dim : int, optional (default=10)
        Time-series embed dimension.

    lag : int, optional
        Lag of the embed.

    Returns
    -------
    float
        Estimation of the maximum Lyapunov coefficient.

    References
    ----------
    .. [1] "nolds" Python package: https://pypi.org/project/nolds/
    .. [2] Lemke, Christiane & Gabrys, Bogdan. (2010). Meta-learning
        for time series forecasting and forecast combination.
        Neurocomputing. 73. 2006-2016. 10.1016/j.neucom.2009.09.020.
    """
    # nolds can emit RuntimeWarnings on short/degenerate series; silence
    # them locally without touching the global warning filters.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore",
                                module="nolds",
                                category=RuntimeWarning)
        return nolds.lyap_r(data=ts, lag=lag, emb_dim=embed_dim)
def transform(self, X):
    """Largest Lyapunov exponent (Rosenstein) for each series yielded by
    ``MyIterator(X)``.

    Parameters
    ----------
    X : array-like, shape (n_series, n_points)
        Input series; ``self.num_point`` is initialised from ``X.shape[1]``
        on first use.

    Returns
    -------
    float, np.ndarray or None
        A scalar for a single series, an array for several (matching the
        original ``np.r_`` accumulation), or None for an empty iterator
        (the original raised NameError in that case).
    """
    if self.num_point is None:
        self.num_point = X.shape[1]
    dataset = MyIterator(X)
    transformed_X = None
    for idx, inputs in enumerate(dataset):
        # NOTE: the original also computed an unused log-derivative array
        # ``val`` from self.eps/self.dt each iteration; that dead code has
        # been removed.
        max_lyapunov = nolds.lyap_r(inputs.tolist(),
                                    emb_dim=2,
                                    fit='poly',
                                    min_tsep=int(len(inputs) // 4))
        if idx == 0:
            transformed_X = max_lyapunov
        else:
            transformed_X = np.r_[transformed_X, max_lyapunov]
    return transformed_X
def get_lyapunov_exponent_over_time(trades, st, et, step_minutes, window_minutes):
    """Sliding-window largest Lyapunov exponent of trade prices.

    A window of ``window_minutes`` is stepped from ``st`` towards ``et``
    in ``step_minutes`` increments. Windows with no prices, or with a
    non-positive exponent, are skipped.

    Returns (times, lyap_exps): window end times and their exponents.
    """
    total_steps = int(((et - st).total_seconds() / 60) / step_minutes)
    times = []
    lyap_exps = []
    for step in range(total_steps):
        window_start = st + datetime.timedelta(minutes=step_minutes * step)
        window_end = window_start + datetime.timedelta(minutes=window_minutes)
        window = DataSplitter.get_between(trades, window_start, window_end)
        prices = np.asarray(window['price'].dropna(), dtype=np.float32)
        if len(prices) == 0:
            continue
        exponent = nolds.lyap_r(prices)
        # Only positive exponents (indicating chaos) are recorded.
        if exponent > 0:
            times.append(window_end)
            lyap_exps.append(exponent)
    return times, lyap_exps
def complexity(signal, shannon=True, sampen=True, multiscale=True,
               fractal_dim=True, hurst=True, dfa=True, lyap_r=False,
               lyap_e=False, emb_dim=2, tolerance="default"):
    """
    Returns several chaos/complexity indices of a signal (including
    entropy, fractal dimensions, Hurst and Lyapunov exponent etc.).

    Parameters
    ----------
    signal : list or array
        List or array of values.
    shannon : bool
        Computes Shannon entropy.
    sampen : bool
        Computes approximate sample entropy (sampen) using Chebychev and
        Euclidean distances.
    multiscale : bool
        Computes multiscale entropy (MSE). Note that it uses the
        'euclidean' distance.
    fractal_dim : bool
        Computes the fractal (correlation) dimension.
    hurst : bool
        Computes the Hurst exponent.
    dfa : bool
        Computes DFA.
    lyap_r : bool
        Computes Positive Lyapunov exponents (Rosenstein et al. (1993) method).
    lyap_e : bool
        Computes Positive Lyapunov exponents (Eckmann et al. (1986) method).
    emb_dim : int
        The embedding dimension (*m*, the length of vectors to compare).
        Used in sampen and fractal_dim.
    tolerance : float
        Distance *r* threshold for two template vectors to be considered
        equal. Default is 0.2*std(signal). Used in sampen and fractal_dim.

    Returns
    ----------
    complexity : dict
        Dict containing values for each index; np.nan is stored (and a
        warning printed) when a computation fails.

    Example
    ----------
    >>> import neurokit as nk
    >>> import numpy as np
    >>>
    >>> signal = np.sin(np.log(np.random.sample(666)))
    >>> complexity = nk.complexity(signal)

    Notes
    ----------
    See the nolds (https://github.com/CSchoel/nolds) and pyEntropy
    (https://github.com/nikdon/pyEntropy) packages for the individual
    estimators (entropy measures, correlation dimension, Hurst/DFA
    exponents, Lyapunov exponents).

    References
    -----------
    - Richman, J. S., & Moorman, J. R. (2000). Physiological time-series
      analysis using approximate entropy and sample entropy. American
      Journal of Physiology-Heart and Circulatory Physiology, 278(6),
      H2039-H2049.
    - Costa, M., Goldberger, A. L., & Peng, C. K. (2005). Multiscale
      entropy analysis of biological signals. Physical review E, 71(2),
      021906.
    """
    if tolerance == "default":
        tolerance = 0.2 * np.std(signal)

    # Initialize results storing
    complexity = {}

    # NOTE: every handler below uses ``except Exception`` instead of the
    # original bare ``except:`` so KeyboardInterrupt/SystemExit still
    # propagate; the best-effort "warn and store NaN" contract is kept.

    # Shannon
    if shannon is True:
        try:
            complexity["Shannon_Entropy"] = entropy_shannon(signal)
        except Exception:
            print(
                "NeuroKit warning: complexity(): Failed to compute Shannon entropy."
            )
            complexity["Shannon_Entropy"] = np.nan

    # Sampen
    if sampen is True:
        try:
            complexity["Sample_Entropy_Chebychev"] = nolds.sampen(
                signal, emb_dim, tolerance, dist="chebychev",
                debug_plot=False, plot_file=None)
        except Exception:
            print(
                "NeuroKit warning: complexity(): Failed to compute sample entropy (sampen) using chebychev distance."
            )
            complexity["Sample_Entropy_Chebychev"] = np.nan
        try:
            complexity["Sample_Entropy_Euclidean"] = nolds.sampen(
                signal, emb_dim, tolerance, dist="euclidean",
                debug_plot=False, plot_file=None)
        except Exception:
            # Older nolds versions spelled the distance "euler".
            try:
                complexity["Sample_Entropy_Euclidean"] = nolds.sampen(
                    signal, emb_dim, tolerance, dist="euler",
                    debug_plot=False, plot_file=None)
            except Exception:
                print(
                    "NeuroKit warning: complexity(): Failed to compute sample entropy (sampen) using euclidean distance."
                )
                complexity["Sample_Entropy_Euclidean"] = np.nan

    # multiscale
    if multiscale is True:
        try:
            complexity["Multiscale_Entropy"] = entropy_multiscale(
                signal, emb_dim, tolerance)
        except Exception:
            print(
                "NeuroKit warning: complexity(): Failed to compute Multiscale Entropy (MSE)."
            )
            complexity["Multiscale_Entropy"] = np.nan

    # fractal_dim
    if fractal_dim is True:
        try:
            complexity["Fractal_Dimension"] = nolds.corr_dim(
                signal, emb_dim, rvals=None, fit="RANSAC",
                debug_plot=False, plot_file=None)
        except Exception:
            print(
                "NeuroKit warning: complexity(): Failed to compute fractal_dim."
            )
            complexity["Fractal_Dimension"] = np.nan

    # Hurst
    if hurst is True:
        try:
            complexity["Hurst"] = nolds.hurst_rs(signal, nvals=None,
                                                 fit="RANSAC",
                                                 debug_plot=False,
                                                 plot_file=None)
        except Exception:
            print("NeuroKit warning: complexity(): Failed to compute hurst.")
            complexity["Hurst"] = np.nan

    # DFA
    if dfa is True:
        try:
            complexity["DFA"] = nolds.dfa(signal, nvals=None, overlap=True,
                                          order=1, fit_trend="poly",
                                          fit_exp="RANSAC",
                                          debug_plot=False, plot_file=None)
        except Exception:
            print("NeuroKit warning: complexity(): Failed to compute dfa.")
            complexity["DFA"] = np.nan

    # Lyap_r
    if lyap_r is True:
        try:
            complexity["Lyapunov_R"] = nolds.lyap_r(signal, emb_dim=10,
                                                    lag=None, min_tsep=None,
                                                    tau=1, min_vectors=20,
                                                    trajectory_len=20,
                                                    fit="RANSAC",
                                                    debug_plot=False,
                                                    plot_file=None)
        except Exception:
            print("NeuroKit warning: complexity(): Failed to compute lyap_r.")
            complexity["Lyapunov_R"] = np.nan

    # Lyap_e
    if lyap_e is True:
        try:
            result = nolds.lyap_e(signal, emb_dim=10, matrix_dim=4,
                                  min_nb=None, min_tsep=0, tau=1,
                                  debug_plot=False, plot_file=None)
            # One key per exponent in the estimated spectrum.
            for i, value in enumerate(result):
                complexity["Lyapunov_E_" + str(i)] = value
        except Exception:
            print("NeuroKit warning: complexity(): Failed to compute lyap_e.")
            complexity["Lyapunov_E"] = np.nan

    return (complexity)
# plot on log-log scale plot(log(lags), log(tau)); show() # calculate Hurst as slope of log-log plot m = polyfit(log(lags), log(tau), 1) hurst = m[0]*2.0 hurst #farctal dimension (correlation dimension)= slope of the line fitted to log(r) vs log(C(r)) # If the correlation dimension is constant for all ‘m’ the time series will be deterministic #if the correlation exponentincreases with increase in ‘m’ the time series will be stochastic. h01 = nolds.corr_dim(F,2,debug_plot=True) h01 #lyap_r = estimate largest lyapunov exponent h1=nolds.lyap_r(F,emb_dim=2,debug_plot=True) h1 #lyap_e = estimate whole spectrum of lyapunov exponents h2=nolds.lyap_e(F) h2 from pyentrp import entropy as ent T1=np.std(F) T1 k= 0.2*T1 k #sample entropy h = nolds.sampen(F,3,tolerance=k) h
def complexity(signal, sampling_rate=1000, shannon=True, sampen=True,
               multiscale=True, spectral=True, svd=True, correlation=True,
               higushi=True, petrosian=True, fisher=True, hurst=True,
               dfa=True, lyap_r=False, lyap_e=False, emb_dim=2,
               tolerance="default", k_max=8, bands=None, tau=1):
    """
    Computes several chaos/complexity indices of a signal (including
    entropy, fractal dimensions, Hurst and Lyapunov exponent etc.).

    Parameters
    ----------
    signal : list or array
        List or array of values.
    sampling_rate : int
        Sampling rate (samples/second).
    shannon : bool
        Computes Shannon entropy.
    sampen : bool
        Computes approximate sample entropy (sampen) using Chebychev and
        Euclidean distances.
    multiscale : bool
        Computes multiscale entropy (MSE). Note that it uses the
        'euclidean' distance.
    spectral : bool
        Computes Spectral Entropy.
    svd : bool
        Computes the Singular Value Decomposition (SVD) entropy.
    correlation : bool
        Computes the fractal (correlation) dimension.
    higushi : bool
        Computes the Higushi fractal dimension.
    petrosian : bool
        Computes the Petrosian fractal dimension.
    fisher : bool
        Computes the Fisher Information.
    hurst : bool
        Computes the Hurst exponent.
    dfa : bool
        Computes DFA.
    lyap_r : bool
        Computes Positive Lyapunov exponents (Rosenstein et al. (1993) method).
    lyap_e : bool
        Computes Positive Lyapunov exponents (Eckmann et al. (1986) method).
    emb_dim : int
        The embedding dimension (*m*, the length of vectors to compare).
        Used in sampen, fisher, svd and fractal_dim.
    tolerance : float
        Distance *r* threshold for two template vectors to be considered
        equal. Default is 0.2*std(signal). Used in sampen and fractal_dim.
    k_max : int
        The maximal value of k used for Higushi fractal dimension. The
        point at which the FD plateaus is considered a saturation point
        and that kmax value should be selected (Gómez, 2009).
    bands : int
        Used for spectral density. A list of numbers delimiting the bins
        of the frequency bands. If None the entropy is computed over the
        whole range of the DFT (from 0 to `f_s/2`).
    tau : int
        The delay. Used for fisher, svd, lyap_e and lyap_r.

    Returns
    ----------
    complexity : dict
        Dict containing values for each index; np.nan is stored (and a
        warning printed) when a computation fails.

    Example
    ----------
    >>> import neurokit as nk
    >>> import numpy as np
    >>>
    >>> signal = np.sin(np.log(np.random.sample(666)))
    >>> complexity = nk.complexity(signal)

    Notes
    ----------
    See the nolds (https://github.com/CSchoel/nolds), pyEntropy
    (https://github.com/nikdon/pyEntropy) and pyrem
    (https://github.com/gilestrolab/pyrem) packages for details on the
    individual estimators.

    References
    -----------
    - Richman, J. S., & Moorman, J. R. (2000). Physiological time-series
      analysis using approximate entropy and sample entropy. American
      Journal of Physiology-Heart and Circulatory Physiology, 278(6),
      H2039-H2049.
    - Costa, M., Goldberger, A. L., & Peng, C. K. (2005). Multiscale
      entropy analysis of biological signals. Physical review E, 71(2),
      021906.
    - Acharya, R., Bhat, P. S., Kannathal, N., Rao, A., & Lim, C. M.
      (2005). Analysis of cardiac health using fractal dimension and
      wavelet transformation. ITBM-RBM, 26(2), 133-139.
    """
    if tolerance == "default":
        tolerance = 0.2 * np.std(signal)

    # Initialize results storing
    complexity = {}

    # NOTE: every handler below uses ``except Exception`` instead of the
    # original bare ``except:`` so KeyboardInterrupt/SystemExit still
    # propagate; the best-effort "warn and store NaN" contract is kept.

    # ------------------------------------------------------------------------------
    # Shannon
    if shannon is True:
        try:
            complexity["Entropy_Shannon"] = entropy_shannon(signal)
        except Exception:
            print(
                "NeuroKit warning: complexity(): Failed to compute Shannon entropy."
            )
            complexity["Entropy_Shannon"] = np.nan

    # Sampen
    if sampen is True:
        try:
            complexity["Entropy_Sample"] = nolds.sampen(signal, emb_dim,
                                                        tolerance,
                                                        dist="chebychev",
                                                        debug_plot=False,
                                                        plot_file=None)
        except Exception:
            print(
                "NeuroKit warning: complexity(): Failed to compute sample entropy (sampen)."
            )
            complexity["Entropy_Sample"] = np.nan

    # multiscale
    if multiscale is True:
        try:
            complexity["Entropy_Multiscale"] = entropy_multiscale(
                signal, emb_dim, tolerance)
        except Exception:
            print(
                "NeuroKit warning: complexity(): Failed to compute Multiscale Entropy (MSE)."
            )
            complexity["Entropy_Multiscale"] = np.nan

    # spectral
    if spectral is True:
        try:
            complexity["Entropy_Spectral"] = entropy_spectral(
                signal, sampling_rate=sampling_rate, bands=bands)
        except Exception:
            print(
                "NeuroKit warning: complexity(): Failed to compute Spectral Entropy."
            )
            complexity["Entropy_Spectral"] = np.nan

    # SVD
    if svd is True:
        try:
            complexity["Entropy_SVD"] = entropy_svd(signal, tau=tau,
                                                    emb_dim=emb_dim)
        except Exception:
            print(
                "NeuroKit warning: complexity(): Failed to compute SVD Entropy."
            )
            complexity["Entropy_SVD"] = np.nan

    # ------------------------------------------------------------------------------
    # fractal_dim
    if correlation is True:
        try:
            complexity["Fractal_Dimension_Correlation"] = nolds.corr_dim(
                signal, emb_dim, rvals=None, fit="RANSAC",
                debug_plot=False, plot_file=None)
        except Exception:
            print(
                "NeuroKit warning: complexity(): Failed to compute fractal_dim."
            )
            complexity["Fractal_Dimension_Correlation"] = np.nan

    # higushi
    if higushi is True:
        try:
            complexity["Fractal_Dimension_Higushi"] = fd_higushi(signal, k_max)
        except Exception:
            print("NeuroKit warning: complexity(): Failed to compute higushi.")
            complexity["Fractal_Dimension_Higushi"] = np.nan

    # petrosian
    if petrosian is True:
        try:
            complexity["Fractal_Dimension_Petrosian"] = fd_petrosian(signal)
        except Exception:
            print(
                "NeuroKit warning: complexity(): Failed to compute petrosian.")
            complexity["Fractal_Dimension_Petrosian"] = np.nan

    # ------------------------------------------------------------------------------
    # Fisher
    if fisher is True:
        try:
            complexity["Fisher_Information"] = fisher_info(signal, tau=tau,
                                                           emb_dim=emb_dim)
        except Exception:
            print(
                "NeuroKit warning: complexity(): Failed to compute Fisher Information."
            )
            complexity["Fisher_Information"] = np.nan

    # Hurst
    if hurst is True:
        try:
            complexity["Hurst"] = nolds.hurst_rs(signal, nvals=None,
                                                 fit="RANSAC",
                                                 debug_plot=False,
                                                 plot_file=None)
        except Exception:
            print("NeuroKit warning: complexity(): Failed to compute hurst.")
            complexity["Hurst"] = np.nan

    # DFA
    if dfa is True:
        try:
            complexity["DFA"] = nolds.dfa(signal, nvals=None, overlap=True,
                                          order=1, fit_trend="poly",
                                          fit_exp="RANSAC",
                                          debug_plot=False, plot_file=None)
        except Exception:
            print("NeuroKit warning: complexity(): Failed to compute dfa.")
            complexity["DFA"] = np.nan

    # Lyap_r
    if lyap_r is True:
        try:
            complexity["Lyapunov_R"] = nolds.lyap_r(signal, emb_dim=10,
                                                    lag=None, min_tsep=None,
                                                    tau=tau, min_vectors=20,
                                                    trajectory_len=20,
                                                    fit="RANSAC",
                                                    debug_plot=False,
                                                    plot_file=None)
        except Exception:
            print("NeuroKit warning: complexity(): Failed to compute lyap_r.")
            complexity["Lyapunov_R"] = np.nan

    # Lyap_e
    if lyap_e is True:
        try:
            result = nolds.lyap_e(signal, emb_dim=10, matrix_dim=4,
                                  min_nb=None, min_tsep=0, tau=tau,
                                  debug_plot=False, plot_file=None)
            # One key per exponent in the estimated spectrum.
            for i, value in enumerate(result):
                complexity["Lyapunov_E_" + str(i)] = value
        except Exception:
            print("NeuroKit warning: complexity(): Failed to compute lyap_e.")
            complexity["Lyapunov_E"] = np.nan

    return (complexity)
for k in range(len(times)-1): t = times[k] h = (times[k+1]-times[k])/subdiv for j in range(subdiv): k1 = f(u,t)*h k2 = f(u+0.5*k1, t+0.5*h)*h u, t = u+k2, t+h uout[k+1]=u return uout def plotphase(A,B,C,D,E): def derivs(u,t): y,z = u; return np.array([ z, -A*y**3 + B*y - C*z + D*np.cos(E*t) ]) N=60 u0 = np.array([0.0, 0.0]) t = np.arange(0,300,2*np.pi/N); u = RK2(derivs, u0, t, subdiv = 10) plt.plot(u[:-2*N,0],u[:-2*N,1],'.--y', u[-2*N:,0],u[-2*N:,1], '.-b', lw=0.5, ms=2); plt.plot(u[::N,0],u[::N,1],'rs', ms=4); plt.grid(); plt.show() return u l = plotphase(1.0, 5.0, 0.02, 8.0, 0.5) import nolds qr = nolds.lyap_r(l[:,0]) qe = nolds.lyap_e(l[:,0]) import nolitsa.lyapunov as qn #qn = nolitsa.lyapunov qww = qn.mle(l) maximum= np.amax(qww)
""" # 100 dp sliding windows with 10 step jump between each window to save space window_size = 100 window_size = 2000 emb_dim = 4 rolling = rolling_window(df.logR_ask, window_size, 10) rolling = rolling_window(df_std.logR_ask, window_size, window_size) rolling = rolling_window(df_QN_laplace_std.values.transpose()[0], window_size, window_size) rolling_ns = rolling_window(df.ask, window_size, 10) rolling_ts = rolling_window(df.index, window_size, 10) df_ = pd.DataFrame(rolling) sw_1 = rolling[1] sw_1_ns = rolling[1] nolds.lyap_r(sw_1, emb_dim = emb_dim) nolds.lyap_e(sw_1, emb_dim = emb_dim) nolds.sampen(sw_1, emb_dim= emb_dim) nolds.hurst_rs(sw_1) nolds.corr_dim(sw_1, emb_dim=emb_dim) nolds.dfa(sw_1) ent.shannon_entropy(sw_1) # is this even valid? we do not have any p_i states i ALSO IGNORES TEMPORAL ORDER - Practical consideration of permutation entropy ent.sample_entropy(sw_1, sample_length = 10) #what is sample length? #ent.multiscale_entropy(sw_1, sample_length = 10, tolerance = 0.1*np.std(sw_1)) # what is tolerance? "Practical considerations of permutation entropy: A Tutorial review - how to choose parameters in permutation entropy" ent.permutation_entropy(sw_1, m=8, delay = emd_dim ) #Reference paper above #ent.composite_multiscale_entropy() lempel_ziv_complexity(sw_1) gzip_compress_ratio(sw_1_ns, 9)
def Feature_Exteraction(signal, fs):
    # Extracts a 14-element HRV feature vector from a raw ECG signal
    # sampled at fs Hz: band-pass filter, R-peak detection, RR intervals,
    # then time-domain, frequency-domain and nonlinear features.

    # Frequency Filter: 4-50 Hz band-pass FIR (101 taps, Hamming window)
    h = sig.firwin(101, [4, 50], width=None, window='hamming', pass_zero=False, scale=True, fs=fs)
    # myplottools.mfreqz(h, 1, fs)  # bode plot
    ECG_filtered = sig.fftconvolve(h, signal)

    # R-Peak Detection
    rpeaks = RPeak.QRS_detection(ECG_filtered, fs)
    # RR intervals (in samples); NOTE(review): uses rpeaks[i+2]-rpeaks[i+1],
    # skipping the first peak — presumably to drop an unreliable first
    # detection; confirm.
    RR = []
    for i in range(len(rpeaks) - 2):
        RR.append(rpeaks[i + 2] - rpeaks[i + 1])

    # Split RR into windows of N beats.
    # NOTE(review): RR[i:i + N] yields overlapping windows shifted by ONE
    # beat, not disjoint N-beat blocks — likely intended RR[i*N:(i+1)*N];
    # confirm before changing, downstream features depend on this.
    seperated = []
    N = 30
    number = int(np.floor(len(RR) / N))
    for i in range(number):
        seperated.append(RR[i:i + N])

    ####### RR Features #######
    # feature1/2: mean of per-window means and per-window stds.
    mean_Feature = []
    std_Feature = []
    for i in range(len(seperated)):
        mean_Feature.append(np.mean(seperated[i]))
        std_Feature.append(np.std(seperated[i]))
    feature1 = np.mean(mean_Feature)
    feature2 = np.mean(std_Feature)

    ####### RR_Seperated Features #######
    # Successive RR differences within each window.
    RR_seperated = np.zeros((number, N - 1))
    for i in range(number):
        for j in range(N - 1):
            RR_seperated[i, j] = seperated[i][j + 1] - seperated[i][j]
    RR_seperated_mean_Feature = []
    RR_seperated_std_Feature = []
    for i in range(number):
        RR_seperated_mean_Feature.append(np.mean(RR_seperated[i, :]))
        RR_seperated_std_Feature.append(np.std(RR_seperated[i, :]))
    feature3 = np.mean(RR_seperated_std_Feature)
    feature4 = np.mean(RR_seperated_mean_Feature)

    ####### pNN Feature #######
    # Successive differences exceeding 50/10/5 ms (converted to samples).
    # NOTE(review): feature5/6/7 = len(pNN*_Feature) always equals
    # ``number`` (one list appended per window), not the count of
    # threshold-exceeding beats — almost certainly a bug; confirm intent
    # (total or per-window count) before fixing.
    pNN50_Feature = []
    pNN10_Feature = []
    pNN5_Feature = []
    for i in range(number):
        pNN50_Feature.append([abs(x) for x in RR_seperated[i][:] if abs(x) > 50 * fs / 1000])
        pNN10_Feature.append([abs(x) for x in RR_seperated[i][:] if abs(x) > 10 * fs / 1000])
        pNN5_Feature.append([abs(x) for x in RR_seperated[i][:] if abs(x) > 5 * fs / 1000])
    feature5 = len(pNN5_Feature)
    feature6 = len(pNN10_Feature)
    feature7 = len(pNN50_Feature)

    ####### Frequncy Energy Ratio Feature #######
    # HF (0.15-0.4) vs LF (0.04-0.15) band energy of the RR series
    # (fs=1, i.e. frequencies in cycles/beat).
    h_high = sig.firwin(101, [0.15, 0.4], width=None, window='hamming', pass_zero=False, scale=True, fs=1)
    h_low = sig.firwin(101, [0.04, 0.15], width=None, window='hamming', pass_zero=False, scale=True, fs=1)
    RR_high = sig.fftconvolve(h_high, RR)
    RR_low = sig.fftconvolve(h_low, RR)
    RR_Energy_Ratio_Feature = np.sum(RR_high ** 2) / np.sum(RR_low ** 2)
    feature8 = RR_Energy_Ratio_Feature

    ####### Poincare Map Feature #######
    # SD1/SD2 ratio from the Poincare plot of successive RR pairs.
    x = np.array(RR[0:-1])
    y = np.array(RR[1:])
    SD1 = np.std(np.abs(x - y))
    SD2 = np.std(np.abs(x - y + 2 * np.mean(RR)))
    SD_Ratio_Feature = SD1 / SD2
    feature9 = SD_Ratio_Feature

    ####### Approximate Entropy Feature #######
    # NOTE(review): tolerance 2*std is unusually large (0.2*std is the
    # conventional choice) — confirm this is intentional.
    ApEn_Feature = []
    for i in range(number):
        ApEn_Feature.append(ApEn(np.array(seperated[i]), 2, 2 * np.std(seperated[i])))
    feature10 = np.mean(ApEn_Feature)

    ####### Spectral Entropy Feature #######
    # NOTE(review): SpEn_Feature is computed but never added to the
    # returned feature vector — dead code or a missing feature; confirm.
    RR_fft = np.abs(ffttools.fft(RR))
    RR_fft[0] = 0  # Remove
    ProbDens = 2. / len(signal) * np.abs(RR_fft[0:len(RR_fft) // 2])
    ProbDens = ProbDens / np.sum(ProbDens)
    SpEn_Feature = stats.entropy(ProbDens, base=2)

    # Largest Lyapunov exponent (Rosenstein) of the RR series.
    Lya_Exp_Feature = nolds.lyap_r(np.array(RR), emb_dim=2, lag=1, min_tsep=10, tau=1)
    feature11 = Lya_Exp_Feature

    ####### Detrended Fluctuation Analysis Feature #######
    DFA_Slope_Feature = nolds.dfa(np.array(RR))
    feature12 = DFA_Slope_Feature

    ####### Sequential Trend Analysis Feature #######
    # Counts of consecutive same-sign RR changes (accelerations/decelerations).
    DeltaRR = np.array(RR[1:]) - np.array(RR[0:-1])
    PosSeqTrend_Feature = 0
    NegSeqTrend_Feature = 0
    for i in range(len(DeltaRR) - 1):
        if DeltaRR[i] > 0 and DeltaRR[i + 1] > 0:
            PosSeqTrend_Feature += 1
        if DeltaRR[i] < 0 and DeltaRR[i + 1] < 0:
            NegSeqTrend_Feature += 1
    feature13 = PosSeqTrend_Feature
    feature14 = NegSeqTrend_Feature

    ####### Feature Vector #######
    Feature = [feature1, feature2, feature3, feature4, feature5, feature6, feature7, feature8, feature9, feature10, feature11, feature12, feature13, feature14]
    return Feature
import nolds import numpy as np rwalk = np.cumsum(np.random.random(1000)) print("fractal {}".format(nolds.dfa(rwalk))) print("Lup {}".format(nolds.lyap_e(rwalk))) print("Lup {}".format(nolds.lyap_r(rwalk))) print("Hurst {}".format(nolds.hurst_rs(rwalk)))
def GetEntropy(self, farry): resultlist = [] resultlist.append(nolds.sampen(farry)) resultlist.append(nolds.lyap_r(farry)) resultlist.append(nolds.hurst_rs(farry)) resultlist.append(nolds.dfa(farry))
def process_data(data, channelNames, srate):
    """Compute wavelet-band features (power, nonlinear measures, optional RQA)
    for each channel in `data` that appears in the global master_channel_list.

    Parameters
    ----------
    data : 2-D array
        One row of samples per channel (indexed in step with channelNames).
    channelNames : sequence of str
        Channel labels, one per row of `data`.
    srate : float
        Sampling rate in Hz; may be halved/quartered below by subsampling.

    Returns
    -------
    tuple
        (D, srate, wavelet_scale, f_limit) where D[channel][feature][band]
        holds the computed values.

    Relies on module globals: f_labels, processed_channel_names, all_features,
    master_channel_list, bandpower, filename, and the pyRQA classes.
    """
    global f_labels, processed_channel_names
    # Default RQA parameters
    embedding = 10  # Embedding dimension
    tdelay = 2  # Time delay
    tau = 30  # threshold (FixedRadius neighbourhood radius)
    # Multiscaling is accomplished with a wavelet transform
    # Options for basis functions: ['haar', 'db', 'sym', 'coif', 'bior', 'rbio', 'dmey']
    #wavelet = 'haar'
    wavelet = 'db4'
    mode = 'cpd'
    #mode = pywt.Modes.smooth
    # Simple array for entropy value
    # NOTE(review): `ent` appears unused in the rest of this function.
    ent = np.zeros(1)
    # Determine the number of levels required so that
    # the lowest level approximation is roughly the
    # delta band (freq range 0-4 Hz)
    if srate <= 128:
        levels = 4
    elif srate <= 256:
        levels = 5
    elif srate <= 512:
        # subsample by 2 (keep every other sample) before decomposing
        srate = srate / 2.0
        n = len(data[0])
        data = data[0:, 0:n:2]
        levels = 5
    elif srate <= 1024:
        # subsample by 4
        srate = srate / 4.0
        n = len(data[0])
        data = data[0:, 0:n:4]
        levels = 5
    # NOTE(review): srate > 1024 leaves `levels` undefined -> NameError below.
    nbands = levels
    wavelet_scale = {}
    f_limit = {}
    # The following function returns the highest level (ns) approximation
    # in dec[0], then details for level ns in dec[1]. Each successive
    # level of detail coefficients is in dec[2] through dec[ns].
    #
    #    level   approximation       details
    #    0       original signal     --
    #    1       -                   dec[ns]
    #    2       -                   dec[ns-1]
    #    3       -                   dec[ns-2]
    #    i       -                   dec[ns-i+1]
    #    ns      dec[0]              dec[1]
    WRITE_RP_IMAGE_FILE = False
    # Print screen headers
    sys.stdout.write("%10s %6s " % ("Sensor", "Freq"))
    for f in all_features:
        sys.stdout.write(" %8s " % (f))
    sys.stdout.write("\n")
    D = {}
    for c, ch in enumerate(channelNames):
        if ch in master_channel_list:
            processed_channel_names.append(ch)
            # Create a raw recurrence plot image for the original signal from this channel
            if WRITE_RP_IMAGE_FILE:
                # NOTE(review): `filename` is a module global — confirm it is
                # set before this branch is enabled.
                rp_plot_name = filename + "_" + ch + "_" + "rp" + ".png"
                print(" write rp image file ", rp_plot_name)
                settings = Settings(data[c],
                                    embedding_dimension=embedding,
                                    time_delay=tdelay,
                                    neighbourhood=FixedRadius(0))
                #computation = RQAComputation.create(settings, verbose=False)
                rp_computation = RecurrencePlotComputation.create(
                    settings, verbose=False)
                result = rp_computation.run()
                ImageGenerator.save_recurrence_plot(
                    result.recurrence_matrix_reverse, rp_plot_name)
            D[ch] = {}
            #--------------------------------------------------------------------
            # Get the wavelet decomposition. See pywavelet (or pywt) documents.
            # Deconstruct the waveforms
            # S = An + Dn + Dn-1 + ... + D1
            #--------------------------------------------------------------------
            w = pywt.Wavelet(wavelet)
            m = np.mean(data[c])
            a_orig = data[c] - m  # the original signal, mean-removed, initially
            a = a_orig
            ca = []  # all the approximations
            cd = []  # all the details
            sqrt2 = np.sqrt(2.0)
            for i in range(nbands):
                (a, d) = pywt.dwt(a, w, mode)
                # Scale coefficients by sqrt(2)^level to normalize energy.
                f = pow(sqrt2, i + 1)
                ca.append(a / f)
                cd.append(d / f)
            if 1 == 0:  # this will build full reconstructed signals at every level
                rec_a = []  # reconstructed approximations
                rec_d = []  # reconstructed details
                for i, coeff in enumerate(ca):
                    coeff_list = [coeff, None] + [None] * i
                    rec_a.append(pywt.waverec(coeff_list, w))
                for i, coeff in enumerate(cd):
                    coeff_list = [None, coeff] + [None] * i
                    rec_d.append(pywt.waverec(coeff_list, w))
            else:
                # Disabled branch above; use raw (scaled) coefficients directly.
                rec_a = ca
                rec_d = cd
            # Use the details and last approximation to create all the power-of-2 freq bands
            f_labels = ['A0']
            wavelet_scale = {}
            wavelet_scale['A0'] = 0
            f_limit = {}
            f_limit['A0'] = srate / 2.0
            fs = [srate]
            freqband = [a_orig]  # A0 is the original signal
            N = len(a_orig)
            f = srate / 4.0
            for j, r in enumerate(rec_a):
                freq_name = 'A' + str(j + 1)
                wavelet_scale[freq_name] = j + 1
                f_limit[freq_name] = f
                f = f / 2.0
                f_labels.append(freq_name)
                freqband.append(r[0:N])  # wavelet approximation for this band
            f = srate / 2.0
            for j, r in enumerate(rec_d):
                freq_name = 'D' + str(j + 1)
                wavelet_scale[freq_name] = j + 1
                f_limit[freq_name] = f
                f = f / 2.0
                f_labels.append(freq_name)
                freqband.append(r[0:N])  # wavelet details for this band
            #--------------------------------------------------------------------
            # Compute features on each of the frequency bands
            #--------------------------------------------------------------------
            for f in all_features:
                D[ch][f] = {}
            #----------------------
            # Feature set 1: Power
            for i, y in enumerate(freqband):
                v = bandpower(y)
                D[ch]["Power"][f_labels[i]] = v
                #----------------------
                # Feature set 2: Sample Entropy, Hurst parameter, DFA, Lyapunov exponents
                # NOTE(review): sampen has no try/except guard, unlike the
                # measures below — confirm it cannot raise on these bands.
                D[ch]["SampE"][f_labels[i]] = nolds.sampen(y)
                try:
                    D[ch]["hurst_rs"][f_labels[i]] = nolds.hurst_rs(y)
                except:
                    D[ch]["hurst_rs"][f_labels[i]] = 0.0
                try:
                    D[ch]["dfa"][f_labels[i]] = nolds.dfa(y)
                except:
                    D[ch]["dfa"][f_labels[i]] = 0.0
                try:
                    D[ch]["cd"][f_labels[i]] = nolds.corr_dim(y, embedding)
                except:
                    D[ch]["cd"][f_labels[i]] = 0.0
                try:
                    #lyap = nolds.lyap_e(y, emb_dim= embedding)
                    lyap0 = nolds.lyap_r(y, emb_dim=embedding)
                except:
                    #lyap = [0.0, 0.0, 0.0]
                    lyap0 = 0.0
                D[ch]["lyap0"][f_labels[i]] = lyap0
                #----------------------
                # Feature set 3: Recurrence Quantitative Analysis (RQA)
                # This routine seems to be incredibly slow and may need improvement
                rqa_features = [
                    "RR", "DET", "LAM", "L_entr", "L_max", "L_mean", "TT"
                ]
                pyRQA_names = ['recurrence_rate', 'determinism', 'laminarity', 'entropy_diagonal_lines', \
                               'longest_diagonal_line','average_diagonal_line', 'trapping_time' ]
                # First check to see if RQA values are needed at all
                compute_RQA = False
                for r in rqa_features:
                    if r in all_features:
                        compute_RQA = True
                        break
                if compute_RQA:
                    #for i, y in enumerate(freqband):
                    settings = Settings(
                        y,
                        embedding_dimension=embedding,
                        time_delay=tdelay,
                        neighbourhood=FixedRadius(tau)
                        #similarity_measure=EuclideanMetric,
                        #theiler_corrector=1,
                        #min_diagonal_line_length=2,
                        #min_vertical_line_length=2,
                        #min_white_vertical_line_length=2)
                    )
                    computation = RQAComputation.create(settings, verbose=False)
                    result = computation.run()
                    # We have to pull out each value
                    # NOTE(review): this rebinds `w`, which previously held the
                    # pywt.Wavelet object — harmless here but easy to misread.
                    w = f_labels[i]
                    D[ch]["RR"][w] = result.recurrence_rate
                    D[ch]["DET"][w] = result.determinism
                    D[ch]["LAM"][w] = result.laminarity
                    D[ch]["L_entr"][w] = result.entropy_diagonal_lines
                    D[ch]["L_max"][w] = result.longest_diagonal_line
                    D[ch]["L_mean"][w] = result.average_diagonal_line
                    D[ch]["TT"][w] = result.trapping_time
                # Write results from first channel to the screen, to give
                # visual feedback that the code is running
                w = f_labels[i]
                sys.stdout.write("%10s %6s " % (ch, w))
                for dyn_inv in all_features:  # D[ch].keys():
                    v = D[ch][dyn_inv][w]
                    sys.stdout.write(" %8.3f " % (v))
                sys.stdout.write("\n")
    return D, srate, wavelet_scale, f_limit
elif column == 8: emb_dim = 5 min_neighbors = 4 elif column == 9: emb_dim = 5 min_neighbors = 5 elif column == 10: emb_dim = 7 min_neighbors = 2 elif column == 11: emb_dim = 7 min_neighbors = 3 elif column == 12: emb_dim = 7 min_neighbors = 4 elif column == 13: emb_dim = 7 min_neighbors = 5 # Do the calculation and put it on a specific cell r = nolds.lyap_r(x, emb_dim=emb_dim, lag=lag, min_tsep=None, min_neighbors=min_neighbors, trajectory_len=trajectory_len) active.cell(row=row, column=column).value = r # Save workbook to write workbook.save("../data/chaos_data/results_presentation.xlsx") workbook.close()
def eeg_fractal_dim(epochs, entropy=True, hurst=True, dfa=False, lyap_r=False, lyap_e=False):
    """Compute per-epoch nonlinear measures, averaged over channels.

    For each epoch, the enabled measures (sample entropy, Hurst exponent,
    DFA exponent, Rosenstein and Eckmann Lyapunov exponents — all via nolds)
    are computed on every channel of the epoch and then averaged.

    Parameters
    ----------
    epochs : object
        Must provide to_data_frame(index=["epoch", "time", "condition"])
        (MNE Epochs-like — TODO confirm against caller).
    entropy, hurst, dfa, lyap_r, lyap_e : bool
        Toggle each measure on or off.

    Returns
    -------
    pandas.DataFrame
        One row per epoch, one column per enabled measure, plus an "Epoch"
        column of condition labels.
    """
    clock = Time()  # project-local timer used for the progress estimate below
    df = epochs.to_data_frame(index=["epoch", "time", "condition"])
    # Separate indexes into parallel lists (epoch id, time point, condition).
    index = df.index.tolist()
    epochs = []  # NOTE(review): rebinds the `epochs` parameter
    times = []
    events = []
    for i in index:
        epochs.append(i[0])
        times.append(i[1])
        events.append(i[2])
    # One sub-dict per enabled measure: {measure: {epoch: value}}.
    data = {}
    if entropy == True:
        data["Entropy"] = {}
    if hurst == True:
        data["Hurst"] = {}
    if dfa == True:
        data["DFA"] = {}
    if lyap_r == True:
        data["Lyapunov_R"] = {}
    if lyap_e == True:
        data["Lyapunov_E"] = {}
    clock.reset()
    for epoch in set(epochs):
        subset = df.loc[epoch]
        if entropy == True:
            data["Entropy"][epoch] = []
        if hurst == True:
            data["Hurst"][epoch] = []
        if dfa == True:
            data["DFA"][epoch] = []
        if lyap_r == True:
            data["Lyapunov_R"][epoch] = []
        if lyap_e == True:
            data["Lyapunov_E"][epoch] = []
        # Accumulate one value per channel column.
        for channel in subset:
            if entropy == True:
                data["Entropy"][epoch].append(nolds.sampen(subset[channel]))
            if hurst == True:
                data["Hurst"][epoch].append(nolds.hurst_rs(subset[channel]))
            if dfa == True:
                data["DFA"][epoch].append(nolds.dfa(subset[channel]))
            if lyap_r == True:
                data["Lyapunov_R"][epoch].append(nolds.lyap_r(subset[channel]))
            if lyap_e == True:
                data["Lyapunov_E"][epoch].append(nolds.lyap_e(subset[channel]))
        # Collapse the per-channel lists to their mean.
        if entropy == True:
            data["Entropy"][epoch] = np.mean(data["Entropy"][epoch])
        if hurst == True:
            data["Hurst"][epoch] = np.mean(data["Hurst"][epoch])
        if dfa == True:
            data["DFA"][epoch] = np.mean(data["DFA"][epoch])
        if lyap_r == True:
            data["Lyapunov_R"][epoch] = np.mean(data["Lyapunov_R"][epoch])
        if lyap_e == True:
            data["Lyapunov_E"][epoch] = np.mean(data["Lyapunov_E"][epoch])
        # Rough remaining-time estimate from elapsed ms per processed epoch.
        time = clock.get(reset=False)/1000
        time = time/(epoch+1)
        time = (time * (len(set(epochs))-epoch))/60
        print(str(round((epoch+1)/len(set(epochs))*100,2)) + "% complete, remaining time: " + str(round(time, 2)) + 'min')
    df = pd.DataFrame.from_dict(data)
    # Build "condition_epoch" tags, keep one per epoch, then strip the suffix.
    list_events = []
    for i in range(len(events)):
        list_events.append(events[i] + "_" + str(epochs[i]))
    # NOTE(review): list_events is a plain Python list; indexing it with the
    # tuple returned by np.where(...) raises TypeError — this line looks
    # broken as written (np.array(list_events)[...] was likely intended).
    list_events = list_events[np.where(find_following_duplicates(list_events))]
    list_events = [re.sub('_\d+', '', i) for i in list_events]
    df["Epoch"] = list_events
    return(df)
import nolds import numpy as np data = np.loadtxt("Rossler_reservoir_states_node100_no1.txt")[:40000] lyapunov_max = np.zeros((5,1)) for i in range(4): lyapunov_max[i] = np.array([i+1, nolds.lyap_r(data = data[:10000*(i+1)], emb_dim =5)]) np.savetxt("Rossler_reservoir_maximum_lyapunov_emb4_node1.txt", lyapunov_max) print("maximun lyapunov exponent of Rossler is ") print(lyapunov_max)