import nolds
import numpy as np
from tqdm import tqdm


def calc(images, adversarial_images, measure='sampen'):
    """Calculate and return the chosen nonlinear measure for both the original and the adversarial images.

    Set measure to the quantity you want to calculate:
    'sampen'  :  Sample entropy
    'frac'    :  Correlation/fractal dimension
    'hurst'   :  Hurst exponent
    'lyapr'   :  Largest Lyapunov exponent (Rosenstein et al. method)

    Docs      :  https://cschoel.github.io/nolds/

    If an adversarial image contains NaN, we output 0.
    Some adversarial images are NaN because adversarial generation
    was unsuccessful for them: there is a maximum number of iterations
    one can set for adversarial generation, and the program outputs NaN
    when that maximum is reached before an adversarial perturbation is found.

    For more info look at "adversarial_gen.ipynb"
    """

    imageCalc_data = []
    advimageCalc_data = []

    for i in tqdm(range(len(images))):
        image = images[i]
        image = image.flatten()
        advimage = adversarial_images[i]
        advimage = advimage.flatten()

        if measure == 'sampen':
            imageCalc_data.append(nolds.sampen(image))
            if np.isnan(np.sum(advimage)):
                advimageCalc_data.append(0)
            else:
                advimageCalc_data.append(nolds.sampen(advimage))

        elif measure == 'frac':
            imageCalc_data.append(nolds.corr_dim(image, 1))
            if np.isnan(np.sum(advimage)):
                advimageCalc_data.append(0)
            else:
                advimageCalc_data.append(nolds.corr_dim(advimage, 1))

        elif measure == 'hurst':
            imageCalc_data.append(nolds.hurst_rs(image))
            if np.isnan(np.sum(advimage)):
                advimageCalc_data.append(0)
            else:
                advimageCalc_data.append(nolds.hurst_rs(advimage))

        elif measure == 'lyapr':
            imageCalc_data.append(nolds.lyap_r(image))
            if np.isnan(np.sum(advimage)):
                advimageCalc_data.append(0)
            else:
                advimageCalc_data.append(nolds.lyap_r(advimage))

    return imageCalc_data, advimageCalc_data
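
A minimal usage sketch (the arrays below are made up for illustration): images and adversarial_images are assumed to be equal-length collections of 2-D images.

import numpy as np

images = np.random.rand(5, 28, 28)
adversarial_images = images + 0.1 * np.random.randn(5, 28, 28)
orig_vals, adv_vals = calc(images, adversarial_images, measure='sampen')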
Code example #2
def hurst_exp(timeseries, nvals=None, nodata=-9999):
    """Computes the Hurst Exponent (HE) by a standard \
    rescaled range (R/S) approach.
    HE is a self-similarity measure that assesses long-range \
    dependence in a time series. It can be used to determine whether the \
    time series is more, less, or equally likely to increase if it has \
    increased in previous steps.

    :param timeseries: Time series.
    :type timeseries: numpy.ndarray
    
    :param nvals: Sizes of the subseries to use.
    :type nvals: iterable of int

    :param nodata: nodata value of the time series. Default is -9999.
    :type nodata: int

    :return hurst: The Hurst Exponent (HE).

    .. Note::

        This function was adapted from the package Nolds. Due to time series \
        characteristics we use by default the 'RANSAC' \
        fitting method as it is more robust to outliers.
        For more details regarding the hurst implementation, check Nolds \
        documentation page.
    """
    import nolds
    ts = fixseries(timeseries, nodata)

    return truncate(nolds.hurst_rs(ts, nvals))
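
A hedged usage sketch with plain nolds (fixseries and truncate are stmetrics helpers not shown here); nolds.logarithmic_n is one documented way to build the subseries sizes passed as nvals.

import nolds
import numpy as np

ts = np.cumsum(np.random.randn(1024))
nvals = list(nolds.logarithmic_n(16, len(ts) // 2, 1.5))
print(nolds.hurst_rs(ts, nvals=nvals))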
Code example #3
File: features.py Project: AntoinePassemiers/NIH-EEG
 def process(self, signals):
     features = np.empty(self.__len__(), dtype=np.float64)
     for i in range(self.__len__()):
         has_nan = checkDropOutsByChannel(signals[:, i])
         features[i] = nolds.hurst_rs(signals[:, i]) if not has_nan else np.nan
     return features
Code example #4
File: global_stats.py Project: FelSiq/ts-pymfe-tests
    def ft_exp_hurst(cls, ts: np.ndarray) -> float:
        """Estimation of the Hurst exponent.

        Check `nolds.hurst_rs` documentation for a clear explanation about
        the underlying function.

        Parameters
        ----------
        ts : :obj:`np.ndarray`
            One-dimensional time-series values.

        Returns
        -------
        float
            Estimation of the Hurst exponent.

        References
        ----------
        .. [1] H. E. Hurst, The problem of long-term storage in reservoirs,
            International Association of Scientific Hydrology. Bulletin, vol.
            1, no. 3, pp. 13–27, 1956.
        .. [2] H. E. Hurst, A suggested statistical model of some time series
            which occur in nature, Nature, vol. 180, p. 494, 1957.
        .. [3] R. Weron, Estimating long-range dependence: finite sample
            properties and confidence intervals, Physica A: Statistical
            Mechanics and its Applications, vol. 312, no. 1, pp. 285–299,
            2002.
        .. [4] "nolds" Python package: https://pypi.org/project/nolds/
        """
        return nolds.hurst_rs(data=ts)
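
An illustrative sanity check, not part of the original module: nolds.hurst_rs should sit near 0.5 for uncorrelated noise and well above 0.5 for a trending random walk.

import nolds
import numpy as np

rng = np.random.default_rng(0)
print(nolds.hurst_rs(rng.normal(size=10000)))             # near 0.5: no long-range memory
print(nolds.hurst_rs(np.cumsum(rng.normal(size=10000))))  # close to 1: persistent/trending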
Code example #5
def get_features(sound_lib,sr,sound_scipy,sound_praat):
    features = []
    
    # Features extracted using parselmouth and pratt
    f0min = 75; f0max = 500                                                             # Limits of human speech in Hz
    pitch = call(sound_praat, "To Pitch", 0.0, f0min, f0max)                            # create a praat pitch object
    harmonicity = call(sound_praat, "To Harmonicity (cc)", 0.01, 75, 0.1, 1.0)          # create a praat harmonicity object
    pointProcess = call(sound_praat, "To PointProcess (periodic, cc)", f0min, f0max)    # create a praat pointProcess object
    unit = "Hertz"
    
    features.append(call(pitch, "Get mean", 0, 0, unit))                                                        # F0 - Central Frequency
    features.append(call(pitch, "Get standard deviation", 0 ,0, unit))                                          # F0 - std
    features.append(call(pointProcess, "Get jitter (local)", 0, 0, 0.0001, 0.02, 1.3))                          # Relative jitter 
    features.append(call(pointProcess, "Get jitter (local, absolute)", 0, 0, 0.0001, 0.02, 1.3))                # Absolute jitter
    features.append(call(pointProcess, "Get jitter (rap)", 0, 0, 0.0001, 0.02, 1.3))                            # Relative average perturbation
    features.append(call(pointProcess, "Get jitter (ppq5)", 0, 0, 0.0001, 0.02, 1.3))                           # 5-point period pertubation quotient ( ppq5 )
    features.append(call(pointProcess, "Get jitter (ddp)", 0, 0, 0.0001, 0.02, 1.3))                            # Difference of differences of periods ( ddp )
    features.append(call([sound_praat, pointProcess], "Get shimmer (local)" , 0, 0, 0.0001, 0.02, 1.3, 1.6))    # Relative Shimmer
    features.append(call([sound_praat, pointProcess], "Get shimmer (local_dB)", 0, 0, 0.0001, 0.02, 1.3, 1.6))  # Relative Shimmer dB
    features.append(call([sound_praat, pointProcess], "Get shimmer (apq3)", 0, 0, 0.0001, 0.02, 1.3, 1.6))      # Shimmer (apq3)
    features.append(call([sound_praat, pointProcess], "Get shimmer (apq5)", 0, 0, 0.0001, 0.02, 1.3, 1.6))      # Shimmer (apq5)
    features.append(call([sound_praat, pointProcess], "Get shimmer (apq11)", 0, 0, 0.0001, 0.02, 1.3, 1.6))     # Shimmer (apq11)
    features.append(call([sound_praat, pointProcess], "Get shimmer (dda)", 0, 0, 0.0001, 0.02, 1.3, 1.6))       # Shimmer (dda)
    features.append(call(harmonicity, "Get mean", 0, 0))                                                        # Harmonic Noise Ratio 
    
    
    # Features extracted using librosa (keyword arguments keep this compatible with newer librosa versions)
    features.append(librosa.feature.spectral_flatness(y=sound_lib))          # Spectral Flatness
    features.append(librosa.feature.rms(y=sound_lib))                        # Volume
    features.append(librosa.feature.zero_crossing_rate(y=sound_lib))         # Zero Crossing Rate
    features.append(librosa.feature.spectral_centroid(y=sound_lib, sr=sr))   # Spectral Centroid
    features.append(librosa.feature.spectral_bandwidth(y=sound_lib, sr=sr))  # Spectral Bandwidth
    features.append(librosa.feature.spectral_contrast(y=sound_lib, sr=sr))   # Spectral Contrast
    features.append(librosa.feature.spectral_rolloff(y=sound_lib, sr=sr))    # Spectral Rolloff
    features.append(librosa.feature.mfcc(y=sound_lib, sr=sr))                # Mel-Frequency Cepstral Coefficients (MFCC)
    features.append(librosa.feature.tonnetz(y=sound_lib, sr=sr))             # Tonnetz
    features.append(librosa.feature.chroma_stft(y=sound_lib, sr=sr))         # Chromagram (STFT)
    features.append(librosa.feature.chroma_cqt(y=sound_lib, sr=sr))          # Constant-Q Chromagram
    features.append(librosa.feature.chroma_cens(y=sound_lib, sr=sr))         # Chroma Energy Normalized
    
    # The tempogram feature might be useless as it is too redundant - uncomment it if you find it useful
    #features.append(librosa.feature.tempogram(sound_lib,sr))               # Tempogram: local autocorrelation of the onset strength envelope 
    
    # Features extracted using scipy
    features.append(scipy.stats.skew(sound_lib))                            # Skewness
    entropy = get_entropy(sound_scipy)
    features.append(entropy[0])                                             # Entropy Left Channel
    features.append(entropy[1])                                             # Entropy Right Channel
    
    # Features extracted by nolds (Chaos/Dynamical Systems Theory) - comment this out if you did not install nolds
    features.append(nolds.hurst_rs(sound_lib))                              # The hurst exponent is a measure of the “long-term memory” of a time series
    
    # Please don't use these even if you installed nolds.
    # The following features require extremely long computation times and do not finish by normal means;
    # avoid them to save yourself a headache. There is no guarantee they will even converge
    # (it depends on the length of the audio file).
    #features.append(nolds.dfa(sound_lib))                                  # Performs a detrended fluctuation analysis (DFA) on the given data
    #features.append(nolds.lyap_r(sound_lib))                               # Estimates the largest Lyapunov exponent using the algorithm of Rosenstein
    #features.append(nolds.lyap_e(sound_lib))                               # Estimates the Lyapunov exponents for the given data using the algorithm of Eckmann
    #features.append(nolds.corr_dim(sound_lib,1))                           # Calculates the correlation dimension with the Grassberger-Procaccia algorithm
    
    return features
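
A hedged usage sketch for get_features: the file name is hypothetical, and the three sound representations are loaded in ways that are assumptions based on the libraries used above (librosa, scipy.io.wavfile, and parselmouth, whose praat module provides call).

import librosa
import parselmouth
from scipy.io import wavfile

path = "speech.wav"                                # hypothetical input file
sound_lib, sr = librosa.load(path, sr=None, mono=True)
_, sound_scipy = wavfile.read(path)                # keeps both channels for get_entropy
sound_praat = parselmouth.Sound(path)
features = get_features(sound_lib, sr, sound_scipy, sound_praat)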
Code example #6
def hurst(x: np.ndarray) -> float:
    """ Hurst exponent
    :param x: a 1-d numeric vector
    :return: numeric scalar
    """
    out = nolds.hurst_rs(x)

    return out
Code example #7
File: Hurst.py Project: Ernestyj/PyProj
import nolds
import numpy as np
import pandas as pd


def computeMovingHurst(dataSeries, window=233):
    dataLen = len(dataSeries)
    if dataLen < window:
        print('window length is bigger than data length')
        return
    hursts = np.zeros(dataLen)
    hursts[0:window] = np.nan
    for i in range(dataLen - window):
        hursts[window + i] = nolds.hurst_rs(dataSeries[i:i + window])
    return pd.Series(hursts, index=dataSeries.index)
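
A minimal usage sketch (assumed data): a random-walk price series indexed by date, using the default 233-sample window.

idx = pd.date_range("2015-01-01", periods=500)
prices = pd.Series(np.cumsum(np.random.randn(500)), index=idx)
rolling_hurst = computeMovingHurst(prices, window=233)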
Code example #9
import nolds


def separate(a_column):
    """Split a series into ten equal chunks and return the Hurst exponent of each."""
    chunk = int(len(a_column) / 10)
    hursts = []
    for i in range(10):
        c = a_column[i * chunk:(i + 1) * chunk]
        hursts.append(nolds.hurst_rs(c))
    return hursts
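
Usage sketch (assumed input): ten Hurst estimates over consecutive tenths of a synthetic random walk.

import numpy as np

hursts = separate(np.cumsum(np.random.randn(5000)))
print(hursts)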
Code example #10
    def hurst(self, xy=False):
        '''
        Returns the Hurst exponent of the data.
        TODO: only seems to work one-dimensionally.
        If xy=True the function returns an array with the x and y values of the
        exponent; otherwise it is averaged across both dimensions.
        '''
        if not self.cleaned:
            self.removeNoise()

        hurstExp = np.array(
            [hurst_rs(self.points[:, 0]),
             hurst_rs(self.points[:, 1])])

        self.hurstExp = np.mean(hurstExp)

        if xy:
            return hurstExp
        else:
            return self.hurstExp
Code example #11
def load_feature(s):
    rw = [lwalk(i) for i in s]
    sd = [np.std(i) for i in rw]
    dfa = [nolds.dfa(i) for i in rw]
    hurst = [nolds.hurst_rs(i) for i in rw]
    sampen = [nolds.sampen(i) for i in rw]
    ac = [autocorrelation(i, 100) for i in rw]
    rvntsl = [ratio_value_number_to_time_series_length(i) for i in rw]
    ac_200 = [autocorrelation(i, 200) for i in rw]
    ac_300 = [autocorrelation(i, 300) for i in rw]
    lyapr = [nolds.lyap_r(i) for i in rw]
    inpv = pd.DataFrame(
        [sd, dfa, hurst, sampen, ac, rvntsl, ac_200, ac_300, lyapr])
    return inpv.transpose()
Code example #12
File: fractal.py Project: M3nin0/stmetrics
def hurst_exp(timeseries):
    """Hurst exponent is a self-similarity measure that assess long-range \
    dependence in a time series. The hurst exponent is a measure of the \
    “long-term memory” of a time series.
    It can be used to determine whether the time series is more, less, or \
    equally likely to increase if it has increased in previous steps.

    :param timeseries: Your time series.
    :type timeseries: numpy.ndarray

    :return hurst: Hurst expoent.

    .. Note::
        This function was adapted from the package Nolds.
    """
    ts = utils.fixseries(timeseries)

    return utils.truncate(nolds.hurst_rs(ts))
Code example #13
def hurst_exp(series):
    """
    Hurst exponent.
    Hurst Exponent is a self-similarity measure that assesses long-range dependence in a time series.
    
    Keyword arguments:
    series : numpy.array
        One dimensional time series.
    Returns
    -------
    hurst : float
        Hurst exponent.
    
    The Hurst exponent is a measure of the “long-term memory” of a time series.
    It can be used to determine whether the time series is more, less, or equally likely to increase if it has increased in previous steps. 
    This property makes the Hurst exponent especially interesting for the analysis of stock data.
    """

    h = nolds.hurst_rs(series)
    return h
Code example #14
File: stats.py Project: jamesprinc3/data-analysis
    def get_hurst_exponent_over_time(trades, st, et, step_minutes,
                                     window_minutes):
        num_steps = ((et - st).total_seconds() / 60) / step_minutes
        hurst_exps = []
        times = []
        for i in range(0, int(num_steps)):
            iter_st = st + datetime.timedelta(minutes=step_minutes * i)
            iter_et = iter_st + datetime.timedelta(minutes=window_minutes)

            window = DataSplitter.get_between(trades, iter_st, iter_et)
            prices = np.asarray(window['price'].dropna(), dtype=np.float32)

            if len(prices) == 0:
                continue

            hurst_exp = nolds.hurst_rs(prices)
            # hurst_exp = nolds.dfa(prices) - 1
            print(hurst_exp)
            if 0 < hurst_exp < 1:
                hurst_exps.append(hurst_exp)
                times.append(iter_st)
        return times, hurst_exps
Code example #15
def complexity(signal,
               sampling_rate=1000,
               shannon=True,
               sampen=True,
               multiscale=True,
               spectral=True,
               svd=True,
               correlation=True,
               higushi=True,
               petrosian=True,
               fisher=True,
               hurst=True,
               dfa=True,
               lyap_r=False,
               lyap_e=False,
               emb_dim=2,
               tolerance="default",
               k_max=8,
               bands=None,
               tau=1):
    """
    Computes several chaos/complexity indices of a signal (including entropy, fractal dimensions, Hurst and Lyapunov exponent etc.).

    Parameters
    ----------
    signal : list or array
        List or array of values.
    sampling_rate : int
        Sampling rate (samples/second).
    shannon : bool
        Computes Shannon entropy.
    sampen : bool
        Computes sample entropy (sampen) using the Chebychev distance.
    multiscale : bool
        Computes multiscale entropy (MSE). Note that it uses the 'euclidean' distance.
    spectral : bool
        Computes Spectral Entropy.
    svd : bool
        Computes the Singular Value Decomposition (SVD) entropy.
    correlation : bool
        Computes the fractal (correlation) dimension.
    higushi : bool
        Computes the Higushi fractal dimension.
    petrosian : bool
        Computes the Petrosian fractal dimension.
    fisher : bool
        Computes the Fisher Information.
    hurst : bool
        Computes the Hurst exponent.
    dfa : bool
        Computes DFA.
    lyap_r : bool
        Computes Positive Lyapunov exponents (Rosenstein et al. (1993) method).
    lyap_e : bool
        Computes Positive Lyapunov exponents (Eckmann et al. (1986) method).
    emb_dim : int
        The embedding dimension (*m*, the length of vectors to compare). Used in sampen, fisher, svd and fractal_dim.
    tolerance : float
        Distance *r* threshold for two template vectors to be considered equal. Default is 0.2*std(signal). Used in sampen and fractal_dim.
    k_max : int
        The maximal value of k used for the Higuchi fractal dimension. The point at which the FD plateaus is considered a saturation point and that k_max value should be selected (Gómez, 2009). Some studies use a value of 8 or 16 for ECG signals and others use 48 for MEG.
    bands : int
        Used for spectral density. A list of numbers delimiting the bins of the frequency bands. If None the entropy is computed over the whole range of the DFT (from 0 to `f_s/2`).
    tau : int
        The delay. Used for fisher, svd, lyap_e and lyap_r.

    Returns
    ----------
    complexity : dict
        Dict containing values for each index.


    Example
    ----------
    >>> import neurokit as nk
    >>> import numpy as np
    >>>
    >>> signal = np.sin(np.log(np.random.sample(666)))
    >>> complexity = nk.complexity(signal)

    Notes
    ----------
    *Details*

    - **Entropy**: Entropy is a measure of unpredictability of the state, or equivalently, of its average information content.

      - *Shannon entropy*: Shannon entropy was introduced by Claude E. Shannon in his 1948 paper "A Mathematical Theory of Communication". Shannon entropy provides an absolute limit on the best possible average length of lossless encoding or compression of an information source.
      - *Sample entropy (sampen)*: Measures the complexity of a time-series, based on approximate entropy. The sample entropy of a time series is defined as the negative natural logarithm of the conditional probability that two sequences similar for emb_dim points remain similar at the next point, excluding self-matches. A lower value for the sample entropy therefore corresponds to a higher probability indicating more self-similarity.
      - *Multiscale entropy*: Multiscale entropy (MSE) analysis is a new method of measuring the complexity of finite length time series.
      - *SVD Entropy*: Indicator of how many vectors are needed for an adequate explanation of the data set. Measures feature-richness in the sense that the higher the entropy of the set of SVD weights, the more orthogonal vectors are required to adequately explain it.

    - **fractal dimension**: The term *fractal* was first introduced by Mandelbrot in 1983. A fractal is a set of points that, when looked at on smaller scales, resembles the whole set. The concept of fractal dimension (FD) originates from fractal geometry. In traditional geometry, the topological or Euclidean dimension of an object is known as the number of directions each differential of the object occupies in space. This definition of dimension works well for geometrical objects whose level of detail, complexity or *space-filling* is the same. However, when considering two fractals of the same topological dimension, their level of *space-filling* is different, and that information is not given by the topological dimension. The FD emerges to provide a measure of how much space an object occupies between Euclidean dimensions. The FD of a waveform represents a powerful tool for transient detection. This feature has been used in the analysis of ECG and EEG to identify and distinguish specific states of physiologic function. Many algorithms are available to determine the FD of the waveform (Acharya, 2005).

      - *Correlation*: A measure of the fractal (or correlation) dimension of a time series which is also related to complexity. The correlation dimension is a characteristic measure that can be used to describe the geometry of chaotic attractors. It is defined using the correlation sum C(r) which is the fraction of pairs of points X_i in the phase space whose distance is smaller than r.
      - *Higushi*: Higuchi proposed in 1988 an efficient algorithm for measuring the FD of discrete time sequences. As the reconstruction of the attractor phase space is not necessary, this algorithm is simpler and faster than D2 and other classical measures derived from chaos theory. FD can be used to quantify the complexity and self-similarity of a signal. HFD has already been used to analyse the complexity of brain recordings and other biological signals.
      - *Petrosian Fractal Dimension*: Provide a fast computation of the FD of a signal by translating the series into a binary sequence.

    - **Other**:

      - *Fisher Information*:  A way of measuring the amount of information that an observable random variable X carries about an unknown parameter θ of a distribution that models X. Formally, it is the variance of the score, or the expected value of the observed information.
      - *Hurst*: The Hurst exponent is a measure of the "long-term memory" of a time series. It can be used to determine whether the time series is more, less, or equally likely to increase if it has increased in previous steps. This property makes the Hurst exponent especially interesting for the analysis of stock data.
      - *DFA*: DFA measures the Hurst parameter H, which is very similar to the Hurst exponent. The main difference is that DFA can be used for non-stationary processes (whose mean and/or variance change over time).
      - *Lyap*: Positive Lyapunov exponents indicate chaos and unpredictability. Provides the algorithm of Rosenstein et al. (1993) to estimate the largest Lyapunov exponent and the algorithm of Eckmann et al. (1986) to estimate the whole spectrum of Lyapunov exponents.

    *Authors*

    - Dominique Makowski (https://github.com/DominiqueMakowski)
    - Christopher Schölzel (https://github.com/CSchoel)
    - tjugo (https://github.com/nikdon)
    - Quentin Geissmann (https://github.com/qgeissmann)

    *Dependencies*

    - nolds
    - numpy

    *See Also*

    - nolds package: https://github.com/CSchoel/nolds
    - pyEntropy package: https://github.com/nikdon/pyEntropy
    - pyrem package: https://github.com/gilestrolab/pyrem

    References
    -----------
    - Accardo, A., Affinito, M., Carrozzi, M., & Bouquet, F. (1997). Use of the fractal dimension for the analysis of electroencephalographic time series. Biological cybernetics, 77(5), 339-350.
    - Pierzchalski, M. Application of Higuchi Fractal Dimension in Analysis of Heart Rate Variability with Artificial and Natural Noise. Recent Advances in Systems Science.
    - Acharya, R., Bhat, P. S., Kannathal, N., Rao, A., & Lim, C. M. (2005). Analysis of cardiac health using fractal dimension and wavelet transformation. ITBM-RBM, 26(2), 133-139.
    - Richman, J. S., & Moorman, J. R. (2000). Physiological time-series analysis using approximate entropy and sample entropy. American Journal of Physiology-Heart and Circulatory Physiology, 278(6), H2039-H2049.
    - Costa, M., Goldberger, A. L., & Peng, C. K. (2005). Multiscale entropy analysis of biological signals. Physical review E, 71(2), 021906.
    """

    if tolerance == "default":
        tolerance = 0.2 * np.std(signal)

    # Initialize results storing
    complexity = {}

    # ------------------------------------------------------------------------------

    # Shannon
    if shannon is True:
        try:
            complexity["Entropy_Shannon"] = entropy_shannon(signal)
        except:
            print(
                "NeuroKit warning: complexity(): Failed to compute Shannon entropy."
            )
            complexity["Entropy_Shannon"] = np.nan

    # Sampen
    if sampen is True:
        try:
            complexity["Entropy_Sample"] = nolds.sampen(signal,
                                                        emb_dim,
                                                        tolerance,
                                                        dist="chebychev",
                                                        debug_plot=False,
                                                        plot_file=None)
        except:
            print(
                "NeuroKit warning: complexity(): Failed to compute sample entropy (sampen)."
            )
            complexity["Entropy_Sample"] = np.nan

    # multiscale
    if multiscale is True:
        try:
            complexity["Entropy_Multiscale"] = entropy_multiscale(
                signal, emb_dim, tolerance)
        except:
            print(
                "NeuroKit warning: complexity(): Failed to compute Multiscale Entropy (MSE)."
            )
            complexity["Entropy_Multiscale"] = np.nan

    # spectral
    if spectral is True:
        try:
            complexity["Entropy_Spectral"] = entropy_spectral(
                signal, sampling_rate=sampling_rate, bands=bands)
        except:
            print(
                "NeuroKit warning: complexity(): Failed to compute Spectral Entropy."
            )
            complexity["Entropy_Spectral"] = np.nan

    # SVD
    if svd is True:
        try:
            complexity["Entropy_SVD"] = entropy_svd(signal,
                                                    tau=tau,
                                                    emb_dim=emb_dim)
        except:
            print(
                "NeuroKit warning: complexity(): Failed to compute SVD Entropy."
            )
            complexity["Entropy_SVD"] = np.nan

    # ------------------------------------------------------------------------------
    # fractal_dim
    if correlation is True:
        try:
            complexity["Fractal_Dimension_Correlation"] = nolds.corr_dim(
                signal,
                emb_dim,
                rvals=None,
                fit="RANSAC",
                debug_plot=False,
                plot_file=None)
        except:
            print(
                "NeuroKit warning: complexity(): Failed to compute fractal_dim."
            )
            complexity["Fractal_Dimension_Correlation"] = np.nan

    # higushi
    if higushi is True:
        try:
            complexity["Fractal_Dimension_Higushi"] = fd_higushi(signal, k_max)
        except:
            print("NeuroKit warning: complexity(): Failed to compute higushi.")
            complexity["Fractal_Dimension_Higushi"] = np.nan

    # petrosian
    if petrosian is True:
        try:
            complexity["Fractal_Dimension_Petrosian"] = fd_petrosian(signal)
        except:
            print(
                "NeuroKit warning: complexity(): Failed to compute petrosian.")
            complexity["Fractal_Dimension_Petrosian"] = np.nan

    # ------------------------------------------------------------------------------

    # Fisher
    if fisher is True:
        try:
            complexity["Fisher_Information"] = fisher_info(signal,
                                                           tau=tau,
                                                           emb_dim=emb_dim)
        except:
            print(
                "NeuroKit warning: complexity(): Failed to compute Fisher Information."
            )
            complexity["Fisher_Information"] = np.nan

    # Hurst
    if hurst is True:
        try:
            complexity["Hurst"] = nolds.hurst_rs(signal,
                                                 nvals=None,
                                                 fit="RANSAC",
                                                 debug_plot=False,
                                                 plot_file=None)
        except:
            print("NeuroKit warning: complexity(): Failed to compute hurst.")
            complexity["Hurst"] = np.nan

    # DFA
    if dfa is True:
        try:
            complexity["DFA"] = nolds.dfa(signal,
                                          nvals=None,
                                          overlap=True,
                                          order=1,
                                          fit_trend="poly",
                                          fit_exp="RANSAC",
                                          debug_plot=False,
                                          plot_file=None)
        except:
            print("NeuroKit warning: complexity(): Failed to compute dfa.")
            complexity["DFA"] = np.nan

    # Lyap_r
    if lyap_r is True:
        try:
            complexity["Lyapunov_R"] = nolds.lyap_r(signal,
                                                    emb_dim=10,
                                                    lag=None,
                                                    min_tsep=None,
                                                    tau=tau,
                                                    min_vectors=20,
                                                    trajectory_len=20,
                                                    fit="RANSAC",
                                                    debug_plot=False,
                                                    plot_file=None)
        except:
            print("NeuroKit warning: complexity(): Failed to compute lyap_r.")
            complexity["Lyapunov_R"] = np.nan

    # Lyap_e
    if lyap_e is True:
        try:
            result = nolds.lyap_e(signal,
                                  emb_dim=10,
                                  matrix_dim=4,
                                  min_nb=None,
                                  min_tsep=0,
                                  tau=tau,
                                  debug_plot=False,
                                  plot_file=None)
            for i, value in enumerate(result):
                complexity["Lyapunov_E_" + str(i)] = value
        except:
            print("NeuroKit warning: complexity(): Failed to compute lyap_e.")
            complexity["Lyapunov_E"] = np.nan

    return (complexity)
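
A usage sketch mirroring the docstring example; restricting the run to the nolds-backed indices (sampen, hurst, dfa) is an illustrative choice, not part of the source.

import numpy as np

signal = np.sin(np.log(np.random.sample(666)))
indices = complexity(signal, shannon=False, multiscale=False, spectral=False,
                     svd=False, correlation=False, higushi=False,
                     petrosian=False, fisher=False, lyap_r=False, lyap_e=False)
print(indices)  # e.g. {'Entropy_Sample': ..., 'Hurst': ..., 'DFA': ...}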
Code example #16
window_size = 100   # overwritten below; alternative settings kept from the notebook
window_size = 2000
emb_dim = 4
rolling = rolling_window(df.logR_ask, window_size, 10)   # overwritten below; alternative inputs
rolling = rolling_window(df_std.logR_ask, window_size, window_size)
rolling = rolling_window(df_QN_laplace_std.values.transpose()[0], window_size, window_size)
rolling_ns = rolling_window(df.ask, window_size, 10)
rolling_ts = rolling_window(df.index, window_size, 10)
df_ = pd.DataFrame(rolling)

sw_1 = rolling[1]
sw_1_ns = rolling[1]
nolds.lyap_r(sw_1, emb_dim = emb_dim)
nolds.lyap_e(sw_1, emb_dim = emb_dim)
nolds.sampen(sw_1, emb_dim= emb_dim)
nolds.hurst_rs(sw_1)
nolds.corr_dim(sw_1, emb_dim=emb_dim)
nolds.dfa(sw_1)
ent.shannon_entropy(sw_1)  # is this even valid? We do not have any p_i states; it also ignores temporal order (see "Practical considerations of permutation entropy")
ent.sample_entropy(sw_1, sample_length=10)  # what is sample length?
#ent.multiscale_entropy(sw_1, sample_length = 10, tolerance = 0.1*np.std(sw_1)) # what is tolerance?

                      "Practical considerations of permutation entropy: A Tutorial review - how to choose parameters in permutation entropy"
ent.permutation_entropy(sw_1, m=8, delay = emd_dim )  #Reference paper above 
#ent.composite_multiscale_entropy()
lempel_ziv_complexity(sw_1)
gzip_compress_ratio(sw_1_ns, 9)


# https://www.researchgate.net/post/How_can_we_find_out_which_value_of_embedding_dimensions_is_more_accurate
# When choosing emb_dim for Takens' embedding, each dimension should have at least 10 data points:
# 10^1 == 1D, 10^2 == 2D, ..., 10^6 == 6D
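
The rule of thumb in the comment above written out as a tiny helper; the function name is made up for illustration.

def min_points_for_embedding(emb_dim):
    # roughly 10 data points per embedding dimension (Takens-style rule of thumb)
    return 10 ** emb_dim

print(min_points_for_embedding(4))  # 10000 samples suggested for emb_dim = 4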
Code example #17
 def hurst(self, S):
     if S.ndim > 1:
         return np.array([self.hurst(signal) for signal in S])
     else:
         return nolds.hurst_rs(S)
Code example #18
def hurst(df1, df2):
    # Calculate the Hurst exponent and plot the R/S statistic on a log-log plot
    print('Hurst exponent: ', nolds.hurst_rs(df1, debug_plot=True))
    print('Hurst exponent: ', nolds.hurst_rs(df2, debug_plot=True))
Code example #19
%matplotlib inline

import nolds
import pandas as pd
from numpy import sqrt, std, subtract, log, polyfit
from matplotlib.pyplot import plot, show
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

data = pd.read_csv('dataset-2007.csv', usecols=['wind direction at 100m (deg)', 'wind speed at 100m (m/s)', 'air temperature at 2m (K)', 'surface air pressure (Pa)', 'density at hub height (kg/m^3)'], skiprows=3)
data.columns = ['direction', 'speed', 'temp', 'pressure', 'density']
data.head(3)

D=data['speed'].values
T=D[0:105120:12]
F=T[0:2000]

h = nolds.dfa(F)
h

RS=nolds.hurst_rs(F)
RS

# calculate standard deviation of differenced series using various lags
lags = range(2, 20)
tau = [sqrt(std(subtract(F[lag:], F[:-lag]))) for lag in lags]

# plot on log-log scale
plot(log(lags), log(tau)); show()

# calculate Hurst as slope of log-log plot
m = polyfit(log(lags), log(tau), 1)
hurst = m[0]*2.0
hurst

# fractal dimension (correlation dimension) = slope of the line fitted to log(r) vs log(C(r))
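
A sketch of that log(r) vs log(C(r)) fit: nolds.corr_dim fits a line to the correlation sums and returns its slope as the dimension estimate; F is the wind-speed subseries from the cells above, and debug_plot shows the fitted line.

cd = nolds.corr_dim(F, emb_dim=1, debug_plot=True)
cd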
Code example #20
import nolds
from pandas import read_csv
from openpyxl import load_workbook

workbook = load_workbook(filename="../data/chaos_data/results_presentation.xlsx")
worksheets = workbook.sheetnames

for sheet in worksheets:
    # Activate worksheet to write dataframe
    active = workbook[sheet]

    # load dataset
    series = read_csv('../data/datasets/' + sheet + '_final_week.csv',
                      header=0, index_col=0, parse_dates=True, squeeze=True)
    x = series.values
    x = x.astype('float32')
    # x = np.fromiter(series.values, dtype="float32")

    hurst_exponent = nolds.hurst_rs(x)
    print("Hurst exponent: " + str(hurst_exponent))

workbook.close()
Code example #21
    def processSingleFile(self, file):
        self.logger.log(
            "INFO",
            "EXTRACTING FEATURE {} FROM FILE: {}".format(self.algorithm, file))

        res = self.readFile(file)

        if (self.algorithm == 'DfaMeanCorr'):
            feature = np.zeros([378, 1])
        else:
            feature = np.zeros([26, 1])

        if (res["error"]):
            return (feature)
        if (res["data"].shape[0] == 0):
            return (feature)
        if (res["data"].shape[1] < 10):
            return (feature)
        data = self.prepareMetrics(res["data"])
        try:
            if (self.algorithm == 'correlation'):
                feature = self.correlation(data)

            elif (self.algorithm == 'dfa'):
                feature = np.zeros([26, 1])
                for i in range(0, data.shape[1]):
                    feature[i] = nolds.dfa(data[:, i])

            elif (self.algorithm == 'sampen'):
                feature = np.zeros([26, 1])
                for i in range(0, data.shape[1]):
                    feature[i] = nolds.sampen(data[:, i], emb_dim=2,
                                              tolerance=0.2 * np.std(data[:, i]))

            elif (self.algorithm == 'hurst'):
                feature = np.zeros([26, 1])
                for i in range(0, data.shape[1]):
                    feature[i] = nolds.hurst_rs(data[:, i])

            elif (self.algorithm == 'DfaMeanCorr'):
                cache_filename = file + ".pkl"
                if (os.path.exists(cache_filename)):
                    self.logger.log("INFO", "CACHE FOUND, USING IT")
                    # use a context manager and avoid shadowing the 'file' argument
                    with open(cache_filename, 'rb') as cache_file:
                        cache = pickle.load(cache_file)
                else:
                    cache = {}
                if (not "corr" in cache):
                    featureCorr = self.correlation(data)
                    cache["corr"] = featureCorr
                else:
                    featureCorr = cache["corr"]
                if (not "cons" in cache):
                    featureConsump = self.consumption(res["data"])
                    cache["featureConsump"] = featureConsump
                else:
                    featureConsump = cache["cons"]
                if (not "dfa" in cache):
                    featureDfa = np.zeros([26, 1])
                    for i in range(0, int(data.shape[1])):
                        featureDfa[i] = nolds.dfa(data[:, i])
                    cache["dfa"] = featureDfa
                else:
                    featureDfa = cache["dfa"]
                if (not "entropy" in cache):
                    featureEntropy = np.zeros([26, 1])
                    for i in range(0, int(data.shape[1])):
                        featureEntropy[i] = nolds.sampen(data[:, i], 2)
                    cache["entropy"] = featureEntropy

                with open(cache_filename, 'wb') as output:
                    pickle.dump(cache, output, pickle.HIGHEST_PROTOCOL)
                feature = np.vstack(
                    [featureDfa, featureConsump, featureEntropy, featureCorr])

        except Exception as e:
            self.logger.log("ERROR","ERROR EXTRACTING FEATURE {} FROM FILE: {}, SKIPPING"\
       .format(self.algorithm,file))
            print(e)
            traceback.print_exc()
            for i in range(0, feature.shape[0]):
                feature[i] = 0
        return (feature)
Code example #22
File: test.py Project: mornydew/PyProj
#!/usr/bin/env python
# -*- coding: utf-8 -*-


def printMsg():
    print('Hello')


# printMsg()

import nolds
import numpy as np

rwalk = np.cumsum(np.random.random(1000))
h = nolds.hurst_rs(rwalk)
print(h)
Code example #23
 def GetEntropy(self, farry):
     resultlist = []
     resultlist.append(nolds.sampen(farry))
     resultlist.append(nolds.lyap_r(farry))
     resultlist.append(nolds.hurst_rs(farry))
     resultlist.append(nolds.dfa(farry))
     return resultlist
Code example #24
def hurst_exponent(y):
    # Hurst exponent
    return nolds.hurst_rs(y)
Code example #25
def eeg_fractal_dim(epochs, entropy=True, hurst=True, dfa=False, lyap_r=False, lyap_e=False):
    """
    """
    clock = Time()

    df = epochs.to_data_frame(index=["epoch", "time", "condition"])

    # Separate indexes
    index = df.index.tolist()
    epochs = []
    times = []
    events = []
    for i in index:
        epochs.append(i[0])
        times.append(i[1])
        events.append(i[2])



    data = {}
    if entropy == True:
        data["Entropy"] = {}
    if hurst == True:
        data["Hurst"] = {}
    if dfa == True:
        data["DFA"] = {}
    if lyap_r == True:
        data["Lyapunov_R"] = {}
    if lyap_e == True:
        data["Lyapunov_E"] = {}


    clock.reset()
    for epoch in set(epochs):
        subset = df.loc[epoch]

        if entropy == True:
            data["Entropy"][epoch] = []
        if hurst == True:
            data["Hurst"][epoch] = []
        if dfa == True:
            data["DFA"][epoch] = []
        if lyap_r == True:
            data["Lyapunov_R"][epoch] = []
        if lyap_e == True:
            data["Lyapunov_E"][epoch] = []



        for channel in subset:
            if entropy == True:
                data["Entropy"][epoch].append(nolds.sampen(subset[channel]))
            if hurst == True:
                data["Hurst"][epoch].append(nolds.hurst_rs(subset[channel]))
            if dfa == True:
                data["DFA"][epoch].append(nolds.dfa(subset[channel]))
            if lyap_r == True:
                data["Lyapunov_R"][epoch].append(nolds.lyap_r(subset[channel]))
            if lyap_e == True:
                data["Lyapunov_E"][epoch].append(nolds.lyap_e(subset[channel]))

        if entropy == True:
            data["Entropy"][epoch] = np.mean(data["Entropy"][epoch])
        if hurst == True:
            data["Hurst"][epoch] = np.mean(data["Hurst"][epoch])
        if dfa == True:
            data["DFA"][epoch] = np.mean(data["DFA"][epoch])
        if lyap_r == True:
            data["Lyapunov_R"][epoch] = np.mean(data["Lyapunov_R"][epoch])
        if lyap_e == True:
            data["Lyapunov_E"][epoch] = np.mean(data["Lyapunov_E"][epoch])


        time = clock.get(reset=False)/1000
        time = time/(epoch+1)
        time = (time * (len(set(epochs))-epoch))/60
        print(str(round((epoch+1)/len(set(epochs))*100,2)) + "% complete, remaining time: " + str(round(time, 2)) + 'min')

    df = pd.DataFrame.from_dict(data)

    list_events = []
    for i in range(len(events)):
        list_events.append(events[i] + "_" + str(epochs[i]))

    list_events = np.array(list_events)[np.where(find_following_duplicates(list_events))]
    list_events = [re.sub(r'_\d+', '', i) for i in list_events]
    df["Epoch"] = list_events
    return(df)
Code example #26
def hurstrs(y): return nolds.hurst_rs(y)
def dfa(y): return nolds.dfa(y)
Code example #27
def hurstrs(y):
    return nolds.hurst_rs(y)
Code example #28
File: test.py Project: Ernestyj/PyProj
        if df is None:
            df = tempDF
        else:
            df = df.append(tempDF)
    pathName = "temp.csv"
    resultDF = df[startDay:endDay]
    resultDF.to_csv(pathName)
    return pathName, resultDF


# Path
baseDir = "E:\\Downloads\\Data\\"
# baseDir = '/Users/eugene/Downloads/data/'
# Stock code
instrument = "000001.SH"
# Start date
startDay = "2013-01-01"
# End date
endDay = "2015-06-06"

pathName, df = readAndReWriteCSV(baseDir, instrument, startDay, endDay)

print(nolds.hurst_rs(df["PctChange"]))

import HurstExponent

# print(HurstExponent.computeHurst(df['PctChange']))
# print(HurstExponent.computeHurstExpecPeters(df['PctChange']))

print(HurstExponent.hurst(df["Adj Close"]))
Code example #29
File: fractal.py Project: asliusar/WaveletQuotes
import nolds
import numpy as np

rwalk = np.cumsum(np.random.random(1000))
print("fractal {}".format(nolds.dfa(rwalk)))
print("Lup {}".format(nolds.lyap_e(rwalk)))
print("Lup {}".format(nolds.lyap_r(rwalk)))
print("Hurst {}".format(nolds.hurst_rs(rwalk)))

Code example #30
def complexity(signal,
               shannon=True,
               sampen=True,
               multiscale=True,
               fractal_dim=True,
               hurst=True,
               dfa=True,
               lyap_r=False,
               lyap_e=False,
               emb_dim=2,
               tolerance="default"):
    """
    Returns several chaos/complexity indices of a signal (including entropy, fractal dimensions, Hurst and Lyapunov exponent etc.).

    Parameters
    ----------
    signal : list or array
        List or array of values.
    shannon : bool
        Computes Shannon entropy.
    sampen : bool
        Computes approximate sample entropy (sampen) using Chebychev and Euclidean distances.
    multiscale : bool
        Computes multiscale entropy (MSE). Note that it uses the 'euclidean' distance.
    fractal_dim : bool
        Computes the fractal (correlation) dimension.
    hurst : bool
        Computes the Hurst exponent.
    dfa : bool
        Computes DFA.
    lyap_r : bool
        Computes Positive Lyapunov exponents (Rosenstein et al. (1993) method).
    lyap_e : bool
        Computes Positive Lyapunov exponents (Eckmann et al. (1986) method).
    emb_dim : int
        The embedding dimension (*m*, the length of vectors to compare). Used in sampen and fractal_dim.
    tolerance : float
        Distance *r* threshold for two template vectors to be considered equal. Default is 0.2*std(signal). Used in sampen and fractal_dim.

    Returns
    ----------
    complexity : dict
        Dict containing values for each index.


    Example
    ----------
    >>> import neurokit as nk
    >>> import numpy as np
    >>>
    >>> signal = np.sin(np.log(np.random.sample(666)))
    >>> complexity = nk.complexity(signal)

    Notes
    ----------
    *Details*

    - **shannon entropy**: Entropy is a measure of unpredictability of the state, or equivalently, of its average information content.
    - **sample entropy (sampen)**: Measures the complexity of a time-series, based on approximate entropy. The sample entropy of a time series is defined as the negative natural logarithm of the conditional probability that two sequences similar for emb_dim points remain similar at the next point, excluding self-matches. A lower value for the sample entropy therefore corresponds to a higher probability indicating more self-similarity.
    - **multiscale entropy**: Multiscale entropy (MSE) analysis is a new method of measuring the complexity of finite length time series.
    - **fractal dimension**: A measure of the fractal (or correlation) dimension of a time series which is also related to complexity. The correlation dimension is a characteristic measure that can be used to describe the geometry of chaotic attractors. It is defined using the correlation sum C(r) which is the fraction of pairs of points X_i in the phase space whose distance is smaller than r.
    - **hurst**: The Hurst exponent is a measure of the "long-term memory" of a time series. It can be used to determine whether the time series is more, less, or equally likely to increase if it has increased in previous steps. This property makes the Hurst exponent especially interesting for the analysis of stock data.
    - **dfa**: DFA measures the Hurst parameter H, which is very similar to the Hurst exponent. The main difference is that DFA can be used for non-stationary processes (whose mean and/or variance change over time).
    - **lyap**: Positive Lyapunov exponents indicate chaos and unpredictability. Provides the algorithm of Rosenstein et al. (1993) to estimate the largest Lyapunov exponent and the algorithm of Eckmann et al. (1986) to estimate the whole spectrum of Lyapunov exponents.


    *Authors*

    - Christopher Schölzel (https://github.com/CSchoel)
    - tjugo (https://github.com/nikdon)
    - Dominique Makowski (https://github.com/DominiqueMakowski)

    *Dependencies*

    - nolds
    - numpy

    *See Also*

    - nolds package: https://github.com/CSchoel/nolds
    - pyEntropy package: https://github.com/nikdon/pyEntropy

    References
    -----------
    - Richman, J. S., & Moorman, J. R. (2000). Physiological time-series analysis using approximate entropy and sample entropy. American Journal of Physiology-Heart and Circulatory Physiology, 278(6), H2039-H2049.
    - Costa, M., Goldberger, A. L., & Peng, C. K. (2005). Multiscale entropy analysis of biological signals. Physical review E, 71(2), 021906.
    """

    if tolerance == "default":
        tolerance = 0.2 * np.std(signal)

    # Initialize results storing
    complexity = {}

    # Shannon
    if shannon is True:
        try:
            complexity["Shannon_Entropy"] = entropy_shannon(signal)
        except:
            print(
                "NeuroKit warning: complexity(): Failed to compute Shannon entropy."
            )
            complexity["Shannon_Entropy"] = np.nan

    # Sampen
    if sampen is True:
        try:
            complexity["Sample_Entropy_Chebychev"] = nolds.sampen(
                signal,
                emb_dim,
                tolerance,
                dist="chebychev",
                debug_plot=False,
                plot_file=None)
        except:
            print(
                "NeuroKit warning: complexity(): Failed to compute sample entropy (sampen) using chebychev distance."
            )
            complexity["Sample_Entropy_Chebychev"] = np.nan
        try:
            complexity["Sample_Entropy_Euclidean"] = nolds.sampen(
                signal,
                emb_dim,
                tolerance,
                dist="euclidean",
                debug_plot=False,
                plot_file=None)
        except:
            try:
                complexity["Sample_Entropy_Euclidean"] = nolds.sampen(
                    signal,
                    emb_dim,
                    tolerance,
                    dist="euler",
                    debug_plot=False,
                    plot_file=None)
            except:
                print(
                    "NeuroKit warning: complexity(): Failed to compute sample entropy (sampen) using euclidean distance."
                )
                complexity["Sample_Entropy_Euclidean"] = np.nan

    # multiscale
    if multiscale is True:
        try:
            complexity["Multiscale_Entropy"] = entropy_multiscale(
                signal, emb_dim, tolerance)
        except:
            print(
                "NeuroKit warning: complexity(): Failed to compute Multiscale Entropy (MSE)."
            )
            complexity["Multiscale_Entropy"] = np.nan

    # fractal_dim
    if fractal_dim is True:
        try:
            complexity["Fractal_Dimension"] = nolds.corr_dim(signal,
                                                             emb_dim,
                                                             rvals=None,
                                                             fit="RANSAC",
                                                             debug_plot=False,
                                                             plot_file=None)
        except:
            print(
                "NeuroKit warning: complexity(): Failed to compute fractal_dim."
            )
            complexity["Fractal_Dimension"] = np.nan

    # Hurst
    if hurst is True:
        try:
            complexity["Hurst"] = nolds.hurst_rs(signal,
                                                 nvals=None,
                                                 fit="RANSAC",
                                                 debug_plot=False,
                                                 plot_file=None)
        except:
            print("NeuroKit warning: complexity(): Failed to compute hurst.")
            complexity["Hurst"] = np.nan

    # DFA
    if dfa is True:
        try:
            complexity["DFA"] = nolds.dfa(signal,
                                          nvals=None,
                                          overlap=True,
                                          order=1,
                                          fit_trend="poly",
                                          fit_exp="RANSAC",
                                          debug_plot=False,
                                          plot_file=None)
        except:
            print("NeuroKit warning: complexity(): Failed to compute dfa.")
            complexity["DFA"] = np.nan

    # Lyap_r
    if lyap_r is True:
        try:
            complexity["Lyapunov_R"] = nolds.lyap_r(signal,
                                                    emb_dim=10,
                                                    lag=None,
                                                    min_tsep=None,
                                                    tau=1,
                                                    min_vectors=20,
                                                    trajectory_len=20,
                                                    fit="RANSAC",
                                                    debug_plot=False,
                                                    plot_file=None)
        except:
            print("NeuroKit warning: complexity(): Failed to compute lyap_r.")
            complexity["Lyapunov_R"] = np.nan

    # Lyap_e
    if lyap_e is True:
        try:
            result = nolds.lyap_e(signal,
                                  emb_dim=10,
                                  matrix_dim=4,
                                  min_nb=None,
                                  min_tsep=0,
                                  tau=1,
                                  debug_plot=False,
                                  plot_file=None)
            for i, value in enumerate(result):
                complexity["Lyapunov_E_" + str(i)] = value
        except:
            print("NeuroKit warning: complexity(): Failed to compute lyap_e.")
            complexity["Lyapunov_E"] = np.nan

    return (complexity)
Code example #31
def process_data(data, channelNames, srate):
    global f_labels, processed_channel_names

    # Default RQA parameters
    embedding = 10  # Embedding dimension
    tdelay = 2  # Time delay
    tau = 30  # neighbourhood radius (threshold) for RQA

    # Multiscaling is accomplished with a wavelet transform
    # Options for basis functions: ['haar', 'db', 'sym', 'coif', 'bior', 'rbio', 'dmey']
    #wavelet = 'haar'
    wavelet = 'db4'
    mode = 'cpd'
    #mode = pywt.Modes.smooth

    # Simple array for entropy value
    ent = np.zeros(1)

    # Determine the number of levels required so that
    # the lowest level approximation is roughly the
    # delta band (freq range 0-4 Hz)

    if srate <= 128: levels = 4
    elif srate <= 256: levels = 5
    elif srate <= 512:  # subsample
        srate = srate / 2.0
        n = len(data[0])
        data = data[0:, 0:n:2]
        levels = 5
    elif srate <= 1024:
        srate = srate / 4.0
        n = len(data[0])
        data = data[0:, 0:n:4]
        levels = 5
    nbands = levels

    wavelet_scale = {}
    f_limit = {}

    # The following function returns the highest level (ns) approximation
    # in dec[0], then details for level ns in dec[1]. Each successive
    # level of detail coefficients is in dec[2] through dec[ns].
    #
    #   level       approximation       details
    #   0           original signal     --
    #   1                -              dec[ns]
    #   2                -              dec[ns-1]
    #   3                -              dec[ns-2]
    #   i              -                dec[ns-i+1]
    #   ns          dec[0]              dec[1]

    WRITE_RP_IMAGE_FILE = False

    # Print screen headers
    sys.stdout.write("%10s %6s " % ("Sensor", "Freq"))
    for f in all_features:
        sys.stdout.write(" %8s " % (f))
    sys.stdout.write("\n")

    D = {}

    for c, ch in enumerate(channelNames):
        if ch in master_channel_list:
            processed_channel_names.append(ch)

            # Create a raw recurrence plot image for the original signal from this channel
            if WRITE_RP_IMAGE_FILE:
                rp_plot_name = filename + "_" + ch + "_" + "rp" + ".png"
                print("            write rp image file ", rp_plot_name)
                settings = Settings(data[c],
                                    embedding_dimension=embedding,
                                    time_delay=tdelay,
                                    neighbourhood=FixedRadius(0))
                #computation = RQAComputation.create(settings, verbose=False)
                rp_computation = RecurrencePlotComputation.create(
                    settings, verbose=False)
                result = rp_computation.run()
                ImageGenerator.save_recurrence_plot(
                    result.recurrence_matrix_reverse, rp_plot_name)

            D[ch] = {}

            #--------------------------------------------------------------------
            # Get the wavelet decomposition. See pywavelet (or pywt) documents.
            # Deconstruct the waveforms
            # S = An + Dn + Dn-1 + ... + D1
            #--------------------------------------------------------------------
            w = pywt.Wavelet(wavelet)
            m = np.mean(data[c])
            a_orig = data[c] - m  # the original signal, initially
            a = a_orig

            ca = []  # all the approximations
            cd = []  # all the details
            sqrt2 = np.sqrt(2.0)
            for i in range(nbands):
                (a, d) = pywt.dwt(a, w, mode)
                f = pow(sqrt2, i + 1)
                ca.append(a / f)
                cd.append(d / f)

            if 1 == 0:  # this will build full reconstructed signals at every level
                rec_a = []  # reconstructed approximations
                rec_d = []  # reconstructed details
                for i, coeff in enumerate(ca):
                    coeff_list = [coeff, None] + [None] * i
                    rec_a.append(pywt.waverec(coeff_list, w))
                for i, coeff in enumerate(cd):
                    coeff_list = [None, coeff] + [None] * i
                    rec_d.append(pywt.waverec(coeff_list, w))
            else:
                rec_a = ca
                rec_d = cd

            # Use the details and last approximation to create all the power-of-2 freq bands
            f_labels = ['A0']
            wavelet_scale = {}
            wavelet_scale['A0'] = 0
            f_limit = {}
            f_limit['A0'] = srate / 2.0
            fs = [srate]
            freqband = [a_orig]  # A0 is the original signal
            N = len(a_orig)
            f = srate / 4.0
            for j, r in enumerate(rec_a):
                freq_name = 'A' + str(j + 1)
                wavelet_scale[freq_name] = j + 1
                f_limit[freq_name] = f
                f = f / 2.0
                f_labels.append(freq_name)
                freqband.append(r[0:N])  # wavelet approximation for this band

            f = srate / 2.0
            for j, r in enumerate(rec_d):
                freq_name = 'D' + str(j + 1)
                wavelet_scale[freq_name] = j + 1
                f_limit[freq_name] = f
                f = f / 2.0
                f_labels.append(freq_name)
                freqband.append(r[0:N])  # wavelet details for this band

            #--------------------------------------------------------------------
            # Compute features on each of the frequency bands
            #--------------------------------------------------------------------
            for f in all_features:
                D[ch][f] = {}

            #----------------------
            # Feature set 1: Power
            for i, y in enumerate(freqband):
                v = bandpower(y)
                D[ch]["Power"][f_labels[i]] = v

                #----------------------
                # Feature set 2: Sample Entropy, Hurst parameter, DFA, Lyapunov exponents
                D[ch]["SampE"][f_labels[i]] = nolds.sampen(y)

                try:
                    D[ch]["hurst_rs"][f_labels[i]] = nolds.hurst_rs(y)
                except:
                    D[ch]["hurst_rs"][f_labels[i]] = 0.0

                try:
                    D[ch]["dfa"][f_labels[i]] = nolds.dfa(y)
                except:
                    D[ch]["dfa"][f_labels[i]] = 0.0

                try:
                    D[ch]["cd"][f_labels[i]] = nolds.corr_dim(y, embedding)
                except:
                    D[ch]["cd"][f_labels[i]] = 0.0

                try:
                    #lyap = nolds.lyap_e(y, emb_dim= embedding)
                    lyap0 = nolds.lyap_r(y, emb_dim=embedding)
                except:
                    #lyap = [0.0, 0.0, 0.0]
                    lyap0 = 0.0
                D[ch]["lyap0"][f_labels[i]] = lyap0

                #----------------------
                # Feature set 3: Recurrence Quantitative Analysis (RQA)
                # This routine seems to be incredibly slow and may need improvement
                rqa_features = [
                    "RR", "DET", "LAM", "L_entr", "L_max", "L_mean", "TT"
                ]
                pyRQA_names = ['recurrence_rate', 'determinism', 'laminarity',
                               'entropy_diagonal_lines', 'longest_diagonal_line',
                               'average_diagonal_line', 'trapping_time']

                # First check to see if RQA values are needed at all
                compute_RQA = False
                for r in rqa_features:
                    if r in all_features:
                        compute_RQA = True
                        break

                if compute_RQA:
                    #for i, y in enumerate(freqband):
                    settings = Settings(
                        y,
                        embedding_dimension=embedding,
                        time_delay=tdelay,
                        neighbourhood=FixedRadius(tau)
                        #similarity_measure=EuclideanMetric,
                        #theiler_corrector=1,
                        #min_diagonal_line_length=2,
                        #min_vertical_line_length=2,
                        #min_white_vertical_line_length=2)
                    )
                    computation = RQAComputation.create(settings,
                                                        verbose=False)
                    result = computation.run()

                    # We have to pull out each value
                    w = f_labels[i]
                    D[ch]["RR"][w] = result.recurrence_rate
                    D[ch]["DET"][w] = result.determinism
                    D[ch]["LAM"][w] = result.laminarity
                    D[ch]["L_entr"][w] = result.entropy_diagonal_lines
                    D[ch]["L_max"][w] = result.longest_diagonal_line
                    D[ch]["L_mean"][w] = result.average_diagonal_line
                    D[ch]["TT"][w] = result.trapping_time

                    # Write results from first channel to the screen, to give
                    # visual feedback that the code is running

                w = f_labels[i]
                sys.stdout.write("%10s %6s " % (ch, w))
                for dyn_inv in all_features:  # D[ch].keys():
                    v = D[ch][dyn_inv][w]
                    sys.stdout.write(" %8.3f " % (v))
                sys.stdout.write("\n")

    return D, srate, wavelet_scale, f_limit
Code example #32
File: Stat_Fcns.py Project: tfz2101/ML_Trading
 def hurstExp(self, data):
     price_data = data[:,0]
     return hurst_rs(price_data)