Example #1
"""Averaged local deformation (ILD) for the Lorenz system.

The correct time delay is 10, and the averaged local deformation has a clear
local minimum near that value. Moreover, the resulting plot resembles Fig. 9
of Buzug & Pfister (1992), as expected.
"""

import matplotlib.pyplot as plt
import numpy as np

from nolitsa import data, dimension

sample = 0.01

x = data.lorenz(length=10000,
                x0=None,
                sigma=10.0,
                beta=8.0 / 3.0,
                rho=28.0,
                step=0.001,
                sample=sample,
                discard=1000)[1][:, 0]

dim = np.arange(2, 7, 1)
maxtau = 60

ilds = dimension.ild(x,
                     dim=dim,
                     qmax=10,
                     maxtau=maxtau,
                     rp=0.04,
                     nrefp=0.02,
                     k=None)
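
# Hypothetical continuation (the original snippet is truncated here):
# assuming `ilds` holds one ILD curve of length `maxtau` per embedding
# dimension, average the curves over the dimensions and look for the
# local minimum, which should fall near tau = 10.
avg_ild = np.mean(np.asarray(ilds), axis=0)

plt.title('Averaged local deformation for the Lorenz system')
plt.xlabel(r'Time delay $\tau$')
plt.ylabel(r'Averaged local deformation')
plt.plot(np.arange(len(avg_ild)), avg_ild)
plt.axvline(10, color='#CCCCCC')
plt.show()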
Example #2
import matplotlib.pyplot as plt
import numpy as np

from nolitsa import data, delay, noise


def localmin(x):
    """Return the indices of all local minima of a 1D array.

    Note that even "kinky" minima (which are probably not true minima
    of the underlying curve) will be returned.

    Parameters
    ----------
    x : array
        1D scalar data set.

    Returns
    -------
    i : array
        Array containing location of all local minima.
    """
    return (np.diff(np.sign(np.diff(x))) > 0).nonzero()[0] + 1


x = data.lorenz()[1][:, 0]

# Compute autocorrelation and delayed mutual information.
lag = np.arange(100)
r = delay.acorr(x, maxtau=100)
i = delay.dmi(x, maxtau=100)

# While looking for local minima in the DMI curve, it's useful to apply
# a simple moving average (SMA) first to remove "kinky" minima.
i_delay = localmin(noise.sma(i, hwin=1)) + 1
r_delay = np.argmax(r < 1.0 / np.e)

print(r'Minima of delayed mutual information = %s' % i_delay)
print(r'Autocorrelation time = %d' % r_delay)

plt.figure(1)
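
# Hypothetical continuation (the original snippet is truncated here):
# plot the delayed mutual information and the autocorrelation, marking
# the estimated delays on each curve.
plt.subplot(211)
plt.title('Delay estimation for the Lorenz system')
plt.ylabel(r'Delayed mutual information')
plt.plot(lag, i, i_delay, i[i_delay], 'o')

plt.subplot(212)
plt.xlabel(r'Time delay $\tau$')
plt.ylabel(r'Autocorrelation')
plt.plot(lag, r, r_delay, r[r_delay], 'o')
plt.show()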
Example #3
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""D2 of the Lorenz system.

The estimates here match the "accepted" value of 2.068 quite closely.
"""

import numpy as np
import matplotlib.pyplot as plt
from nolitsa import d2, data, utils

x = utils.rescale(data.lorenz(length=5000)[1][:, 0])

dim = np.arange(1, 10 + 1)
tau = 5

plt.title('Local $D_2$ vs $r$ for Lorenz attractor')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')

for r, c in d2.c2_embed(x, tau=tau, dim=dim, window=50):
    plt.semilogx(r[3:-3], d2.d2(r, c), color='#4682B4')

plt.semilogx(utils.gprange(0.001, 1.0, 100),
             2.068 * np.ones(100),
             color='#000000')
plt.show()
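
# For context: d2.d2 estimates the local slope of the log C(r) vs
# log r curve.  A minimal sketch of the same idea, using np.gradient
# (illustrative only, not nolitsa's exact implementation, which fits
# over a sliding window):
def local_d2(r, c):
    return np.gradient(np.log(c), np.log(r))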
Example #4
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Illustration of end-point mismatch.

As one can clearly see, there are spurious high-frequency oscillations
in the surrogate series generated with the second data set (whose
end-points don't match).  These high-frequency oscillations appear as a
sort of "crinkliness" spread throughout the time series.
"""

from nolitsa import data, surrogates

import matplotlib.pyplot as plt
import numpy as np

x = data.lorenz(x0=[-13.5, -16.0, 31.0], length=(2**12))[1][:, 0]

# Maximum mismatch occurs for the segment (537, 3662).
# Minimum mismatch occurs for the segment (422, 3547).
# end, d = surrogates.mismatch(x, length=1024)

plt.subplot(211)
plt.title(r'Original time series')
plt.ylabel(r'Measurement $x(t)$')

plt.plot(np.arange(3800), x[100:3900], '--')
plt.plot(np.arange(437, 3562), x[537:3662])

plt.subplot(212)
plt.xlabel(r'Time $t$')
plt.ylabel(r'Measurement $x(t)$')
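
# Hypothetical continuation (the original snippet is truncated here):
# IAAFT surrogates from the minimum-mismatch segment (422, 3547) and
# the maximum-mismatch segment (537, 3662).  The latter should show the
# spurious high-frequency "crinkliness" described above.  Both series
# are shifted so that they align with the time axis of the top panel.
y_good = surrogates.iaaft(x[422:3547])[0]
y_bad = surrogates.iaaft(x[537:3662])[0]

plt.plot(np.arange(322, 3447), y_good, '--')
plt.plot(np.arange(437, 3562), y_bad)
plt.show()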
Example #5
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Maximum Lyapunov exponent for the Lorenz system.

Our estimate is quite close to the "accepted" value of 1.50.
Cf. Fig. 2 of Rosenstein et al. (1993).
"""

from nolitsa import data, lyapunov
import numpy as np
import matplotlib.pyplot as plt

sample = 0.01
x0 = [0.62225717, -0.08232857, 30.60845379]
x = data.lorenz(length=5000, sample=sample, x0=x0,
                sigma=16.0, beta=4.0, rho=45.92)[1][:, 0]

# Choose appropriate Theiler window.
window = 60

# Time delay.
tau = 13

# Embedding dimension.
dim = [5]

d = lyapunov.mle_embed(x, dim=dim, tau=tau, maxt=300, window=window)[0]
t = np.arange(300)

plt.title('Maximum Lyapunov exponent for the Lorenz system')
plt.xlabel(r'Time $t$')
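
# Hypothetical continuation (the original snippet is truncated here):
# plot the average divergence and estimate the MLE as the slope of its
# initial linear region (the fit range of the first 100 points, i.e.,
# one time unit, is an assumption).
plt.ylabel(r'Average divergence $\langle d_i(t) \rangle$')
plt.plot(sample * t, d)

poly = np.polyfit(sample * t[:100], d[:100], 1)
print('MLE estimate = %.3f (accepted value is 1.50)' % poly[0])
plt.plot(sample * t, np.polyval(poly, sample * t), '--')
plt.show()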
Example #6
import matplotlib.pyplot as plt
import numpy as np

from nolitsa import data


def skew(x, t=1):
    """Return the skewness of the distribution of t-increments.

    Parameters
    ----------
    x : array
        1D scalar data set.
    t : int, optional (default = 1)
        Time shift used to form the increments.

    Returns
    -------
    s : float
        Coefficient of skewness of the distribution of t-increments.

    Notes
    -----
    The skew statistic is often touted to have good distinguishing power
    between nonlinearity and linearity.  But it is known to fail
    miserably in both cases (i.e., it often judges nonlinear series as
    linear and vice-versa) and should be avoided for serious analysis.
    """
    dx = x[t:] - x[:-t]
    dx = dx - np.mean(dx)
    return np.mean(dx**3) / np.mean(dx**2)**1.5


x = data.lorenz(length=(2**12))[1][:, 0]

plt.figure(1)

plt.subplot(121)
plt.title('Actual')
plt.xlabel(r'$x(t)$')
plt.ylabel(r'$x(t + \tau)$')
plt.plot(x[:-5], x[5:])

plt.subplot(122)
plt.title('Reversed')
plt.xlabel(r'$\hat{x}(t)$')
plt.ylabel(r'$\hat{x}(t + \tau)$')
x_rev = x[::-1]
plt.plot(x_rev[:-5], x_rev[5:])
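
# The original snippet is truncated here; display both phase portraits.
plt.show()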