def _embed_solo(t, x, ws, ss):
    """Embeds the time series in each sliding window."""
    n = len(t)
    nw = int(np.floor(float(n - ws) / float(ss)))
    tm = np.empty(nw, dtype="object")
    m, tau = [np.zeros(nw, dtype="int") for i in range(2)]
    xw = np.zeros((nw, ws), dtype="float")
    maxlag = 150
    maxdim = 10
    R = 0.025
    pb = _progressbar_start(max_value=nw, pbar_on=args.verbose)
    for i in range(nw):
        start = i * ss
        end = start + ws
        x_ = x[start:end]
        xw[i] = x_
        # embedding delay: first minimum of the self mutual information
        mi, mi_lags = rc.mi(x_, maxlag, pbar_on=False)
        mi_filt, _ = utils.boxfilter(mi, filter_width=3, estimate="mean")
        try:
            tau[i] = rc.first_minimum(mi_filt)
        except ValueError:
            tau[i] = 1
        # embedding dimension: first zero of the FNN fraction
        fnn, dims = rc.fnn(x_, tau[i], maxdim=maxdim, r=R, pbar_on=False)
        m[i] = dims[rc.first_zero(fnn)]
        # time stamp at the mid-point of the window
        tm[i] = t[start] + (t[end] - t[start]) / 2
        _progressbar_update(pb, i)
    _progressbar_finish(pb)
    return tm, xw, m, tau
def fnn(x, tau, maxdim, r=0.10, pbar_on=True):
    """
    Returns the fraction of false nearest neighbours up to a maximum
    embedding dimension.
    """
    # initialize parameters
    sd = x.std()
    r = r * (x.max() - x.min())
    e = sd / r
    fnn = np.zeros(maxdim)
    dims = np.arange(1, maxdim + 1, dtype="int")

    # ensure that (m - 1) * tau does not exceed N = len(x)
    N = len(x)
    K = maxdim * tau
    if K >= N:
        m_c = N / tau
        i = np.where(dims >= m_c)
        fnn[i] = np.nan
        j = np.where(dims < m_c)
        dims = dims[j]

    # get first values of distances for m = 1
    d_m, k_m = mindist(x, 1, tau)

    # loop over dimensions and get FNN values
    pb = _progressbar_start(max_value=maxdim + 1, pbar_on=pbar_on)
    for m in dims:
        # get minimum distances for one dimension higher
        d_m1, k_m1 = mindist(x, m + 1, tau)
        # remove those indices in the m-dimensional calculations which cannot
        # occur in the (m+1)-dimensional arrays, as the (m+1)-dimensional
        # arrays are smaller
        cond1 = k_m[1] > k_m1[0][-1]
        cond2 = k_m[0] > k_m1[0][-1]
        j = np.where(~(cond1 + cond2))[0]
        k_m_ = (k_m[0][j], k_m[1][j])
        d_k_m, d_k_m1 = d_m[k_m_], d_m1[k_m_]
        n_m1 = d_k_m.shape[0]
        # calculate quantities in Eq. 3.8 of Kantz & Schreiber (2004), 2nd Ed.
        j = d_k_m > 0.
        y = np.zeros(n_m1, dtype="float")
        y[j] = (d_k_m1[j] / d_k_m[j] > e)    # should be r instead of e = sd / r
        w = (e > d_k_m)
        num = float((y * w).sum())
        den = float(w.sum())
        # assign FNN value depending on whether the denominator is zero
        if den != 0.:
            fnn[m - 1] = num / den
        else:
            fnn[m - 1] = np.nan
        # assign higher-dimensional values to the current ones before the
        # next iteration
        d_m, k_m = d_m1, k_m1
        _progressbar_update(pb, m)
    _progressbar_finish(pb)
    return fnn, dims
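A minimal usage sketch for `fnn`, assuming the module's `mindist` and progress-bar helpers are importable alongside it. The noisy sine, the delay `tau=25`, and the 0.1 cut-off are illustrative choices only; the dimension is picked the way `rc.first_zero(fnn)` is used elsewhere in this repo, i.e. as the first dimension whose FNN fraction (nearly) vanishes.

import numpy as np

# a clean oscillation plus weak noise; roughly 100 samples per period
x = np.sin(np.linspace(0., 100. * np.pi, 5000)) + 0.01 * np.random.randn(5000)

# FNN fractions for embedding dimensions 1 ... 10 at a quarter-period delay
fnn_frac, dims = fnn(x, tau=25, maxdim=10, r=0.10, pbar_on=False)

# first dimension whose FNN fraction has (nearly) vanished
m_est = dims[np.where(fnn_frac[:len(dims)] < 0.1)[0][0]]
print("estimated embedding dimension:", m_est)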
def mi(x, maxlag, binrule="fd", pbar_on=True):
    """
    Returns the self mutual information of a time series up to a maximum lag.
    """
    # initialize variables
    n = len(x)
    lags = np.arange(0, maxlag, dtype="int")
    mi = np.zeros(len(lags))

    # loop over lags and get MI
    pb = _progressbar_start(max_value=maxlag, pbar_on=pbar_on)
    for i, lag in enumerate(lags):
        # extract lagged data
        y1 = x[:n - lag].copy()
        y2 = x[lag:].copy()
        # use np.histogram to get the individual entropies
        H1, be1 = entropy1d(y1, binrule)
        H2, be2 = entropy1d(y2, binrule)
        H12, _, _ = entropy2d(y1, y2, [be1, be2])
        # use the entropies to estimate MI
        mi[i] = H1 + H2 - H12
        _progressbar_update(pb, i)
    _progressbar_finish(pb)
    return mi, lags
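The helpers `entropy1d` and `entropy2d` are not shown in this section; a minimal sketch of what they are assumed to compute (histogram-based Shannon entropies in nats, with the bin rule passed straight to NumPy), so that the estimate above reads as MI = H(X) + H(Y) - H(X, Y). The `_sketch` names are hypothetical and only illustrate the assumed return signatures.

import numpy as np

def entropy1d_sketch(y, binrule="fd"):
    """Shannon entropy (nats) of y from a 1-D histogram; returns (H, bin_edges)."""
    counts, edges = np.histogram(y, bins=binrule)
    p = counts / float(counts.sum())
    p = p[p > 0.]                      # convention: 0 * log(0) := 0
    return -(p * np.log(p)).sum(), edges

def entropy2d_sketch(y1, y2, bin_edges):
    """Joint Shannon entropy (nats) of (y1, y2) from a 2-D histogram."""
    counts, ex, ey = np.histogram2d(y1, y2, bins=bin_edges)
    p = counts / float(counts.sum())
    p = p[p > 0.]
    return -(p * np.log(p)).sum(), ex, ey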
def _embed_pair(t, x, y, ws, ss):
    """Determines common embedding parameters for both time series."""
    n = len(t)
    nw = int(np.floor(float(n - ws) / float(ss)))
    tm = np.empty(nw, dtype="object")
    m, tau = [np.zeros(nw, dtype="int") for i in range(2)]
    xw, yw = [np.zeros((nw, ws), dtype="float") for i in range(2)]
    maxlag = 150
    maxdim = 10
    R = 0.025
    pb = _progressbar_start(max_value=nw, pbar_on=args.verbose)
    for i in range(nw):
        start = i * ss
        end = start + ws
        x_ = x[start:end]
        y_ = y[start:end]
        xw[i] = x_
        yw[i] = y_
        # embedding delay: first minimum of MI; common delay = max of the two
        mi1, mi_lags1 = rc.mi(x_, maxlag, pbar_on=False)
        mi_filt1, _ = utils.boxfilter(mi1, filter_width=3, estimate="mean")
        tau1 = rc.first_minimum(mi_filt1)
        mi2, mi_lags2 = rc.mi(y_, maxlag, pbar_on=False)
        mi_filt2, _ = utils.boxfilter(mi2, filter_width=3, estimate="mean")
        tau2 = rc.first_minimum(mi_filt2)
        tau[i] = int(max(tau1, tau2))
        # embedding dimension: first zero of FNN; common dimension = max of the two
        fnn1, dims1 = rc.fnn(x_, tau[i], maxdim=maxdim, r=R, pbar_on=False)
        m1 = dims1[rc.first_zero(fnn1)]
        fnn2, dims2 = rc.fnn(y_, tau[i], maxdim=maxdim, r=R, pbar_on=False)
        m2 = dims2[rc.first_zero(fnn2)]
        m[i] = int(max(m1, m2))
        # time stamp at the mid-point of the window
        tm[i] = t[start] + (t[end] - t[start]) / 2
        _progressbar_update(pb, i)
    _progressbar_finish(pb)
    return tm, xw, yw, m, tau
            if np.all((yR_[1:] - yR_[:-1]) < TOL):
                yR_eq_.extend(yR_)
            else:
                # local maxima and minima of the y component
                d2y = np.diff(np.sign(np.diff(yR_)))
                iipos = np.where(d2y == -2.)[0] + 1
                if len(iipos) > 0:
                    yR_eq_.extend(yR_[iipos])
                iineg = np.where(d2y == 2.)[0] + 1
                if len(iineg) > 0:
                    yR_eq_.extend(yR_[iineg])
            _progressbar_update(pb, count)
            count += 1
        np.random.shuffle(yR_eq_)
        yR_eq.append(yR_eq_[-250:])
    yR_eq = np.array(yR_eq)
    _progressbar_finish(pb)

    # Henon
    # -----
    print("Henon ...")
    TH = np.arange(0, 5000, 1)
    aH = np.arange(0.0, 1.40001, 0.005)
    nH = len(aH)
    mH = 100
    xH_eq = []
    kH = 10
    pb = _progressbar_start(max_value=nH * kH, pbar_on=True)
    count = 0
    for i in range(nH):
        params = (aH[i], 0.30)
        xH_eq_ = []
def _get_data():
    """
    Estimates the Lyapunov exponent, DET, and SPL for the Henon map.
    """
    # Henon map time series
    print("Henon map time series ...")
    t = np.arange(0, 10000, 1)
    a = np.linspace(1.0, 1.4, na).reshape(na, 1)
    b = 0.30
    nt = len(t)
    x, y = [np.zeros((nt, na, ns)) for i in range(2)]
    x[0, :, :] = 1E-1 * np.random.rand(na, ns)
    y[0, :, :] = 1E-1 * np.random.rand(na, ns)
    pb = _progressbar_start(max_value=nt, pbar_on=True)
    LPV = np.zeros((na, ns))
    for i in range(1, nt):
        x[i, :, :] = 1. - a * x[i - 1, :, :] ** 2 + y[i - 1, :, :]
        y[i, :, :] = b * x[i - 1, :, :]
        # accumulate local stretching rates over the second half of the run
        if i >= nt / 2:
            LPV[:, :] += np.log(np.fabs(-2. * a * x[i - 1, :, :]))
        _progressbar_update(pb, i)
    _progressbar_finish(pb)
    xH_eq = x[-neq:, :, :]
    LPV /= float(nt)

    # estimate embedding parameters
    print("embedding parameters ...")
    tau, m = np.ones(na, dtype="int"), 2 * np.ones(na, dtype="int")

    # DET
    print("DET ...")
    RR = 0.30
    DET = np.zeros((na, ns))
    pb = _progressbar_start(max_value=ns * na, pbar_on=True)
    k = 0
    for j in range(ns):
        for i in range(na):
            R = rc.rp(xH_eq[:, i, j], m=m[i], tau=tau[i], e=RR,
                      norm="euclidean", threshold_by="frr")
            DET[i, j] = rqa.det(R, lmin=2, hist=None, verb=False)
            del R
            _progressbar_update(pb, k)
            k += 1
    _progressbar_finish(pb)

    # SPL
    print("SPL ...")
    SPL = np.zeros((na, ns))
    pb = _progressbar_start(max_value=ns * na, pbar_on=True)
    k = 0
    for j in range(ns):
        for i in range(na):
            A = rc.rn(xH_eq[:, i, j], m=m[i], tau=tau[i], e=RR,
                      norm="euclidean", threshold_by="frr")
            G = ig.Graph.Adjacency(A.tolist(), mode=ig.ADJ_UNDIRECTED)
            pl_hist = G.path_length_hist(directed=False)
            SPL[i, j] = pl_hist.mean
            del A, G
            _progressbar_update(pb, k)
            k += 1
    _progressbar_finish(pb)

    # save output
    FN = DATPATH + "det_spl_lpv_na%d_ns%s_neq%d" % (na, ns, neq)
    np.savez(FN, DET=DET, SPL=SPL, LPV=LPV, t=t, a=a, b=b, x=x, y=y)
    print("saved to %s.npz" % FN)
    return None
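For a single (a, b) pair, a scalar sketch of the Lyapunov-type accumulation used in the loop above: the local stretching rate log|-2 a x_{i-1}| (the x-derivative of the Henon update) is summed over the second half of the orbit and divided by the full run length, reproducing the function's own convention rather than a textbook two-dimensional Lyapunov estimate. The function name is hypothetical.

import numpy as np

def henon_lpv_sketch(a=1.4, b=0.3, nt=10000):
    """Scalar version of the LPV accumulation in _get_data()."""
    x, y = 0.1 * np.random.rand(), 0.1 * np.random.rand()
    lpv = 0.0
    for i in range(1, nt):
        if i >= nt // 2:
            # same term as above, evaluated at x_{i-1}
            lpv += np.log(np.fabs(-2. * a * x))
        x, y = 1. - a * x * x + y, b * x
    return lpv / float(nt)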
def surrogates(x, ns, method, params=None, verbose=False):
    """
    Returns ns random surrogates of x using the specified method.
    """
    nx = len(x)
    xs = np.zeros((ns, nx))
    if method == "iaaft":                   # iAAFT
        # as per the steps given in Lancaster et al., Phys. Rep. (2018)
        fft, ifft = np.fft.fft, np.fft.ifft
        TOL = 1E-6
        MSE_0 = 100
        MSE_K = 1000
        MAX_ITER = 10000
        ii = np.arange(nx)
        x_amp = np.abs(fft(x))
        x_srt = np.sort(x)
        pb = _progressbar_start(max_value=ns, pbar_on=verbose)
        for k in range(ns):
            # 1) generate a random shuffle of the data
            count = 0
            ri = np.random.permutation(ii)
            r_prev = x[ri]
            MSE_prev = MSE_0
            # while not np.all(rank_prev == rank_curr) and (count < MAX_ITER):
            while (np.abs(MSE_K - MSE_prev) > TOL) and (count < MAX_ITER):
                MSE_prev = MSE_K
                # 2) FFT the current iterate, then invert it while replacing
                #    the amplitudes with the original ones but keeping the
                #    phases of the FFT-ed random iterate
                phi_r_prev = np.angle(fft(r_prev))
                r = ifft(x_amp * np.exp(phi_r_prev * 1j), nx)
                # 3) rescale r to the original distribution of x
                ind = np.argsort(r)
                r[ind] = x_srt
                MSE_K = (np.abs(x_amp - np.abs(fft(r)))).mean()
                r_prev = r
                # repeat until the spectral MSE converges
                count += 1
            if count >= MAX_ITER:
                print("maximum number of iterations reached!")
            xs[k] = np.real(r)
            _progressbar_update(pb, k)
        _progressbar_finish(pb)
    elif method == "twins":                 # twin surrogates
        # 1. estimate the RP according to the given parameters
        R = rp(x, m=params["m"], tau=params["tau"], e=params["eps"],
               norm=params["norm"], threshold_by=params["thr_by"])
        # 2. get the embedded vectors
        xe = embed(x, params["m"], params["tau"])
        ne = len(xe)
        assert ne == len(R), "Something is wrong!"
        # 3. identify twins
        _printmsg("identify twins ...", verbose)
        is_twin = []
        twins = []
        TOL = np.floor((params["tol"] * float(nx)) / 100.).astype("int")
        pb = _progressbar_start(max_value=ne, pbar_on=verbose)
        R_ = R.T
        for i in range(ne):
            diff = R_ == R_[i]
            j = np.sum(diff, axis=1) >= (ne - TOL)
            j = np.where(j)[0].tolist()
            j.remove(i)
            if len(j) > 0:
                is_twin.append(i)
                twins.append(j)
            _progressbar_update(pb, i)
        _progressbar_finish(pb)
        # 4. generate surrogates
        _printmsg("generate surrogates ...", verbose)
        all_idx = range(ne)
        start_idx = np.random.choice(np.arange(ne), size=ns)
        xs[:, 0] = xe[start_idx, 0]
        pb = _progressbar_start(max_value=ns, pbar_on=verbose)
        for i in range(ns):
            j = 1
            k = start_idx[i]
            while j < nx:
                if k not in is_twin:
                    k += 1
                else:
                    twins_k = twins[is_twin.index(k)]
                    others = list(set(all_idx).difference(set(twins_k)))
                    l = np.random.choice(others)
                    k = np.random.choice(np.r_[l, twins_k])
                if k >= ne:
                    k = np.random.choice(np.arange(ne), size=1)
                xs[i, j] = xe[k, 0]
                j += 1
            _progressbar_update(pb, i)
        _progressbar_finish(pb)
    elif method == "shuffle":               # simple random shuffling
        k = np.arange(nx)
        for i in range(ns):
            j = np.random.permutation(k)
            xs[i] = x[j]
    return xs
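A minimal usage sketch for the iAAFT branch, assuming the module's progress-bar helpers are available; the correlated test series and the surrogate count are illustrative. With this implementation the amplitude distribution of x is preserved exactly (the rank-ordered rescaling is the last step of every iteration), while the power spectrum is matched only approximately.

import numpy as np

np.random.seed(42)
x = np.cumsum(np.random.randn(1024)) * 0.1           # correlated test series
xs = surrogates(x, ns=50, method="iaaft", verbose=False)

print(xs.shape)                                       # (50, 1024)
print(np.allclose(np.sort(xs[0]), np.sort(x)))        # amplitude distribution preserved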
def _get_rmd():
    """Estimates the RMD between ENSO and PDO."""
    # load data
    utils._printmsg("load data ...", args.verbose)
    t, x_enso, x_pdo = _load_indices()
    x = {
        "enso": x_enso,
        "pdo": x_pdo,
    }
    names = ["enso", "pdo"]

    # recurrence plot parameters
    EPS = 0.30
    thrby = "frr"

    # embedding parameters
    utils._printmsg("embedding parameters ...", args.verbose)
    n = len(t)
    m, tau = {}, {}
    R = {}
    maxlag = 150
    maxdim = 20
    r_fnn = 0.0010
    for name in names:
        if args.verbose:
            print("\t for %s" % name.upper())
        # embedding delay: first minimum of the self mutual information
        mi, mi_lags = rc.mi(x[name], maxlag, pbar_on=False)
        # mi, mi_lags = rc.acf(x[name], maxlag)
        mi_filt, _ = utils.boxfilter(mi, filter_width=3, estimate="mean")
        try:
            tau[name] = rc.first_minimum(mi_filt)
        except ValueError:
            tau[name] = 1
        # embedding dimension: first zero of the FNN fraction
        fnn, dims = rc.fnn(x[name], tau[name], maxdim=maxdim,
                           r=r_fnn, pbar_on=False)
        m[name] = dims[rc.first_zero(fnn)]

    # take the maximum delay and the maximum embedding dimension
    tau = np.max([tau["enso"], tau["pdo"]]).astype("int")
    m = np.max([m["enso"], m["pdo"]]).astype("int")

    # get surrogates
    utils._printmsg("surrogates ...", args.verbose)
    ns = args.nsurr
    SURR = {}
    params = {
        "m": m,
        "tau": tau,
        "eps": EPS,
        "norm": "euclidean",
        "thr_by": thrby,
        "tol": 2.
    }
    for name in names:
        utils._printmsg("\t for %s" % name.upper(), args.verbose)
        # SURR[name] = rc.surrogates(x[name], ns, "iaaft", verbose=args.verbose)
        SURR[name] = rc.surrogates(x[name], ns, "twins", params,
                                   verbose=args.verbose)

    # get RMD for the original data
    utils._printmsg("RMD for original data ...", args.verbose)
    ws, ss = args.window_size, args.step_size
    nw = int(np.floor(float(n - ws) / float(ss)))
    tm = np.empty(nw, dtype="object")
    for name in names:
        R[name] = rc.rp(
                        x[name],
                        m=m, tau=tau, e=EPS,
                        norm="euclidean", threshold_by=thrby,
                        )
    rmd = np.zeros(nw)
    pb = _progressbar_start(max_value=nw, pbar_on=args.verbose)
    for i in range(nw):
        start = i * ss
        end = start + ws
        Rw_enso = R["enso"][start:end, start:end]
        Rw_pdo = R["pdo"][start:end, start:end]
        rmd[i] = rqa.rmd(Rw_enso, Rw_pdo)
        tm[i] = t[start] + (t[end] - t[start]) / 2
        _progressbar_update(pb, i)
    _progressbar_finish(pb)

    # get RMD for the surrogate data
    utils._printmsg("RMD for surrogates ...", args.verbose)
    Rs = {}
    rmdsurr = np.zeros((ns, nw), dtype="float")
    pb = _progressbar_start(max_value=ns, pbar_on=args.verbose)
    for k in range(ns):
        for name in names:
            xs = SURR[name][k]
            Rs[name] = rc.rp(
                             xs,
                             m=m, tau=tau, e=EPS,
                             norm="euclidean", threshold_by=thrby,
                             )
        for i in range(nw):
            start = i * ss
            end = start + ws
            Rsw_enso = Rs["enso"][start:end, start:end]
            Rsw_pdo = Rs["pdo"][start:end, start:end]
            rmdsurr[k, i] = rqa.rmd(Rsw_enso, Rsw_pdo)
        _progressbar_update(pb, k)
    _progressbar_finish(pb)

    # get each individual array out of the dict to avoid a NumPy import error
    SURR_enso = SURR["enso"]
    SURR_pdo = SURR["pdo"]
    tm = np.array([date.toordinal() for date in tm])

    # save output
    EPS = int(EPS * 100)
    FN = DATPATH + "rmd_WS%d_SS%d_EPS%dpc_NSURR%d" \
         % (ws, ss, EPS, ns)
    np.savez(FN,
             rmd=rmd, tm=tm, rmdsurr=rmdsurr,
             SURR_enso=SURR_enso, SURR_pdo=SURR_pdo,
             )
    if args.verbose:
        print("output saved to: %s.npz" % FN)
    return None
def _get_spl():
    """
    Estimates the average shortest path length SPL for the indices.
    """
    # load data
    utils._printmsg("load data ...", args.verbose)
    t, x_enso, x_pdo = _load_indices()
    x = {
        "enso": x_enso,
        "pdo": x_pdo,
    }
    names = ["enso", "pdo"]

    # get surrogates
    utils._printmsg("iAAFT surrogates ...", args.verbose)
    ns = args.nsurr
    SURR = {}
    for name in names:
        utils._printmsg("\t for %s" % name.upper(), args.verbose)
        SURR[name] = rc.surrogates(x[name], ns, "iaaft",
                                   verbose=args.verbose)

    # recurrence plot parameters
    EPS, LMIN = 0.30, 3
    thrby = "frr"

    # get SPL for the original data
    utils._printmsg("SPL for original data ...", args.verbose)
    n = len(t)
    ws, ss = args.window_size, args.step_size
    nw = int(np.floor(float(n - ws) / float(ss)))
    tm = np.empty(nw, dtype="object")
    m, tau = {}, {}
    A = {}
    maxlag = 150
    maxdim = 20
    r_fnn = 0.0010
    SPL = {}
    for name in names:
        if args.verbose:
            print("\t for %s" % name.upper())
        # embedding delay: first minimum of the self mutual information
        mi, mi_lags = rc.mi(x[name], maxlag, pbar_on=False)
        # mi, mi_lags = rc.acf(x[name], maxlag)
        mi_filt, _ = utils.boxfilter(mi, filter_width=3, estimate="mean")
        try:
            tau[name] = rc.first_minimum(mi_filt)
        except ValueError:
            tau[name] = 1
        # embedding dimension: first zero of the FNN fraction
        fnn, dims = rc.fnn(x[name], tau[name], maxdim=maxdim,
                           r=r_fnn, pbar_on=False)
        m[name] = dims[rc.first_zero(fnn)]
        # recurrence network of the full series
        A[name] = rc.rn(
                        x[name],
                        m=m[name], tau=tau[name], e=EPS,
                        norm="euclidean", threshold_by=thrby,
                        )
        A_ = A[name]
        G_ = ig.Graph.Adjacency(A_.tolist(), mode=ig.ADJ_UNDIRECTED)
        # windowed SPL
        nw = len(tm)
        spl = np.zeros(nw)
        pb = _progressbar_start(max_value=nw, pbar_on=args.verbose)
        for i in range(nw):
            start = i * ss
            end = start + ws
            Gw = G_.subgraph(vertices=G_.vs[start:end])
            pl_hist = Gw.path_length_hist(directed=False)
            spl[i] = pl_hist.mean
            tm[i] = t[start] + (t[end] - t[start]) / 2
            _progressbar_update(pb, i)
        _progressbar_finish(pb)
        SPL[name] = spl

    # get SPL for the surrogate data
    utils._printmsg("SPL for surrogates ...", args.verbose)
    SPLSURR = {}
    for name in names:
        utils._printmsg("\tfor %s" % name.upper(), args.verbose)
        xs = SURR[name]
        splsurr = np.zeros((ns, nw), dtype="float")
        pb = _progressbar_start(max_value=ns, pbar_on=args.verbose)
        for k in range(ns):
            As = rc.rp(
                       xs[k],
                       m=m[name], tau=tau[name], e=EPS,
                       norm="euclidean", threshold_by=thrby,
                       )
            Gs = ig.Graph.Adjacency(As.tolist(), mode=ig.ADJ_UNDIRECTED)
            for i in range(nw):
                start = i * ss
                end = start + ws
                Gw = Gs.subgraph(vertices=Gs.vs[start:end])
                pl_hist = Gw.path_length_hist(directed=False)
                splsurr[k, i] = pl_hist.mean
            _progressbar_update(pb, k)
        _progressbar_finish(pb)
        SPLSURR[name] = splsurr

    # get each individual array out of the dict to avoid a NumPy import error
    SPL_enso = SPL["enso"]
    SPL_pdo = SPL["pdo"]
    SPLSURR_enso = SPLSURR["enso"]
    SPLSURR_pdo = SPLSURR["pdo"]
    SURR_enso = SURR["enso"]
    SURR_pdo = SURR["pdo"]
    tm = np.array([date.toordinal() for date in tm])

    # save output
    EPS = int(EPS * 100)
    FN = DATPATH + "spl_WS%d_SS%d_EPS%dpc_LMIN%d_NSURR%d" \
         % (ws, ss, EPS, LMIN, ns)
    np.savez(FN,
             SPL_enso=SPL_enso, SPL_pdo=SPL_pdo,
             SPLSURR_enso=SPLSURR_enso, SPLSURR_pdo=SPLSURR_pdo,
             SURR_enso=SURR_enso, SURR_pdo=SURR_pdo,
             tm=tm)
    if args.verbose:
        print("output saved to: %s.npz" % FN)
    return None
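A standalone sketch of the quantity being windowed above, without the repo's `rc.rn` helper: the fixed-recurrence-rate ("frr") thresholding is approximated here by a distance quantile, and the average shortest path length is read off igraph's path-length histogram exactly as in the function. The function name and the test signal are illustrative only.

import numpy as np
import igraph as ig

def spl_sketch(x, m=2, tau=1, rr=0.30):
    """Average shortest path length of a recurrence network of x."""
    # delay embedding into m dimensions with delay tau
    n = len(x) - (m - 1) * tau
    xe = np.column_stack([x[i * tau:i * tau + n] for i in range(m)])
    # pairwise Euclidean distances; threshold at the rr-quantile (~ fixed RR)
    D = np.sqrt(((xe[:, None, :] - xe[None, :, :]) ** 2).sum(axis=-1))
    eps = np.quantile(D[np.triu_indices(n, k=1)], rr)
    A = (D <= eps).astype(int)
    np.fill_diagonal(A, 0)            # recurrence networks carry no self-loops
    G = ig.Graph.Adjacency(A.tolist(), mode=ig.ADJ_UNDIRECTED)
    return G.path_length_hist(directed=False).mean

print(spl_sketch(np.sin(np.linspace(0., 20. * np.pi, 500)), m=2, tau=12))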
def _get_det():
    """
    Estimates the determinism DET for the indices.
    """
    # load data
    utils._printmsg("load data ...", args.verbose)
    t, x_enso, x_pdo = _load_indices()
    x = {
        "enso": x_enso,
        "pdo": x_pdo,
    }
    names = ["enso", "pdo"]

    # get surrogates
    utils._printmsg("iAAFT surrogates ...", args.verbose)
    ns = args.nsurr
    SURR = {}
    for name in names:
        utils._printmsg("\t for %s" % name.upper(), args.verbose)
        SURR[name] = rc.surrogates(x[name], ns, "iaaft",
                                   verbose=args.verbose)

    # recurrence plot parameters
    EPS, LMIN = 0.30, 3
    thrby = "frr"

    # get DET for the original data
    utils._printmsg("DET for original data ...", args.verbose)
    n = len(t)
    ws, ss = args.window_size, args.step_size
    nw = int(np.floor(float(n - ws) / float(ss)))
    tm = np.empty(nw, dtype="object")
    m, tau = {}, {}
    R = {}
    maxlag = 150
    maxdim = 20
    r_fnn = 0.0010
    DET = {}
    for name in names:
        if args.verbose:
            print("\t for %s" % name.upper())
        # embedding delay: first minimum of the self mutual information
        mi, mi_lags = rc.mi(x[name], maxlag, pbar_on=False)
        # mi, mi_lags = rc.acf(x[name], maxlag)
        mi_filt, _ = utils.boxfilter(mi, filter_width=3, estimate="mean")
        try:
            tau[name] = rc.first_minimum(mi_filt)
        except ValueError:
            tau[name] = 1
        # embedding dimension: first zero of the FNN fraction
        fnn, dims = rc.fnn(x[name], tau[name], maxdim=maxdim,
                           r=r_fnn, pbar_on=False)
        m[name] = dims[rc.first_zero(fnn)]
        # recurrence plot of the full series
        R[name] = rc.rp(
                        x[name],
                        m=m[name], tau=tau[name], e=EPS,
                        norm="euclidean", threshold_by=thrby,
                        )
        R_ = R[name]
        # windowed DET
        nw = len(tm)
        det = np.zeros(nw)
        pb = _progressbar_start(max_value=nw, pbar_on=args.verbose)
        for i in range(nw):
            start = i * ss
            end = start + ws
            Rw = R_[start:end, start:end]
            det[i] = rqa.det(Rw, lmin=LMIN, hist=None, verb=False)
            tm[i] = t[start] + (t[end] - t[start]) / 2
            _progressbar_update(pb, i)
        _progressbar_finish(pb)
        DET[name] = det

    # get DET for the surrogate data
    utils._printmsg("DET for surrogates ...", args.verbose)
    DETSURR = {}
    for name in names:
        utils._printmsg("\tfor %s" % name.upper(), args.verbose)
        xs = SURR[name]
        detsurr = np.zeros((ns, nw), dtype="float")
        pb = _progressbar_start(max_value=ns, pbar_on=args.verbose)
        for k in range(ns):
            Rs = rc.rp(
                       xs[k],
                       m=m[name], tau=tau[name], e=EPS,
                       norm="euclidean", threshold_by=thrby,
                       )
            for i in range(nw):
                start = i * ss
                end = start + ws
                Rw = Rs[start:end, start:end]
                detsurr[k, i] = rqa.det(Rw, lmin=LMIN, hist=None, verb=False)
            _progressbar_update(pb, k)
        _progressbar_finish(pb)
        DETSURR[name] = detsurr

    # get each individual array out of the dict to avoid a NumPy import error
    DET_enso = DET["enso"]
    DET_pdo = DET["pdo"]
    DETSURR_enso = DETSURR["enso"]
    DETSURR_pdo = DETSURR["pdo"]
    SURR_enso = SURR["enso"]
    SURR_pdo = SURR["pdo"]
    tm = np.array([date.toordinal() for date in tm])

    # save output
    EPS = int(EPS * 100)
    FN = DATPATH + "det_WS%d_SS%d_EPS%dpc_LMIN%d_NSURR%d" \
         % (ws, ss, EPS, LMIN, ns)
    np.savez(FN,
             DET_enso=DET_enso, DET_pdo=DET_pdo,
             DETSURR_enso=DETSURR_enso, DETSURR_pdo=DETSURR_pdo,
             SURR_enso=SURR_enso, SURR_pdo=SURR_pdo,
             tm=tm)
    if args.verbose:
        print("output saved to: %s.npz" % FN)
    return None
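For reference, a standalone sketch of the quantity `rqa.det` is assumed to return here: the fraction of recurrence points that lie on diagonal lines of length at least `lmin`. Unlike typical RQA implementations, this toy version does not exclude the main diagonal (line of identity), so it is illustrative only and the function name is hypothetical.

import numpy as np

def det_sketch(R, lmin=2):
    """Fraction of recurrence points on diagonal lines of length >= lmin."""
    n = R.shape[0]
    lengths = []
    for k in range(-(n - 1), n):                 # every diagonal of R
        d = np.diagonal(R, offset=k).astype(int)
        padded = np.r_[0, d, 0]                  # pad so runs of 1s are bounded
        starts = np.where(np.diff(padded) == 1)[0]
        ends = np.where(np.diff(padded) == -1)[0]
        lengths.extend(ends - starts)            # lengths of all diagonal lines
    lengths = np.asarray(lengths)
    total = float(lengths.sum())
    return lengths[lengths >= lmin].sum() / total if total > 0. else np.nan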
def _get_data():
    """
    Estimates the Lyapunov exponent, DET, and SPL for the Henon map.
    """
    # Henon map time series
    print("Henon map time series ...")
    t = np.arange(0, 10000, 1)
    a = np.linspace(1.28, 1.32, na).reshape(na, 1)
    j, k = (1 * na) // 8, ns // 2
    print("a = ", a[j])
    # sys.exit()
    b = 0.30
    nt = len(t)
    x, y = [np.zeros((nt, na, ns)) for i in range(2)]
    x[0, :, :] = 1E-2 * np.random.rand(na, ns)
    y[0, :, :] = 1E-2 * np.random.rand(na, ns)
    pb = _progressbar_start(max_value=nt, pbar_on=True)
    LPV = np.zeros((na, ns))
    for i in range(1, nt):
        x[i, :, :] = 1. - a * x[i - 1, :, :] ** 2 + y[i - 1, :, :]
        y[i, :, :] = b * x[i - 1, :, :]
        if i >= nt / 2:
            LPV[:, :] += np.log(np.fabs(-2. * a * x[i - 1, :, :]))
        _progressbar_update(pb, i)
    _progressbar_finish(pb)
    xH_eq = x[-neq:, :, :]
    LPV /= float(nt)

    # recurrence plot and DET for a single (a, b) pair (debugging block)
    print("RP ...")
    RR = 0.30
    y = xH_eq[:, j, k].flatten()
    R = rc.rp(y, m=2, tau=1, e=RR, norm="euclidean", threshold_by="frr")
    DET = rqa.det(R, lmin=2, hist=None, verb=False)
    print(DET)
    print("plot...")
    pl.subplot(211)
    pl.plot(y, alpha=0.5)
    pl.subplot(212)
    pl.imshow(R, cmap=pl.cm.gray_r, origin="lower", interpolation="none")
    pl.show()
    sys.exit()

    # NOTE: the code below is unreachable while the sys.exit() above is active
    print("plot data ...")
    xplot = np.zeros((na, neq * ns))
    for i in range(na):
        xplot[i] = xH_eq[:, i, :].flatten()
    print("plot ...")
    fig = pl.figure(figsize=[21., 12.], facecolor="none")
    ax = fig.add_axes([0.10, 0.10, 0.80, 0.80])
    ax.plot(a, xplot, "o",
            ms=1.00, alpha=0.25, rasterized=True,
            mfc="k", mec="none")
    print("prettify ...")
    ax.tick_params(labelsize=14, size=8)
    ax.tick_params(size=5, which="minor")
    # ax.set_xticks(np.arange(1.0, 1.401, 0.05), minor=False)
    # ax.set_xticks(np.arange(1.0, 1.401, 0.01), minor=True)
    ax.grid(which="both")
    # ax.set_xlim(1.0, 1.4)
    print("save figure ...")
    FN = "../plots/" + __file__[2:-3] + ".png"
    fig.savefig(FN, rasterized=True, dpi=100)
    print("figure saved to: %s" % FN)
    sys.exit()

    # estimate embedding parameters
    print("embedding parameters ...")
    tau, m = np.ones(na, dtype="int"), 2 * np.ones(na, dtype="int")

    # DET
    print("DET ...")
    RR = 0.25
    DET = np.zeros((na, ns))
    pb = _progressbar_start(max_value=ns * na, pbar_on=True)
    k = 0
    for j in range(ns):
        for i in range(na):
            R = rc.rp(xH_eq[:, i, j], m=m[i], tau=tau[i], e=RR,
                      norm="euclidean", threshold_by="frr")
            DET[i, j] = rqa.det(R, lmin=2, hist=None, verb=False)
            del R
            _progressbar_update(pb, k)
            k += 1
    _progressbar_finish(pb)

    # SPL
    print("SPL ...")
    SPL = np.zeros((na, ns))
    pb = _progressbar_start(max_value=ns * na, pbar_on=True)
    k = 0
    for j in range(ns):
        for i in range(na):
            A = rc.rn(xH_eq[:, i, j], m=m[i], tau=tau[i], e=RR,
                      norm="euclidean", threshold_by="frr")
            G = ig.Graph.Adjacency(A.tolist(), mode=ig.ADJ_UNDIRECTED)
            pl_hist = G.path_length_hist(directed=False)
            SPL[i, j] = pl_hist.mean
            del A, G
            _progressbar_update(pb, k)
            k += 1
    _progressbar_finish(pb)

    # save output
    FN = DATPATH + "det_spl_lpv_na%d_ns%s_neq%d" % (na, ns, neq)
    np.savez(FN, DET=DET, SPL=SPL, LPV=LPV, t=t, a=a, b=b, x=x, y=y)
    print("saved to %s.npz" % FN)
    return None