Code example #1
def _embed_solo(t, x, ws, ss):
    """Embed the time series for each window"""
    n = len(t)
    nw = int(np.floor(float(n - ws) / float(ss)))
    tm = np.empty(nw, dtype="object")
    m, tau = [np.zeros(nw, dtype="int") for i in range(2)]
    xw = np.zeros((nw, ws), dtype="float")
    maxlag = 150
    maxdim = 10
    R = 0.025
    pb = _progressbar_start(max_value=nw, pbar_on=args.verbose)
    for i in range(nw):
        start = i * ss
        end = start + ws
        x_ = x[start:end]
        xw[i] = x_
        # get mi
        mi, mi_lags = rc.mi(x_, maxlag, pbar_on=False)
        mi_filt, _ = utils.boxfilter(mi, filter_width=3, estimate="mean")
        try:
            tau[i] = rc.first_minimum(mi_filt)
        except ValueError:
            tau[i] = 1
        # FNN
        fnn, dims = rc.fnn(x_, tau[i], maxdim=maxdim, r=R, pbar_on=False)
        m[i] = dims[rc.first_zero(fnn)]
        tm[i] = t[start] + (t[end] - t[start]) / 2
        _progressbar_update(pb, i)
    _progressbar_finish(pb)
    return tm, xw, m, tau
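The (m, tau) pair estimated for each window is the classical time-delay embedding parameter set. As a minimal, self-contained sketch of how such a window could then be embedded (NumPy only; `delay_embed` is a hypothetical helper, not part of the original module):

import numpy as np

def delay_embed(x, m, tau):
    """Standard Takens delay embedding: each row is one delay vector."""
    n = len(x) - (m - 1) * tau
    return np.column_stack([x[i * tau:i * tau + n] for i in range(m)])

# e.g. for window i returned by _embed_solo:
# zi = delay_embed(xw[i], m[i], tau[i])   # shape (ws - (m[i]-1)*tau[i], m[i])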
Code example #2
def _get_data():
    """
    Runs the analysis and saves the data to be used later for plotting.
    """
    # get the Roessler trajectory
    print("Roessler trajectory ...")
    t = np.linspace(0., 1000., 100000)
    params = (0.432, 2., 4.)
    x0 = (1., 3., 5.)
    pos = tm.roessler(x0, t, params)
    i_eq = 90000
    x, y, z = pos[i_eq:, 0], pos[i_eq:, 1], pos[i_eq:, 2]
    t = t[i_eq:]

    # set the X component as our measured signal
    s = x.copy()

    # get mi
    print("MI ...")
    maxlag = np.where(t <= (t[0] + 10.))[0][-1].astype("int")
    mi, mi_lags = rc.mi(s, maxlag)
    mi_filt, _ = utils.boxfilter(mi, filter_width=25, estimate="mean")
    tau_mi = rc.first_minimum(mi_filt)
    print("FNN ...")
    M = 10
    R = 0.50
    fnn_mi, dims_mi = rc.fnn(s, tau_mi, maxdim=M, r=R)
    m_mi = dims_mi[rc.first_zero(fnn_mi)]

    # save data
    print("save output ...")
    FN = "../data/delay_embedding/results"
    np.savez(FN,
             x=x,
             y=y,
             z=z,
             t=t,
             params=params,
             x0=x0,
             i_eq=i_eq,
             s=s,
             maxlag=maxlag,
             mi=mi,
             mi_lags=mi_lags,
             mi_filt=mi_filt,
             tau_mi=tau_mi,
             M=M,
             R=R,
             fnn_mi=fnn_mi,
             dims_mi=dims_mi,
             m_mi=m_mi)
    print("saved to: %s.npz" % FN)
    return None
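For reference, `tm.roessler` is the project's own toy-model integrator. A minimal SciPy sketch, assuming the conventional Rössler equations and that `params` maps to (a, b, c), would look like the following; the actual `tm.roessler` implementation may differ:

import numpy as np
from scipy.integrate import odeint

def roessler_rhs(state, t, a, b, c):
    # conventional Roessler system (parameter order is an assumption)
    x, y, z = state
    return [-(y + z), x + a * y, b + z * (x - c)]

t = np.linspace(0., 1000., 100000)
pos = odeint(roessler_rhs, (1., 3., 5.), t, args=(0.432, 2., 4.))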
Code example #3
def _embed_pair(t, x, y, ws, ss):
    """Determines common embedding parameters for both time series"""
    n = len(t)
    nw = int(np.floor(float(n - ws) / float(ss)))
    tm = np.empty(nw, dtype="object")
    m, tau = [np.zeros(nw, dtype="int") for i in range(2)]
    xw, yw = [np.zeros((nw, ws), dtype="float") for i in range(2)]
    maxlag = 150
    maxdim = 10
    R = 0.025
    pb = _progressbar_start(max_value=nw, pbar_on=args.verbose)
    for i in range(nw):
        start = i * ss
        end = start + ws
        x_ = x[start:end]
        y_ = y[start:end]
        xw[i] = x_
        yw[i] = y_
        # get mi
        mi1, mi_lags1 = rc.mi(x_, maxlag, pbar_on=False)
        mi_filt1, _ = utils.boxfilter(mi1, filter_width=3, estimate="mean")
        tau1 = rc.first_minimum(mi_filt1)
        mi2, mi_lags2 = rc.mi(y_, maxlag, pbar_on=False)
        mi_filt2, _ = utils.boxfilter(mi2, filter_width=3, estimate="mean")
        tau2 = rc.first_minimum(mi_filt2)
        tau[i] = int(max(tau1, tau2))
        # FNN
        fnn1, dims1 = rc.fnn(x_, tau[i], maxdim=maxdim, r=R, pbar_on=False)
        m1 = dims1[rc.first_zero(fnn1)]
        fnn2, dims2 = rc.fnn(y_, tau[i], maxdim=maxdim, r=R, pbar_on=False)
        m2 = dims2[rc.first_zero(fnn2)]
        m[i] = int(max(m1, m2))
        tm[i] = t[start] + (t[end] - t[start]) / 2
        _progressbar_update(pb, i)
    _progressbar_finish(pb)
    return tm, xw, yw, m, tau
Code example #4
    Xnoise = np.random.randn(500)
    Tnoise = np.arange(1, 501)
    Rnoise = rc.rp(Xnoise, m=1, tau=1, e=0.1,
                   norm="euclidean", threshold_by="frr")

    # load the Nino 3.4 data, estimate embedding parameters and get RP
    # load Nino 3.4 index data
    D = np.loadtxt("../data/enso/nino.txt", delimiter=",", skiprows=5)
    Y, M = D[:, 0], D[:, 1]
    Xnino = D[:, -1]
    # convert time info to datetime array
    Tnino = []
    for y, m in zip(Y, M):
        Tnino.append(dt.datetime(int(y), int(m), 15))
    Tnino = np.array(Tnino)
    mi, lags = rc.mi(Xnino, maxlag=100)
    i = rc.first_minimum(mi)
    tau = lags[i]
    fnn, dims = rc.fnn(Xnino, tau, maxdim=20, r=0.01)
    i = rc.first_zero(fnn)
    m = dims[i]
    Rnino = rc.rp(Xnino, m, tau, e=0.1,
                  norm="euclidean", threshold_by="frr")

    # load the FordA data, estimate embedding parameters and get RP
    D = np.loadtxt("../data/fordA/FordA_TEST.txt", delimiter=",")
    k = 1325
    Xford = D[k, 1:]
    Tford = np.arange(Xford.shape[0])
    mi, lags = rc.mi(Xford, maxlag=100)
    i = rc.first_minimum(mi)
Code example #5
# B. Superposed harmonics RP
print("B. Superposed harmonics RP ...")
N = 500
t = np.arange(N)
T1, T2, T3 = 10., 50., 75.
A1, A2, A3 = 1., 1.5, 2.
# T1, T2 = 10., 50.
# A1, A2 = 1., 1.5
twopi = 2. * np.pi
H1 = A1 * np.sin((twopi * t) / T1)
H2 = A2 * np.sin((twopi * t) / T2)
H3 = A3 * np.sin((twopi * t) / T3)
x = H1 + H2 + H3
# get mi
maxlag = 150
mi, mi_lags = rc.mi(x, maxlag)
mi_filt, _ = utils.boxfilter(mi, filter_width=3, estimate="mean")
tau_mi = rc.first_minimum(mi_filt)
# FNN
M = 20
R = 0.025
fnn_mi, dims_mi = rc.fnn(x, tau_mi, maxdim=M, r=R)
m_mi = dims_mi[rc.first_zero(fnn_mi)]
R = rc.rp(x,
          m=m_mi,
          tau=tau_mi,
          e=0.15,
          norm="euclidean",
          threshold_by="frr",
          normed=True)
ax2.imshow(R,
Code example #6
def _get_communities():
    """
    Identifies the optimal community structure based on modularity.
    """
    # load data
    utils._printmsg("load data ...", args.verbose)
    t, x_enso, x_pdo = _load_indices()
    x = {
        "enso": x_enso,
        "pdo": x_pdo,
    }
    names = ["enso", "pdo"]

    # recurrence plot parameters
    EPS = 0.30
    thrby = "frr"

    # embedding parameters
    utils._printmsg("embedding parameters ...", args.verbose)
    n = len(t)
    m, tau = {}, {}
    R = {}
    maxlag = 150
    maxdim = 20
    r_fnn = 0.0010
    for name in names:
        if args.verbose: print("\t for %s" % name.upper())
        # get embedding parameters
        ## get mi
        mi, mi_lags = rc.mi(x[name], maxlag, pbar_on=False)
        # mi, mi_lags = rc.acf(x[name], maxlag)
        mi_filt, _ = utils.boxfilter(mi, filter_width=3, estimate="mean")
        try:
            tau[name] = rc.first_minimum(mi_filt)
        except ValueError:
            tau[name] = 1
        ## FNN
        fnn, dims = rc.fnn(x[name],
                           tau[name],
                           maxdim=maxdim,
                           r=r_fnn,
                           pbar_on=False)
        m[name] = dims[rc.first_zero(fnn)]

    # # print out embedding dimensions for documentation in the paper
    # print m
    # print tau
    # sys.exit()

    # identify communities using modularity
    utils._printmsg("communities based on modularity ...", args.verbose)
    COMM = {}
    for name in names:
        utils._printmsg("\tfor %s" % name.upper(), args.verbose)
        A = rc.rn(x[name],
                  m=m[name],
                  tau=tau[name],
                  e=EPS,
                  norm="euclidean",
                  threshold_by="frr",
                  normed=True)

        # optimize modularity
        utils._printmsg("\toptimize modularity ...", args.verbose)
        G = ig.Graph.Adjacency(A.tolist(), mode=ig.ADJ_UNDIRECTED)
        dendro = G.community_fastgreedy()
        # dendro = G.community_edge_betweenness(directed=False)
        clust = dendro.as_clustering()
        # clust = G.community_multilevel()
        mem = clust.membership
        COMM[name] = mem

    # get each individual array out of the dict to avoid a NumPy import error
    x_enso = x["enso"]
    x_pdo = x["pdo"]
    COMM_enso = COMM["enso"]
    COMM_pdo = COMM["pdo"]
    t = np.array([date.toordinal() for date in t])

    # save output
    EPS = int(EPS * 100)
    FN = DATPATH + "communities_EPS%d" \
                   % EPS
    np.savez(FN,
             x_enso=x_enso,
             x_pdo=x_pdo,
             t=t,
             COMM_enso=COMM_enso,
             COMM_pdo=COMM_pdo,
             m=m,
             tau=tau,
             e=EPS,
             thrby=thrby)
    if args.verbose: print("output saved to: %s.npz" % FN)

    return None
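Here `threshold_by="frr"` with `e=EPS` fixes the recurrence rate rather than the distance threshold. A minimal sketch of that idea (an assumption about what `rc.rn` does internally, not its actual code): choose the threshold as the EPS-quantile of the pairwise distances of the embedded trajectory.

import numpy as np
from scipy.spatial.distance import pdist, squareform

def rn_frr_sketch(z, eps=0.30):
    """Adjacency of a recurrence network with a fixed recurrence rate eps.

    z is the delay-embedded trajectory with shape (N, m).
    """
    D = squareform(pdist(z, metric="euclidean"))          # pairwise distances
    thr = np.quantile(D[np.triu_indices_from(D, k=1)], eps)
    A = (D <= thr).astype(int)
    np.fill_diagonal(A, 0)                                 # no self-loops
    return A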
Code example #7
def _get_rmd():
    """Estimates the RMD between ENSO and PDO"""
    # load data
    utils._printmsg("load data ...", args.verbose)
    t, x_enso, x_pdo = _load_indices()
    x = {
        "enso": x_enso,
        "pdo": x_pdo,
    }
    names = ["enso", "pdo"]

    # recurrence plot parameters
    EPS = 0.30
    thrby = "frr"

    # embedding parameters
    utils._printmsg("embedding parameters ...", args.verbose)
    n = len(t)
    m, tau = {}, {}
    R = {}
    maxlag = 150
    maxdim = 20
    r_fnn = 0.0010
    for name in names:
        if args.verbose: print("\t for %s" % name.upper())
        # get embedding parameters
        ## get mi
        mi, mi_lags = rc.mi(x[name], maxlag, pbar_on=False)
        # mi, mi_lags = rc.acf(x[name], maxlag)
        mi_filt, _ = utils.boxfilter(mi, filter_width=3, estimate="mean")
        try:
            tau[name] = rc.first_minimum(mi_filt)
        except ValueError:
            tau[name] = 1
        ## FNN
        fnn, dims = rc.fnn(x[name],
                           tau[name],
                           maxdim=maxdim,
                           r=r_fnn,
                           pbar_on=False)
        m[name] = dims[rc.first_zero(fnn)]
    # take the maximum delay and the maximum embedding dimension
    tau = np.max([tau["enso"], tau["pdo"]]).astype("int")
    m = np.max([m["enso"], m["pdo"]]).astype("int")

    # get surrogates
    utils._printmsg("surrogates ...", args.verbose)
    ns = args.nsurr
    SURR = {}
    params = {
        "m": m,
        "tau": tau,
        "eps": EPS,
        "norm": "euclidean",
        "thr_by": thrby,
        "tol": 2.
    }
    for name in names:
        utils._printmsg("\t for %s" % name.upper(), args.verbose)
        # SURR[name] = rc.surrogates(x[name], ns, "iaaft", verbose=args.verbose)
        SURR[name] = rc.surrogates(x[name],
                                   ns,
                                   "twins",
                                   params,
                                   verbose=args.verbose)

    # get RMD for original data
    utils._printmsg("RMD for original data ...", args.verbose)
    ws, ss = args.window_size, args.step_size
    nw = int(np.floor(float(n - ws) / float(ss)))
    tm = np.empty(nw, dtype="object")
    for name in names:
        R[name] = rc.rp(
            x[name],
            m=m,
            tau=tau,
            e=EPS,
            norm="euclidean",
            threshold_by=thrby,
        )
    rmd = np.zeros(nw)
    pb = _progressbar_start(max_value=nw, pbar_on=args.verbose)
    for i in range(nw):
        start = i * ss
        end = start + ws
        Rw_enso = R["enso"][start:end, start:end]
        Rw_pdo = R["pdo"][start:end, start:end]
        rmd[i] = rqa.rmd(Rw_enso, Rw_pdo)
        tm[i] = t[start] + (t[end] - t[start]) / 2
        _progressbar_update(pb, i)
    _progressbar_finish(pb)

    # get RMD for surrogate data
    utils._printmsg("RMD for surrogates ...", args.verbose)
    Rs = {}
    rmdsurr = np.zeros((ns, nw), dtype="float")
    pb = _progressbar_start(max_value=ns, pbar_on=args.verbose)
    for k in range(ns):
        for name in names:
            xs = SURR[name][k]
            Rs[name] = rc.rp(
                xs,
                m=m,
                tau=tau,
                e=EPS,
                norm="euclidean",
                threshold_by=thrby,
            )
        for i in range(nw):
            start = i * ss
            end = start + ws
            Rsw_enso = Rs["enso"][start:end, start:end]
            Rsw_pdo = Rs["pdo"][start:end, start:end]
            rmdsurr[k, i] = rqa.rmd(Rsw_enso, Rsw_pdo)
        _progressbar_update(pb, k)
    _progressbar_finish(pb)

    # get each individual array out of the dict to avoid a NumPy import error
    SURR_enso = SURR["enso"]
    SURR_pdo = SURR["pdo"]
    tm = np.array([date.toordinal() for date in tm])

    # save output
    EPS = int(EPS * 100)
    FN = DATPATH + "rmd_WS%d_SS%d_EPS%dpc_NSURR%d" \
                   % (ws, ss, EPS, ns)
    np.savez(
        FN,
        rmd=rmd,
        tm=tm,
        rmdsurr=rmdsurr,
        SURR_enso=SURR_enso,
        SURR_pdo=SURR_pdo,
    )
    if args.verbose: print("output saved to: %s.npz" % FN)

    return None
Code example #8
def _get_spl():
    """
    Estimates the average shortest path length SPL for the indices.
    """
    # load data
    utils._printmsg("load data ...", args.verbose)
    t, x_enso, x_pdo = _load_indices()
    x = {
        "enso": x_enso,
        "pdo": x_pdo,
    }
    names = ["enso", "pdo"]

    # get surrogates
    utils._printmsg("iAAFT surrogates ...", args.verbose)
    ns = args.nsurr
    SURR = {}
    for name in names:
        utils._printmsg("\t for %s" % name.upper(), args.verbose)
        SURR[name] = rc.surrogates(x[name], ns, "iaaft", verbose=args.verbose)

    # recurrence plot parameters
    EPS, LMIN = 0.30, 3
    thrby = "frr"

    # get SPL for original data
    utils._printmsg("SPL for original data ...", args.verbose)
    n = len(t)
    ws, ss = args.window_size, args.step_size
    nw = int(np.floor(float(n - ws) / float(ss)))
    tm = np.empty(nw, dtype="object")
    m, tau = {}, {}
    A = {}
    maxlag = 150
    maxdim = 20
    r_fnn = 0.0010
    SPL = {}
    for name in names:
        if args.verbose: print("\t for %s" % name.upper())
        # get embedding parameters
        ## get mi
        mi, mi_lags = rc.mi(x[name], maxlag, pbar_on=False)
        # mi, mi_lags = rc.acf(x[name], maxlag)
        mi_filt, _ = utils.boxfilter(mi, filter_width=3, estimate="mean")
        try:
            tau[name] = rc.first_minimum(mi_filt)
        except ValueError:
            tau[name] = 1
        ## FNN
        fnn, dims = rc.fnn(x[name],
                           tau[name],
                           maxdim=maxdim,
                           r=r_fnn,
                           pbar_on=False)
        m[name] = dims[rc.first_zero(fnn)]
        A[name] = rc.rn(
            x[name],
            m=m[name],
            tau=tau[name],
            e=EPS,
            norm="euclidean",
            threshold_by=thrby,
        )
        A_ = A[name]
        G_ = ig.Graph.Adjacency(A_.tolist(), mode=ig.ADJ_UNDIRECTED)
        nw = len(tm)
        spl = np.zeros(nw)
        pb = _progressbar_start(max_value=nw, pbar_on=args.verbose)
        for i in range(nw):
            start = i * ss
            end = start + ws
            Gw = G_.subgraph(vertices=G_.vs[start:end])
            pl_hist = Gw.path_length_hist(directed=False)
            spl[i] = pl_hist.mean
            tm[i] = t[start] + (t[end] - t[start]) / 2
            _progressbar_update(pb, i)
        _progressbar_finish(pb)
        SPL[name] = spl

    # get SPL for surrogate data
    utils._printmsg("SPL for surrogates ...", args.verbose)
    SPLSURR = {}
    for name in names:
        utils._printmsg("\tfor %s" % name.upper(), args.verbose)
        xs = SURR[name]
        y = np.diff(xs, axis=0)
        splsurr = np.zeros((ns, nw), dtype="float")
        pb = _progressbar_start(max_value=ns, pbar_on=args.verbose)
        for k in range(ns):
            As = rc.rp(
                xs[k],
                m=m[name],
                tau=tau[name],
                e=EPS,
                norm="euclidean",
                threshold_by=thrby,
            )
            Gs = ig.Graph.Adjacency(As.tolist(), mode=ig.ADJ_UNDIRECTED)
            for i in range(nw):
                start = i * ss
                end = start + ws
                Gw = Gs.subgraph(vertices=Gs.vs[start:end])
                pl_hist = Gw.path_length_hist(directed=False)
                splsurr[k, i] = pl_hist.mean
            _progressbar_update(pb, k)
        _progressbar_finish(pb)
        SPLSURR[name] = splsurr

    # get each individual array out of the dict to avoid a NumPy import error
    SPL_enso = SPL["enso"]
    SPL_pdo = SPL["pdo"]
    SPLSURR_enso = SPLSURR["enso"]
    SPLSURR_pdo = SPLSURR["pdo"]
    SURR_enso = SURR["enso"]
    SURR_pdo = SURR["pdo"]
    tm = np.array([date.toordinal() for date in tm])

    # save output
    EPS = int(EPS * 100)
    FN = DATPATH + "spl_WS%d_SS%d_EPS%dpc_LMIN%d_NSURR%d" \
                   % (ws, ss, EPS, LMIN, ns)
    np.savez(FN,
             SPL_enso=SPL_enso,
             SPL_pdo=SPL_pdo,
             SPLSURR_enso=SPLSURR_enso,
             SPLSURR_pdo=SPLSURR_pdo,
             SURR_enso=SURR_enso,
             SURR_pdo=SURR_pdo,
             tm=tm)
    if args.verbose: print("output saved to: %s.npz" % FN)

    return None
Code example #9
def _get_det():
    """
    Estimates the determinism DET for the indices.
    """
    # load data
    utils._printmsg("load data ...", args.verbose)
    t, x_enso, x_pdo = _load_indices()
    x = {
        "enso": x_enso,
        "pdo": x_pdo,
    }
    names = ["enso", "pdo"]

    # get surrogates
    utils._printmsg("iAAFT surrogates ...", args.verbose)
    ns = args.nsurr
    SURR = {}
    for name in names:
        utils._printmsg("\t for %s" % name.upper(), args.verbose)
        SURR[name] = rc.surrogates(x[name], ns, "iaaft", verbose=args.verbose)

    # recurrence plot parameters
    EPS, LMIN = 0.30, 3
    thrby = "frr"

    # get DET for original data
    utils._printmsg("DET for original data ...", args.verbose)
    n = len(t)
    ws, ss = args.window_size, args.step_size
    nw = int(np.floor(float(n - ws) / float(ss)))
    tm = np.empty(nw, dtype="object")
    m, tau = {}, {}
    R = {}
    maxlag = 150
    maxdim = 20
    r_fnn = 0.0010
    DET = {}
    for name in names:
        if args.verbose: print("\t for %s" % name.upper())
        # get embedding parameters
        ## get mi
        mi, mi_lags = rc.mi(x[name], maxlag, pbar_on=False)
        # mi, mi_lags = rc.acf(x[name], maxlag)
        mi_filt, _ = utils.boxfilter(mi, filter_width=3, estimate="mean")
        try:
            tau[name] = rc.first_minimum(mi_filt)
        except ValueError:
            tau[name] = 1
        ## FNN
        fnn, dims = rc.fnn(x[name],
                           tau[name],
                           maxdim=maxdim,
                           r=r_fnn,
                           pbar_on=False)
        m[name] = dims[rc.first_zero(fnn)]
        R[name] = rc.rp(
            x[name],
            m=m[name],
            tau=tau[name],
            e=EPS,
            norm="euclidean",
            threshold_by=thrby,
        )
        R_ = R[name]
        nw = len(tm)
        det = np.zeros(nw)
        pb = _progressbar_start(max_value=nw, pbar_on=args.verbose)
        for i in range(nw):
            start = i * ss
            end = start + ws
            Rw = R_[start:end, start:end]
            det[i] = rqa.det(Rw, lmin=LMIN, hist=None, verb=False)
            tm[i] = t[start] + (t[end] - t[start]) / 2
            _progressbar_update(pb, i)
        _progressbar_finish(pb)
        DET[name] = det

    # get DET for surrogate data
    utils._printmsg("DET for surrogates ...", args.verbose)
    DETSURR = {}
    for name in names:
        utils._printmsg("\tfor %s" % name.upper(), args.verbose)
        xs = SURR[name]
        y = np.diff(xs, axis=0)
        detsurr = np.zeros((ns, nw), dtype="float")
        pb = _progressbar_start(max_value=ns, pbar_on=args.verbose)
        for k in range(ns):
            Rs = rc.rp(
                xs[k],
                m=m[name],
                tau=tau[name],
                e=EPS,
                norm="euclidean",
                threshold_by=thrby,
            )
            for i in range(nw):
                start = i * ss
                end = start + ws
                Rw = Rs[start:end, start:end]
                detsurr[k, i] = rqa.det(Rw, lmin=LMIN, hist=None, verb=False)
            _progressbar_update(pb, k)
        _progressbar_finish(pb)
        DETSURR[name] = detsurr

    # get each individual array out of the dict to avoid a NumPy import error
    DET_enso = DET["enso"]
    DET_pdo = DET["pdo"]
    DETSURR_enso = DETSURR["enso"]
    DETSURR_pdo = DETSURR["pdo"]
    SURR_enso = SURR["enso"]
    SURR_pdo = SURR["pdo"]
    tm = np.array([date.toordinal() for date in tm])

    # save output
    EPS = int(EPS * 100)
    FN = DATPATH + "det_WS%d_SS%d_EPS%dpc_LMIN%d_NSURR%d" \
                   % (ws, ss, EPS, LMIN, ns)
    np.savez(FN,
             DET_enso=DET_enso,
             DET_pdo=DET_pdo,
             DETSURR_enso=DETSURR_enso,
             DETSURR_pdo=DETSURR_pdo,
             SURR_enso=SURR_enso,
             SURR_pdo=SURR_pdo,
             tm=tm)
    if args.verbose: print("output saved to: %s.npz" % FN)

    return None
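The determinism computed above is the standard RQA measure: the fraction of recurrence points that lie on diagonal lines of length at least lmin. A minimal NumPy-only sketch of that definition (illustrative, not the project's rqa.det implementation):

import numpy as np

def det_sketch(R, lmin=3):
    """DET of a binary recurrence matrix R (square, 0/1 entries)."""
    n = R.shape[0]
    lengths = []
    # diagonal line lengths from the upper triangle only: an RP is
    # symmetric, and the line of identity is excluded as is common
    for k in range(1, n):
        diag = np.diagonal(R, offset=k)
        run = 0
        for v in diag:
            if v:
                run += 1
            elif run:
                lengths.append(run)
                run = 0
        if run:
            lengths.append(run)
    lengths = np.asarray(lengths)
    if lengths.size == 0 or lengths.sum() == 0:
        return 0.0
    return lengths[lengths >= lmin].sum() / lengths.sum()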