import numpy as np

import langevin


def correlated_ts(c, delta_t=0.1, N=1000):
    """Generate two correlated OU time series by mixing two independent ones."""
    # parameters for the coupled oscillator: spring constant K and diffusion D
    K, D = 1.0, 1.0
    # two independent OU series with amplitudes 1/K and 1/(K + |c|)
    data1 = langevin.time_series(A=1/K, D=D, delta_t=delta_t, N=N)
    data2 = langevin.time_series(A=1/(K + np.abs(c)), D=D, delta_t=delta_t, N=N)
    # sum and difference modes give the correlated pair x1, x2
    x1 = (data1 + data2)/2
    if c > 0:
        x2 = (data1 - data2)/2
    else:
        x2 = (data2 - data1)/2

    return x1, x2
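# A minimal usage sketch (illustrative addition, not part of the original
# code). It assumes langevin.time_series returns a stationary OU series whose
# stationary variance is approximately A, in which case the pair should have
# cov(x1, x2) ~ (1/K - 1/(K + c))/4 for c > 0 (with K = 1 as above).
x1, x2 = correlated_ts(c=1.0, delta_t=0.1, N=100000)
print("sample covariance :", np.cov(x1, x2)[0, 1])
print("expected (approx) :", (1/1.0 - 1/(1.0 + 1.0))/4)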
# initial prior
# both D and A have prior mean 1 and variance 10
alpha_A = 2.1
beta_A = 1.1
alpha_D = 0.1
beta_D = 0.1
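# Illustrative check (an assumption, not taken from the original code): if the
# A prior is an inverse-gamma(alpha_A, beta_A) and the D prior is a
# gamma(alpha_D, rate=beta_D), these hyperparameters do give mean 1 and
# variance 10 for both parameters.
from scipy import stats
print(stats.invgamma(a=alpha_A, scale=beta_A).stats(moments='mv'))   # (1.0, 10.0)
print(stats.gamma(a=alpha_D, scale=1/beta_D).stats(moments='mv'))    # (1.0, 10.0)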

# compile model for reuse
sm = lcm.OU_DA()
sm.samples = 100000

result_array = None

for i in range(M):
    print("***** Iteration ", i, " *****")
    data = langevin.time_series(A=A, D=D, delta_t=delta_t, N=N)
    # calculate autocorrelation function via FFT (Wiener-Khinchin theorem)
    f = np.fft.rfft(data)
    acf = np.fft.irfft(f * np.conjugate(f))
    acf = np.fft.fftshift(acf) / N
    autocorr = acf[int(N / 2):]

    y = autocorr[:min(int(N / 2), P)]
    t = np.arange(min(int(N / 2), P))

    mod = ExponentialModel()
    pars = mod.guess(y, x=t)
    try:
        out = mod.fit(y, pars, x=t)
    except Exception:
        fit_results = np.zeros(4)
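# Standalone sketch of the technique used in the loop above, with assumed demo
# values for A, D, delta_t, N and P: the one-sided autocorrelation is obtained
# via the Wiener-Khinchin route (FFT, squared magnitude, inverse FFT) and then
# fitted with a single exponential. Assuming A is the stationary variance and
# A/D the relaxation time of the generated OU series, the fit should recover
# amplitude ~ A and decay ~ A/(D*delta_t) lags.
import numpy as np
from lmfit.models import ExponentialModel
import langevin

A, D, delta_t, N, P = 1.0, 1.0, 0.1, 10000, 500   # assumed demo values
data = langevin.time_series(A=A, D=D, delta_t=delta_t, N=N)

f = np.fft.rfft(data)
acf = np.fft.irfft(f * np.conjugate(f))
acf = np.fft.fftshift(acf) / N
autocorr = acf[N // 2:]

y = autocorr[:P]
t = np.arange(P)

mod = ExponentialModel()                         # amplitude * exp(-x / decay)
out = mod.fit(y, mod.guess(y, x=t), x=t)
print(out.params['amplitude'].value)             # ~ A
print(1/(out.params['decay'].value * delta_t))   # ~ D/A (inverse relaxation time)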
# A has prior mean 1 and variance 10
# B has a uniform prior over the [0,1] interval
alpha_A = 2.1
beta_A = 1.1
alpha_B = 1
beta_B = 1

# compile model for reuse
sm = lcm.OU_BA()
sm.samples=100000

result_array = None

for i in range(M):
    print("***** Iteration ",i," *****")
    data = langevin.time_series(A=A, D=D, delta_t=delta_t, N=N)

    # summary statistics passed to the sampler: squared first/last points,
    # a sum of squared intermediate points, and the lag-1 product sum
    data_results = [data[0]**2, data[-1]**2, np.sum(data[1:-2]**2), np.sum(data[:-1]*data[1:])]

    # calculate autocorrelation function via FFT (Wiener-Khinchin theorem)
    f = np.fft.rfft(data)
    acf = np.fft.irfft(f * np.conjugate(f))
    acf = np.fft.fftshift(acf) / N
    autocorr = acf[int(N / 2):]

    y = autocorr[:min(int(N / 2), P)]
    t = np.arange(min(int(N / 2), P))

    mod = ExponentialModel()
    pars = mod.guess(y, x=t)
    try:
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d',
                        '--output',
                        action='store',
                        default="./test",
                        help='output file name')
    parser.add_argument('-a',
                        '--amplitude',
                        action='store',
                        type=float,
                        default=1.0,
                        help='amplitude of OU')
    parser.add_argument('-r',
                        '--tau',
                        action='store',
                        type=float,
                        default=1.0,
                        help='relaxation time tau of OU')
    parser.add_argument('-w',
                        '--noise',
                        action='store',
                        type=float,
                        default=1.0,
                        help='power of the noise')
    parser.add_argument('-n',
                        '--length',
                        action='store',
                        type=int,
                        default=2000,
                        help='length of simulation in timesteps')
    parser.add_argument('-s',
                        '--samples',
                        action='store',
                        type=int,
                        default=2000,
                        help='MCMC samples per run')
    parser.add_argument('-b',
                        '--dtstart',
                        action='store',
                        type=float,
                        default=0.01,
                        help='delta t range start')
    parser.add_argument('-e',
                        '--dtend',
                        action='store',
                        type=float,
                        default=2.0,
                        help='delta t range end')
    parser.add_argument('-l',
                        '--dtlength',
                        action='store',
                        type=int,
                        default=50,
                        help='delta t range number of points')
    parser.add_argument('-u',
                        '--duplicates',
                        action='store',
                        type=int,
                        default=1,
                        help='number of duplicate runs per delta t value')

    # get arguments
    arg = parser.parse_args()

    filename = arg.output
    a = arg.amplitude
    tau = arg.tau
    pn = arg.noise
    n = arg.length
    mc_samples = arg.samples
    dtstart = arg.dtstart
    dtend = arg.dtend
    dtlength = arg.dtlength
    dupl = arg.duplicates

    delta_t_list = np.linspace(dtstart, dtend, dtlength)

    result_array = None
    for delta_t in delta_t_list:
        print(delta_t)
        for i in range(dupl):
            data = langevin.time_series(A=a, D=a / tau, delta_t=delta_t, N=n)
            # add white measurement noise with variance pn on top of the OU path
            dataN = data + np.random.normal(loc=0.0, scale=np.sqrt(pn), size=n)
            with pm.Model() as model:
                # priors for the OU path parameters (B on [0,1], A on [0,5])
                # and for the measurement-noise scale sigma
                B = pm.Beta('B', alpha=5.0, beta=1.0)
                A = pm.Uniform('A', lower=0, upper=5)
                sigma = pm.Uniform('sigma', lower=0, upper=5)

                # latent OU path observed through additive Gaussian noise
                path = Ornstein_Uhlenbeck('path', A=A, B=B, shape=len(dataN))
                dataObs = pm.Normal('dataObs',
                                    mu=path,
                                    sigma=sigma,
                                    observed=dataN)
                trace = pm.sample(mc_samples, cores=4)

            a_mean = trace['A'].mean()
            b_mean = trace['B'].mean()
            a_std = trace['A'].std()
            b_std = trace['B'].std()
            sigma_mean = trace['sigma'].mean()
            sigma_std = trace['sigma'].std()
            avgpath = np.mean(trace['path'], axis=0)
            stddiff = np.std(data - avgpath)
            stdpath = np.std(trace['path'], axis=0).mean()

            results = [
                delta_t, a_mean, a_std, b_mean, b_std, sigma_mean, sigma_std,
                stddiff, stdpath
            ]
            if result_array is None:
                result_array = results
            else:
                result_array = np.vstack((result_array, results))

    column_names = [
        "delta_t", "a_mean", "a_std", "b_mean", "b_std", "sigma_mean",
        "sigma_std", "stddiff", "stdpath"
    ]
    df = pd.DataFrame(result_array, columns=column_names)
    df.to_csv(filename + '.csv', index=False)
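# Standard entry-point guard (added as a sketch; it assumes this script's
# module-level imports -- argparse, numpy as np, pandas as pd, the PyMC
# namespace pm, langevin, and the project's Ornstein_Uhlenbeck distribution --
# are in place above).
if __name__ == '__main__':
    main()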
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import lmfit as lm
from scipy import signal
from lmfit.models import ExponentialModel
import pandas as pd
from itertools import accumulate
import langevin

A1 = 1.0
A2 = 0.5
D = 1.0
delta_t = 0.5

datadir = 'results/delta05/corr2/'

N = 10000  # length of data set

P = 1000  # range to fit acf

# two independent OU series with different amplitudes
x1 = langevin.time_series(A1, D, delta_t, N)
x2 = langevin.time_series(A2, D, delta_t, N)

# sum and difference modes give a correlated pair
xx1 = (x1 + x2)/2.0
xx2 = (x1 - x2)/2.0

df = pd.DataFrame({'x1': xx1, 'x2': xx2})
df.to_csv(datadir + 'data.csv', index=False)
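# Hedged check (illustrative addition, not in the original script): assuming
# langevin.time_series(A, D, delta_t, N) returns a stationary OU series with
# stationary variance approximately A, the construction above gives
# cov(xx1, xx2) = (var(x1) - var(x2))/4 ~ (A1 - A2)/4 = 0.125.
print("sample covariance :", np.cov(xx1, xx2)[0, 1])
print("expected (approx) :", (A1 - A2)/4)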