Code Example #1
def run(t,
        re_0=8.0,
        ri_0=12.0,
        J_ee=2.1,
        J_ie=1.9,
        J_ei=1.5,
        J_ii=1.1,
        tau_e=10e-3,
        tau_i=30e-3,
        I_e=12,
        I_i=8,
        sigma=1,
        dt=1e-4):

    rs_0 = asarray([re_0, ri_0, re_0, ri_0, re_0, ri_0])

    # !
    times = create_times((0, t), dt)

    # If sigma > 0: we re-define xjw as a stochastic ODE.
    g = partial(ornstein_uhlenbeck, sigma=sigma, loc=[0, 1])  # re/i locs
    f = partial(xjw,
                J_ee=J_ee,
                J_ei=J_ei,
                J_ie=J_ie,
                J_ii=J_ii,
                tau_e=tau_e,
                tau_i=tau_i,
                I_e=I_e,
                I_i=I_i)

    rs = itoint(f, g, rs_0, times)

    return times, rs
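
Every example on this page follows the same calling convention: sdeint.itoint(f, G, y0, tspan) integrates dy = f(y, t) dt + G(y, t) dW over the grid tspan and returns an array with one row per time point. As a minimal self-contained sketch (a simple Ornstein-Uhlenbeck process, not taken from any of the projects quoted below):

import numpy as np
import sdeint

tspan = np.linspace(0.0, 10.0, 2001)
y0 = np.array([1.0])

def f(y, t):
    return -1.0 * y           # drift: relax toward zero

def G(y, t):
    return np.array([[0.3]])  # constant diffusion, shape (d, m) = (1, 1)

y = sdeint.itoint(f, G, y0, tspan)  # array of shape (len(tspan), 1)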
Code Example #2
def main():

    alpha = np.linspace(0.001,0.1,N)
    kappa = np.linspace(2,0.02,N)
    avgdev = np.linspace(0.1,0.001,N)
    rho = np.linspace(-0.95,-0.4,N)
    tspan = np.linspace(0,10,1001)
    xo = np.array([0,0.05])

    foda_men = h5py.File('stochastic_heston.h5','a')
    foda_men.require_dataset("Heston",((20*N**4),1001,2),dtype='float32')

    i = 0  # start at 0 so every row of the (20*N**4)-row dataset is written; starting at 1 overruns the last index
    for al in alpha:
        for k in kappa:
            for m in avgdev:
                for r in rho:
                    def f(x,t):
                        return np.array([0,-al*(x[1]-m)])

                    def G(x,t):
                        return np.array([[x[1],0],[k*r,k*np.sqrt(1-r*r)]])
                    for j in range(20):
                        foda_men["Heston"][i,:,:] = sdeint.itoint(f, G, xo, tspan)

                        i += 1
                        if (i%100 == 0):
                            print(i)
    print(i)
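
Read literally, the drift f and diffusion G above define the two-factor system dX = V dW1, dV = -alpha*(V - m) dt + kappa*(rho*dW1 + sqrt(1 - rho**2)*dW2): a Heston-style model in which the factor V mean-reverts to m at rate alpha and the two Brownian drivers are correlated with coefficient rho (swept over negative values in the loops above).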
Code Example #3
def burgers(t: np.ndarray, N: int) -> np.ndarray:
    #
    # Integrate the SDE defined by f (with zero noise here) from N starting
    # points spread over [-5, 5), each shifted slightly into the complex
    # plane, and collect the end values.
    #
    # Input:
    #   t : time grid for the integration
    #   N : number of starting points
    #
    # Output:
    #   output : complex array of length N holding the values at t[-1]
    #
    B = np.diag([0])
    # 1x1 diagonal noise matrix; the zero entry makes the Brownian term vanish
    T = t[-1]
    result = []

    def f(x, t):
        res = np.sqrt(x**2 - 16 * (T - t))
        return -4 / (x + res) if (res.imag > 0) else -4 / (x - res)

    for j in range(0, N, 1):
        bremen = sdeint.itoint(f, lambda x, t: B, -5 + 10 * j / N + 1j / 1000,
                               t)
        munich = bremen[-1]
        result.append(complex(munich[0]))  # np.complex was removed from NumPy; use the builtin

    output = np.empty(len(result), np.complex128)
    for i in range(len(result)):
        output[i] = result[i]

    return output
Code Example #4
File: test_integrate.py Project: yoavram/sdeint
def test_mismatched_f():
    tspan = np.arange(0.0, 2000.0, 0.002)
    y0 = np.zeros(3)
    f = lambda y, t: np.array([1.0, 2.0, 3.0, 4.0])
    G = lambda y, t: np.ones((3, 3))
    with pytest.raises(sdeint.SDEValueError):
        y = sdeint.itoint(f, G, y0, tspan)
Code Example #6
def integrate_sde(init_states, t, f, sigma, num_t=100, **interp_kwargs):
    try:
        from sdeint import itoint
    except ImportError as err:
        raise ImportError("Please install sdeint using `pip install sdeint`") from err

    init_states = np.atleast_2d(init_states)
    n, d = init_states.shape

    if isarray(t):
        if len(t) == 1:
            t = np.linspace(0, t[0], num_t)
        elif len(t) == 2:
            t = np.linspace(t[0], t[-1], num_t)
    else:
        t = np.linspace(0, t, num_t)

    if callable(sigma):
        D_func = sigma
    elif isarray(sigma):
        if sigma.ndim == 1:
            sigma = np.diag(sigma)
        D_func = lambda x, t: sigma
    else:
        sigma = sigma * np.eye(d)
        D_func = lambda x, t: sigma

    trajs = []
    for y0 in init_states:
        y = itoint(lambda x, t: f(x), D_func, y0, t)
        trajs.append(y)
    if n == 1:
        trajs = trajs[0]

    return np.array(trajs)
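
A minimal usage sketch for the helper above (the 2-D drift and the parameter values are illustrative, not from the source): f is wrapped as lambda x, t: f(x), so the user-supplied drift takes only the state, and sigma may equivalently be a callable, a vector or matrix, or a plain scalar.

import numpy as np

traj = integrate_sde(init_states=np.array([1.0, 0.0]),
                     t=10.0,                  # scalar t -> np.linspace(0, 10.0, num_t)
                     f=lambda x: -0.5 * x,    # drift depends on the state only
                     sigma=0.1,               # scalar -> 0.1 * np.eye(2)
                     num_t=200)
# traj has shape (200, 2) because a single initial state was supplied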
Code Example #7
    def solve(self, time_vector=np.linspace(0, 10, 10000)):

        soln = sdeint.itoint(self.rate_of_experience, self.noise, self.experiences_of_choices, time_vector)
        low_values_indices = soln < 0  # Where values are low
        soln[low_values_indices] = 0

        self.plot(soln,time_vector)
Code Example #8
File: test_integrate.py Project: yoavram/sdeint
def test_ito_1D_additive():
    tspan = np.arange(0.0, 2000.0, 0.002)
    y0 = 0.0
    f = lambda y, t: -1.0 * y
    G = lambda y, t: 0.2
    y = sdeint.itoint(f, G, y0, tspan)
    assert(np.isclose(np.mean(y), 0.0, rtol=0, atol=1e-02))
    assert(np.isclose(np.var(y), 0.2*0.2/2, rtol=1e-01, atol=0))
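
The two assertions are a worked check of the Ornstein-Uhlenbeck process dy = -y dt + 0.2 dW defined by f and G: its stationary mean is 0 and its stationary variance is sigma**2 / (2*theta) = 0.2**2 / 2 = 0.02, which the sample statistics over the long tspan are compared against.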
Code Example #10
File: kuramoto_local.py Project: razib764/pyPTE
def repetitive_sims(K, N, theta0, omega, Nsims):
    solutions = list()
    for _ in itertools.repeat(None, Nsims):
        f, G = kuramoto(omega, K, N, 1)
        tspan = np.linspace(0, t_end, steps)
        solution = itoint(f, G, theta0, tspan)
        solution = np.mod(solution, 2 * np.pi)  # wrap phases to [-pi, pi) before storing
        solution -= np.pi
        solutions.append(solution)
    return solutions
Code Example #11
def calc_for_oscillation_with_Ito(m, init_cond, alpha, Tmax, ito, int_finish,
                                  step):
    tspan = np.arange(0.0, int_finish + step, step)
    forcing = -1.0 * m.Ft(tspan)
    parameters = {}
    parameters['Tmax'] = Tmax
    parameters['alpha'] = alpha
    m.update_parameters(parameters)

    def G(y, t):
        return np.array([[ito, 0.0, 0.0, 0.0], [0.0, ito, 0.0, 0.0],
                         [0.0, 0.0, ito, 0.0], [0.0, 0.0, 0.0, ito]])

    result = sdeint.itoint(m.rhs_ode, G, init_cond, tspan)
    return tspan, result, forcing
Code Example #12
    def solve(self, time_vector=np.linspace(0, 10, 10000)):

        t = time_vector
        soln = sdeint.itoint(self.rate_of_experience, self.noise, self.experiences_of_choices, time_vector)
        low_values_indices = soln < 0  # Where values are low
        soln[low_values_indices] = 0
        #print(soln)

        # for i in range(len(self.orbits_for_exp_at_node_1[0])):
        #     if self.orbits_for_exp_at_node_1[0][i] < 0:
        #         self.orbits_for_exp_at_node_1[0][i] = 0
        #     if self.orbits_for_exp_at_node_1[1][i] < 0:
        #         self.orbits_for_exp_at_node_1[1][i] = 0

        self.plot(soln,t)
Code Example #13
File: ode_int.py Project: johnlees/competition_model
def integrate_piece(t, N_end, K, r_res, r_chal, a_RC, a_CR, mode='ode'):
    if mode == 'sde':
        f = dN_dt_stochatic(K, r_res, r_chal, a_RC, a_CR)
        G = brownian(K, r_res, r_chal, a_RC, a_CR)
        N = sdeint.itoint(f, G, N_end, t)
    elif mode == 'ctmc':
        t, N = gillespie(t[0], t[-1], N_end[0], N_end[1], K, r_res, r_chal,
                         a_RC, a_CR)
    else:
        N = integrate.odeint(dN_dt,
                             N_end,
                             t,
                             args=(K, r_res, r_chal, a_RC, a_CR),
                             Dfun=d2N_dt2)

    return (t, N)
Code Example #14
    def simulate(self, initial_state: Union[float, list, np.array],
                 initial_time: float, final_time: float,
                 t_delta_integration: float) -> Tuple[np.array, np.array]:
        """
        Integrate the system using an sdeint built-in SDE solver.
        :param initial_state: initial state of the system;
        :param initial_time: initial time of the simulation;
        :param final_time: final time of the simulation;
        :param t_delta_integration: time between integration intervals.
        :return: a tuple (states, times): states has shape [self.dim, n_times]
            and times has shape [n_times, 1].
        """
        f = self._build_f()
        g = self._build_g()
        t_int = np.arange(initial_time, final_time, t_delta_integration)
        system_int = sdeint.itoint(f, g, initial_state, t_int)
        return system_int.T, t_int.reshape(-1, 1)
Code Example #15
    def simulate(self, method="BDF", atol=1.e-5, rtol=1.e-6, G=None):

        if not self.initial_eq:
            print("Equations are not initialised.")
            return

        if not self.initial_state:
            print("Initial conditions are missing.")
            return

        # set up ode system
        def ode_fun(t, y):

            r = y[0]
            dr = y[1]
            Q = y[2:]

            M = self.pms_sym.M_mod(r, dr, Q)
            F = self.pms_sym.F_mod(r, dr, Q)
            v = self.pms_sym.v_mod(r, dr, Q)

            return np.concatenate([np.array([dr, F / M]), v])

        if G is not None:
            if self.t_eval is None:
                self.t_eval = np.linspace(0, self.t_end, 1000)

            res = si.itoint(lambda y, t: ode_fun(t, y), G, self.y0,
                            self.t_eval)

            self.sol = lambda: 0
            self.sol.message = "SDE integrator finished."
            self.sol.y = res.T
            self.sol.t = self.t_eval

        else:
            self.sol = solve_ivp(ode_fun, [0, self.t_end],
                                 self.y0,
                                 method=method,
                                 atol=atol,
                                 rtol=rtol,
                                 t_eval=self.t_eval)

        self.r = self.sol.y[0, :]
        self.dr = self.sol.y[1, :]
        self.Q = self.sol.y[2:, :]
Code Example #16
def kuramoto(t, N, omega=10, K=3, sigma=0.01, dt=1e-3):
    """Simulate a Kuramoto model."""

    times = np.linspace(0, t, int(t / dt))

    omegas = np.repeat(omega, N)
    theta0 = np.random.uniform(-np.pi * 2, np.pi * 2, size=N)

    # Define the model
    def _f(theta, t):
        # In the classic Kuramoto model each oscillator gets
        # the same coupling weight K, normalized by the number
        # of oscillators
        c = K / N

        # and all oscillators are connected to all
        # oscillators
        theta = np.atleast_2d(theta)  # opt for broadcasting
        W = np.sum(np.sin(theta - theta.T), 1)

        ep = np.random.normal(0, sigma, N)

        return omega + ep + (c * W)

    # And the Ito noise term
    def _G(_, t):
        return np.diag(np.ones(N) * sigma)

    # Integrate!
    thetas = itoint(_f, _G, theta0, times)
    thetas = np.mod(thetas, 2 * np.pi)
    thetas -= np.pi

    # Move to time domain from the unit circle
    waves = []
    for n in range(N):
        th = thetas[:, n]
        f = omegas[n]

        wave = np.sin(f * 2 * np.pi * times + th)
        waves.append(wave)
    waves = np.vstack(waves)

    return waves
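
Written out, the drift _f implements the stochastic Kuramoto model d(theta_i) = (omega + (K/N) * sum_j sin(theta_j - theta_i)) dt plus a per-step Gaussian perturbation ep, while _G adds independent Wiener noise of amplitude sigma to every oscillator; the integrated phases are then wrapped to [-pi, pi) and converted into sinusoidal time series.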
Code Example #17
def simulate(theta0, T, omegas, K, N, sigma, p, dt):
    """Simulate a Kuramoto model."""

    times = np.linspace(0, T, int(T / dt))

    def G(_, t):
        return np.diag(np.ones(N) * sigma)

    if np.allclose(p, 1):

        def f(theta, t):
            return kuramoto(theta, t, omegas, K, N, sigma)
    else:

        def f(theta, t):
            return onoff(theta, t, omegas, K, N, sigma, p)

    thetas = itoint(f, G, theta0, times)
    thetas = np.mod(thetas, 2 * np.pi)
    thetas -= np.pi

    return thetas, times
Code Example #18
def multiple_driving_functions(x0: RealNumber,
                               t: RealNumber,
                               kappa: RealNumber = 1.0) -> List[RealNumber]:
    #number of slits: it will be the number of starting points
    nslits = len(x0)
    #nslits independent Brownian motions with scaling factor kappa
    B = np.diag(np.zeros(nslits, dtype=complex) + np.sqrt(kappa / nslits))

    #defining the equation...
    def f(x, t):
        res = np.zeros(nslits, dtype=complex)
        # res[k] = sum of 2 / (x[k] - x[m]) over all m != k
        for k in range(nslits):
            for m in range(nslits):
                if k != m:
                    res[k] += 2 / (x[k] - x[m])
        return res

    #solving the SDE
    result = sdeint.itoint(f, lambda x, t: B, x0, t)
    #returning the nslits driving functions
    return [list(i) for i in zip(*result)]
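
Written out, the system being integrated is dx_k = sum over m != k of 2 / (x_k - x_m) dt + sqrt(kappa / nslits) dB_k, with nslits independent Brownian motions B_k (the pairwise-repulsion form used for multiple Loewner driving functions); itoint returns an array of shape (len(t), nslits), and the final zip(*result) transposes it into one driving function per slit.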
Code Example #19
    def solve(self, **kwargs):
        """ Solves initial value problem """

        if (len(self.sol) == 0):
            # Solve only if not already solved
            if(self.method == 'EuMa'):
                self.sol_cartesian = sdeint.itoEuler(self.f, self.G, self.y0, self.ts, **kwargs)

            elif (self.method == 'Ito'):
                self.sol_cartesian = sdeint.itoint(self.f, self.G, self.y0, self.ts, **kwargs)

            elif (self.method == 'Strato'):
                self.sol_cartesian = sdeint.stratint(self.f, self.G, self.y0, self.ts, **kwargs)

            else:
                raise ValueError('Only supported methods are EuMa, Ito and Strato')
        else:
            # Do nothing
            pass

        if self.topology == 'cartesian':
            self.sol = self.sol_cartesian
        elif self.topology == 'torus':
            self.sol = np.mod(self.sol_cartesian, 2*np.pi)
Code Example #20
import numpy as np
import sdeint

A = np.array([[0., -0.], [0., -0.]], dtype=np.complex128)

B = np.hstack([np.diag([1.0, 1.0]), np.diag([1.0j, 1.0j])
               ])  # diagonal, so independent driving Wiener processes

tspan = np.linspace(0.0, 100.0, 10001)
x0 = np.array([0., 0.], dtype=np.complex128)


def f(x, t):
    return A.dot(x)


def G(x, t):
    return B


result = sdeint.itoint(f, G, x0, tspan)
print(result)
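
Dimension bookkeeping for this example: x0 has two complex components and B (returned by G) is 2x4, so four independent Wiener processes drive the two components, and result has shape (len(tspan), 2) with one row per time point.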
Code Example #21
File: langevin.py Project: sevenseank/sdexplore
tspan = N / fs
#t = np.linspace(0.0, 200.0, 10001)
t = np.linspace(0.0, tspan, N + 1)
x0 = 0.1


def f_regular(x, t):
    return a * x - b * x**3 + A * np.cos(omega * t + phi)
    #return A*np.cos(omega*t+phi)


def f_noise(x, t):
    return sigma


result = sdeint.itoint(f_regular, f_noise, x0, t)

x = result[:, 0]

spectrum = np.abs(np.fft.fft(x))**2
#spectrum = np.log(spectrum)
time_step = t[1] - t[0]
freqs = np.fft.fftfreq(x.size, time_step)
#idx = np.argsort(freqs)
idx = np.arange(N // 2)  # integer indices for the positive-frequency half of the spectrum

plt.subplot(3, 1, 1)
plt.plot(t, x)
plt.subplot(3, 1, 2)
plt.plot(freqs[idx], spectrum[idx])
plt.subplot(3, 1, 3)
Code Example #22
File: load_models.py Project: ddarmon/simESN
def load_model_data_io(model_name, N, ds_by = None, dim = None):
	if model_name == 'starx':
		p_true = (1, 2)
		model_type = 'nlar'

		burnin = 100

		Ntot = burnin + N

		c = 0.8
		d = 1
		a1 = 2
		a0 = 1
		b1 = 0.5
		b0 = -0.5
		s = 1

		Y = numpy.zeros(Ntot)
		X = numpy.zeros(Ntot)

		epsX = numpy.random.randn(Ntot)
		epsY = numpy.random.randn(Ntot)

		for t in range(1, Ntot):
			w = norm.cdf(Y[t-1])

			Y[t] = c*Y[t-1] + d*epsX[t]
			X[t] = w*(b1*X[t-1] + a1*epsY[t]) + (1-w)*(b0*X[t-1] + a0*epsY[t])

		Y = Y[burnin:]
		X = X[burnin:]

	if model_name == 'shenon':
		p_true = (2, 2)
		model_type = 'nlar'

		burnin = 100

		Ntot = burnin + N

		# C = 0.6 # Used in STE paper.
		C = 0.2
		# C = 0.01

		# s = 0.004 # Used in STE paper
		# s = 2*0.004

		sY = 0.008
		sX = 0.008

		Y = numpy.zeros((2, Ntot))
		X = numpy.zeros((2, Ntot))

		noise = numpy.random.randn(Ntot*4).reshape(4, -1)

		noise[:2, :] = noise[0:2, :]*sX
		noise[2:, :] = noise[0:2, :]*sY

		for t in range(1, Ntot):
			Y[0, t] = 1.4 - Y[0, t-1]**2 + 0.3*Y[1, t-1] + noise[0, t]
			Y[1, t] = Y[0, t-1] + noise[1, t]

			X[0, t] = 1.4 - (C*Y[0, t-1] + (1-C)*X[0, t-1])*X[0, t-1] + 0.3*X[1, t-1] + noise[2, t]
			X[1, t] = X[0, t-1] + noise[3, t]

			Z = numpy.concatenate((Y[:, t], X[:, t]))
			Znorm = numpy.linalg.norm(Z)

			if Znorm >= 4:
				J = int(numpy.random.choice(1000))

				Y[:, t] = Y[:, J]
				X[:, t] = X[:, J]

		Y = Y[0, burnin:]
		X = X[0, burnin:]

	if 'lorenz96' in model_name:
		model_type = 'nlar'
		p_true = numpy.inf
		# h = 0.01
		h = 0.05

		if dim == None:
			dim = 47

		if ds_by == None:
			# ds_by = 5
			ds_by = 2

		ttot = N*h*ds_by
		tburn = 100
		tf = ttot + tburn

		tspan = numpy.linspace(0.0, tf, int(tf/h))
		x0 = numpy.random.rand(dim)

		def F(X, t):
			d = len(X)
			dX = numpy.zeros(d)

			for k in range(d):
				dX[k] = (X[(k + 1) % d] - X[(k - 2) % d])*X[(k - 1) % d] - X[k] + 5

			return dX

		def G(x, t):
			return B

		# dyn_noise = 0
		dyn_noise = 0.5

		B = numpy.diag([dyn_noise]*dim)

		N_biggest = 1000

		if N > N_biggest:
			num_segments = int(N / N_biggest)+1
			tp_per_segment = int(N_biggest*ds_by)
			for seg_ind in range(num_segments):
				# print("On segment {} of {}...".format(seg_ind+1, num_segments))
				result_cur = sdeint.itoint(F, G, x0, tspan[seg_ind*tp_per_segment:(seg_ind+1)*tp_per_segment])

				if seg_ind == 0:
					result = result_cur
				else:
					result = numpy.concatenate((result, result_cur))

				x0 = result[-1, :]
		else:
			result = sdeint.itoint(F, G, x0, tspan)		

		tspan = tspan[int(tburn/h)::ds_by]
		result = result[int(tburn/h)::ds_by, :]

		Y = result[:, 0]
		X = result[:, 1]


	return Y, X, p_true, model_type
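
The lorenz96 branch above integrates a long trajectory in segments, carrying the final state of one sdeint.itoint call into the next. A generic sketch of that pattern (the helper name itoint_segmented is ours, not part of sdeint; segments here share their boundary point so no time step is skipped):

import numpy as np
import sdeint

def itoint_segmented(f, G, y0, tspan, seg_len=1000):
    """Integrate over tspan in chunks of seg_len steps, chaining the state."""
    pieces = []
    for k in range(0, len(tspan) - 1, seg_len):
        seg = tspan[k:k + seg_len + 1]            # overlap by one grid point
        res = sdeint.itoint(f, G, y0, seg)
        y0 = res[-1]
        last = (k + seg_len) >= (len(tspan) - 1)
        pieces.append(res if last else res[:-1])  # drop duplicated boundary rows
    return np.concatenate(pieces)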
Code Example #23
File: load_models.py Project: ddarmon/simESN
def load_model_data(model_name, N, ds_by = None, dt = None):
	"""
	Generate time series from various model systems, including:
	
		arch
		(s)logistic
		(s)henon
		(s)lorenz
		(s)rossler
		(s)tent
		setar

	where the 's' prefix indicates a stochastic 
	difference / differential equation  version of the standard
	deterministic model.


	Parameters
	----------
	model_name : str
			The name of the model to use. See function description
			for list of available models.

	N : int
			The number of time points to simulate from the model
			system.

	ds_by : int
			Specify the amount to downsample a continuous-time
			time series by. By default, use the hard coded values.

	Returns
	-------
	x : numpy.array
			The time series.
	p_true : int
			The true model order of the system. If not
			a Markov model, p_true = numpy.inf
	model_type : str
			The model type.

	"""

	if model_name == 'arch':
		# Simulate from an ARCH(1) model:
		model_type = 'arch'
		p_true = 1
		x = numpy.zeros(N)
		u = numpy.random.randn(N)

		x[:1] = u[:1]

		for n in range(1, x.shape[0]):
			x[n] = numpy.sqrt(0.5 + 0.5*x[n-1]**2)*u[n]

		# Simulate from an ARCH(2) model:
		# p_true = 2

		# x = numpy.zeros(N)
		# u = numpy.random.randn(N)

		# x[:2] = u[:2]

		# for n in range(2, x.shape[0]):
		# 	x[n] = numpy.sqrt(0.5 + 0.5*x[n-1]**2 + 0.5*x[n-2]**2)*u[n]

	if model_name == 'slogistic' or model_name == 'logistic':
		# NLAR(1) model from:
		# 
		# **Estimation of conditional densities and sensitivity measures in nonlinear dynamical systems**
		model_type = 'nlar'
		p_true = 1
		x = numpy.zeros(N)

		if model_name == 'slogistic':
			noise = (numpy.random.rand(N, 48) - 0.5)/2./16.
			noise = noise.sum(1)
		else:
			noise = numpy.zeros(N)

		x[0] = numpy.random.rand(1)
		for t in range(1, x.shape[0]):
			x[t] = 3.68*x[t-1]*(1 - x[t-1]) + 0.4*noise[t]

			if x[t] <= 0 or x[t] >= 1:
				x[t] = numpy.random.rand(1)

	if model_name == 'shenon' or model_name == 'henon':
		# NLAR(2) model from:
		# 
		# **On prediction and chaos in stochastic systems**
		#
		# Namely, stochastic Henon.
		#
		# Also on page 188 (dp 200) of *Chaos: A Statistical Perspective*.
		model_type = 'nlar'
		p_true = 2

		burnin = 1000

		x = numpy.zeros(N + burnin)

		if model_name == 'shenon':
			# From original paper:
			noise = (numpy.random.rand(N + burnin, 48) - 0.5)/2.
			noise = noise.sum(1)
		else:
			noise = numpy.zeros(N + burnin)

		x[:2] = 10*numpy.random.rand(1) - 5
		for t in range(2, x.shape[0]):
			x[t] = 6.8 - 0.19*x[t-1]**2 + 0.28*x[t-2] + 0.2*noise[t]

			if x[t] < -10:
				x[t] = 10*numpy.random.rand(1) - 5

		x_w_burnin = x.copy()
		x = x[burnin:]

	if model_name == 'stent' or model_name == 'tent':
		# Tent map with noise as from the logistic map.
		# This is also a SETAR(1; 1, 1) model, in the
		# vein of Tong.

		model_type = 'nlar'
		p_true = 1

		x = numpy.zeros(N)

		if model_name == 'stent':
			noise = (numpy.random.rand(N, 48) - 0.5)/2./16.
			noise = noise.sum(1)
		else:
			noise = numpy.zeros(N)

		# No noise:
		# noise = numpy.zeros(N)

		x[0] = numpy.random.rand(1)
		for t in range(1, x.shape[0]):
			if x[t-1] < 0.5:
				x[t] = 3.68/2.*x[t-1] + 0.4*noise[t]
			else:
				x[t] = 3.68/2. - 3.68/2.*x[t-1] + 0.4*noise[t]

			if x[t] <= 0:
			   x[t] = numpy.random.rand(1)

	if model_name == 'setar':
		model_type = 'setar'

		# Simulate from a SETAR(2; 1, 1) model:

		# p_true = 1

		# x = numpy.zeros(N)
		# u = numpy.random.randn(N)

		# x[:1] = u[:1]

		# for n in range(1, x.shape[0]):
		# 	# From "Sieve bootstrap for time series" by Buhlmann in Bernoulli
		# 	if x[n-1] <= 0:
		# 		x[n] = 1.5 - 0.9*x[n-1] + u[n]
		# 	else:
		# 		x[n] = -0.4 - 0.6*x[n-1] + u[n]

			# if x[n-1] <= 0:
			# 	x[n] = 1.5 - 0.9*x[n-1] + 0.5*u[n]
			# else:
			# 	x[n] = -0.4 - 0.6*x[n-1] + u[n]

			# if x[n-1] <= 0:
			# 	x[n] = 1.5 - 0.9*x[n-1] + 0.5*u[n]
			# else:
			# 	x[n] = -0.4 + 0.1*x[n-1] + u[n]

		# Simulate from a SETAR(2; 2, 2) model:
		# p_true = 2
		# x = numpy.zeros(N)
		# u = numpy.random.randn(N)

		# x[:2] = u[:2]

		# for n in range(2, x.shape[0]):
		# 	if x[n-1] <= 0:
		# 		x[n] = 1.5 - 0.9*x[n-1] + 0.2*x[n-2] + u[n]
		# 	else:
		# 		x[n] = -0.4 - 0.6*x[n-1] + 0.1*x[n-2] + u[n]

		# Simulate from a SETAR(2; 2, 2) model used to model 
		# the Canadian Lynx data from Tong's *Non-linear 
		# Time Series*, p. 178:
		p_true = 2
		x = numpy.zeros(N)
		u = numpy.random.randn(N)

		x[:2] = 2*numpy.random.rand(2) + 2

		for n in range(2, x.shape[0]):
		  if x[n-2] <= 3.25:
			  x[n] = 0.62 + 1.25*x[n-1] - 0.43*x[n-2] + numpy.sqrt(0.0381)*u[n]
		  else:
			  x[n] = 2.25 + 1.52*x[n-1] - 1.24*x[n-2] + numpy.sqrt(0.0626)*u[n]



	if 'lorenz' in model_name:
		model_type = 'nlar'

		p_true = 4

		if dt == None:
			h = 0.05
		else:
			h = dt

		if ds_by == None:
			ds_by = 2

		ttot = N*h*ds_by
		tburn = 20
		tf = ttot + tburn

		tspan = numpy.linspace(0.0, tf, int(tf/h))
		x0 = numpy.array([1.0, 1.0, 1.0])

		params = [10., 28., 8./3.] # The parameters [s, r, b] for the canonical Lorenz equations

		def F(X, t):
			s = params[0]
			r = params[1]
			b = params[2]

			x = X[0]
			y = X[1]
			z = X[2]

			dX = numpy.array([s*(y - x), x*(r - z) - y, x*y - b*z])

			return dX

		def G(x, t):
			return B

		dim = 3

		if model_name == 'slorenz':
			dyn_noise = 2
		else:
			dyn_noise = 0

		B = numpy.diag([dyn_noise]*dim)

		result = sdeint.itoint(F, G, x0, tspan)

		tspan = tspan[int(tburn/h)::ds_by]
		result = result[int(tburn/h)::ds_by, :]

		x = result[:, 0]

	if 'rossler' in model_name:
		model_type = 'nlar'

		p_true = 4

		h = 0.05

		if ds_by == None:
			ds_by = 5

		ttot = N*h*ds_by
		tburn = 100
		tf = ttot + tburn

		tspan = numpy.linspace(0.0, tf, int(tf/h))
		x0 = numpy.array([1.0, 1.0, 1.0])

		params = [0.1, 0.1, 14]

		def F(X, t):
			a = params[0]
			b = params[1]
			c = params[2]

			x = X[0]
			y = X[1]
			z = X[2]

			dX = numpy.array([-y - z, x + a*y, b + z*(x - c)])

			return dX

		def G(x, t):
			return B

		dim = 3

		if model_name == 'srossler':
			dyn_noise = 0.1
		else:
			dyn_noise = 0.0

		B = numpy.diag([dyn_noise]*dim)
		B[2, 2] = 0

		result = sdeint.itoint(F, G, x0, tspan)

		tspan = tspan[int(tburn/h)::ds_by]
		result = result[int(tburn/h)::ds_by, :]

		x = result[:, 0]
		y = result[:, 1]
		z = result[:, 2]

	if 'loriei' in model_name:
		model_type = 'nlar'

		p_true = 4

		# dt = 0.005 # Used in spenra paper
		dt = 0.05

		N_sim = int(N*2/dt)

		# To generate 

		threshold_val = 50

		if model_name == 'loriei':
			model_name = 'lorenz'
		else:
			model_name = 'slorenz'

		x, p_true, model_type = load_model_data(model_name, N_sim, ds_by = 1, dt = dt)

		time = numpy.linspace(0, N_sim*dt, num = N_sim)

		s = x + 25

		S = numpy.cumsum(s)*dt

		crossing_times = []

		for iei_ind in range(1, N):
			cur_thresh = iei_ind*threshold_val

			where_out = numpy.where((S - cur_thresh > 0))

			try:
				cross_ind = where_out[0][0]

				cross_time = time[cross_ind]

				# plt.axhline(cur_thresh)
				# plt.axvline(cross_time)

				y0 = S[cross_ind - 1]; y1 = S[cross_ind]
				x0 = time[cross_ind - 1]; x1 = time[cross_ind]

				m = (y1 - y0)/float(x1 - x0)

				xc = (cur_thresh - y0)/m + x0

				# plt.plot(xc, cur_thresh, '.', color = 'green')

				crossing_times.append(xc)
			except:
				break

		iei = numpy.diff(crossing_times)
		x = iei

	if model_name == 'snanopore':
		a=1.
		b=1.
		c=1.
		km=5.
		kp=1.
		gamx=1.
		gamy=100.
		alpha=0.1
		beta=0.1

		model_type = 'nlar'

		p_true = numpy.inf

		if ds_by == None:
			ds_by = 2

		delta_t=0.25
		T_final = ds_by*N*delta_t

		M = int(T_final/delta_t + 1)

		tspan = numpy.linspace(0.0, T_final, M)
		x0 = numpy.array([1.0, -0.5])

		def f(z, t):
		#z=[x,y]
			dx=-(1/gamx)*(a*numpy.power(z[0],3)-b*z[0]+c*z[1])
			if z[0]<0:
				H=0
			else:
				H=1
			if z[0]>0:
				Hm=0
			else:
				Hm=1
			dy=(1/gamy)*(kp*H-km*Hm)
			return numpy.array([dx,dy]) 

		def G(z, t):
			B=numpy.diag([alpha,beta])
			return B

		print('Solving SDE...')

		result = sdeint.itoint(f, G, x0, tspan)

		x = result[::ds_by, 0]

	if model_name == 'shadow_crash':
		p_true = 4
		model_type = 'nlar'

		h = 0.05

		if ds_by == None:
			ds_by = 2

		ttot = N*h*ds_by
		tburn = 20
		tf = ttot + tburn

		tspan = numpy.linspace(0.0, tf, int(tf/h))
		x0 = numpy.array([1.0, 1.0])

		def F(X, t):
			b = 0.42
			g = -0.04
			
			x = X[0]
			z = X[1]
			
			dX = numpy.array([x - x**2*numpy.exp(-b*x*z), z - z**2*numpy.exp(-g*x)])

			return dX

		def G(X, t):
			# B = numpy.diag([0.4, 0.01])
			B = numpy.diag([0.2, 0.01])

			x = X[0]
			z = X[1]

			B[0, 0] = x*B[0, 0]
			B[1, 1] = z*B[1, 1]
			
			return B

		dim = 2

		result = sdeint.itoint(F, G, x0, tspan)

		tspan = tspan[int(tburn/h)::ds_by]
		result = result[int(tburn/h)::ds_by, :]

		x = result[:, 0]

	return x, p_true, model_type
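
A minimal usage sketch for the loader above (model names follow the docstring, the sizes are illustrative, and the module-level imports of that file such as numpy and sdeint are assumed):

# 1000 points of the stochastic Lorenz x-coordinate (default h = 0.05, ds_by = 2)
x, p_true, model_type = load_model_data('slorenz', 1000)

# 500 points of the SETAR(2; 2, 2) series
x_setar, p_setar, type_setar = load_model_data('setar', 500)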
Code Example #24
File: Risk.py Project: davideflo/Python_code
        dS = dW*dt
        S[i] = S[i-1] + dS
    return S
###############################################################################

plt.figure()
plt.plot(Wiener(0,1,n,T))

def mu(x,t):
    return x**2

def SIG(x, t):
    return np.sqrt(x)

tspan = np.linspace(0.0, 1, 10001)
res = sdeint.itoint(mu, SIG, 1, tspan)

plt.figure()
plt.plot(tspan,res.ravel())

def mu2(x,t):
    return x + 2
def sig2(x,t):
    return np.sin(x)

plt.figure()
tspan2 = np.linspace(0.0, 1, 10001)
for i in range(10):
    plt.plot(tspan2, sdeint.itoint(mu2, sig2, 0, tspan2))
    
def ForwardKolmogorovEquation(drift, diffusion):
Code Example #25
            X[s] * (b[s] - np.sum(np.dot(A, X)[0, s]))
            for s in range(0, len(X))
        ])
        return dydt

    def u(X, t):
        dydt = np.array([
            1. / np.sqrt(SS) * np.sqrt(X[s] *
                                       (b[s] + np.sum(np.dot(A, X)[0, s])))
            for s in range(0, len(X))
        ])
        return np.diag(dydt)

    x0 = np.array(np.repeat(1., d))
    for i in range(0, realization):
        stochastic_result = sdeint.itoint(f, u, x0, t)

        for cl in range(0, d):
            clean_ = np.nan_to_num(stochastic_result[:, cl])
            if min(clean_) <= 0:
                ext[i] = 1

    expected_transition.append(np.sum(ext) / len(ext))
    FeasibilitySize.append(SizeFeasibilityDomain(A, normalization))
    k = k + 0.002
    print(SizeFeasibilityDomain(A, normalization), np.sum(ext) / len(ext))

nome_file = 'TransitionProbability_in_%i' % d + 'd.txt'
s = open(nome_file, 'w')
[
    s.write('%f %f\n' % (FeasibilitySize[i], expected_transition[i]))
Code Example #26
    def return_average_utility_for_node3(self, time_vector=np.linspace(0, 10, 10000)):
        soln = sdeint.itoint(self.rate_of_experience, self.noise, self.experiences_of_choices, time_vector)
        return np.average(np.average(self.utility_gained_at_node_three, 0))
Code Example #27
File: core.py Project: zeochoy/resim
    def _simulate(self):
        """Private, simulate sde

        Returns:
            dict of pandas data frame with key 'cells', 'drugs', 'fht'.
        """
        def f(x, t):
            if x[0] < 1e-8: x[0] = 0
            if x[1] < 1e-8: x[1] = 0
            if x[2] < 1e-8: x[2] = 0
            if x[3] < 1e-8: x[3] = 0
            if x[4] < 0: x[4] = 0

            if self._dose == 0:
                cs_dt = lambda x, t: (self._growth(self.grs, x) - self._quiescere_s(self.ksq, x[4]) - self._transit(self.kspr)) * x[0] + self._transit(self.kprs) * x[1] + self._transit(self.kqs) * x[3]
                cpr_dt = lambda x, t: (self._growth(self.grpr, x) - self._quiescere_r(self.kprq) - self._transit(self.kprs)) * x[1] + self._transit(self.kspr) * x[0] + self._transit(self.kqpr) * x[3]
                car_dt = lambda x, t: 0
                cq_dt = lambda x, t: -(self._transit(self.kqs) + self._transit(self.kqpr)) * x[3] + self._quiescere_s(self.ksq, x[4]) * x[0] + self._quiescere_r(self.kprq) * x[1]
                d_dt = lambda x, t: 0
            else:
                cs_dt = lambda x, t: (self._growth(self.grs, x) - self._quiescere_s(self.ksq, x[4]) - self._transit(self.ksar) - self._death(x[4])) * x[0] + self._transit(self.kprs) * x[1] + self._transit(self.kars) * x[2] + self._transit(self.kqs) * x[3]
                cpr_dt = lambda x, t: (self._growth(self.grpr, x) - self._quiescere_r(self.kprq) - self._transit(self.kprs)) * x[1]
                car_dt = lambda x, t: (self._growth(self.grar, x) - self._quiescere_r(self.karq) - self._transit(self.kars)) * x[2] + self._transit(self.ksar) * x[0] + self._transit(self.kqar) * x[3]
                cq_dt = lambda x, t: -(self._transit(self.kqs) + self._transit(self.kqar)) * x[3] + self._quiescere_s(self.ksq, x[4]) * x[0] + self._quiescere_r(self.kprq) * x[1] + self._quiescere_r(self.karq) * x[2]
                d_dt = lambda x, t: self._dose - self.ke * x[4]
            a = np.array([cs_dt(x,t), cpr_dt(x,t), car_dt(x,t), cq_dt(x,t), d_dt(x,t)])
            return a

        def g(x, t):
            difu = np.diag(np.repeat(self._sig, len(self.init)))
            return difu * x

        cth = self._get_cth()
        dfcell = pd.DataFrame()
        dfdrug = pd.DataFrame()
        fht = []

        for i in range(self.n):
            itg = sdeint.itoint(f, g, self.init, self._tspan)
            tdf = pd.DataFrame(itg)
            tdf = tdf.drop(tdf.index[len(tdf)-1])
            tdfdrug = pd.DataFrame(tdf.iloc[:,-1])
            tdfdrug[len(tdfdrug.columns)] = i
            tdfdrug[len(tdfdrug.columns)] = pd.Series(self._tspan)
            dfdrug = dfdrug.append(tdfdrug)
            tdfcell = tdf.iloc[:,0:4]
            tdfcell[len(tdfcell.columns)] = tdfcell.sum(axis=1)
            tdfcell[len(tdfcell.columns)] = i
            tdfcell[len(tdfcell.columns)] = pd.Series(self._tspan)
            dfcell = dfcell.append(tdfcell)
            tdfcellsum = tdfcell.iloc[:,4]
            if tdfcellsum.iloc[-1] < cth:
                tfht = np.nan
            else:
                tfht = [n for n, j in enumerate(tdfcell[4]) if j > cth][0]
            fht.append(tfht)

        dfcell.columns = ['sensitive', 'primary resistant', 'acquired resistant', 'quiescent', 'total', 'n', 'days']
        dfdrug.columns = ['drug conc', 'n', 'days']
        dfs = {'cells':dfcell, 'drugs':dfdrug, 'fht':fht}
        return dfs
Code Example #28
#b = 0.8
tspan = np.linspace(0.0, 600 * np.pi, 50001)
#tspan = np.linspace(0.0, 60*np.pi, 5001)
omega = 1.0
X0 = np.array([1.0, 0.])


def f(X, t):
    x, v = X
    dxdt = v
    dvdt = -omega**2 * x
    #return np.zeros(2)
    return np.array([dxdt, dvdt])


def g(X, t):
    #return np.diag([0., A])
    return np.diag([A, A])


A = float(input("A: "))
result = sdeint.itoint(f, g, X0, tspan)
#result = odeint(f, X0, tspan)

x = result[:, 0]
v = result[:, 1]

plt.plot(tspan, x)
plt.plot(tspan, v)
plt.show()
Code Example #29
File: kuramoto_local.py Project: razib764/pyPTE
omega = np.ones_like(theta0) * 5
# omega = np.linspace(0, np.pi*2, N)

K = np.zeros((N, N))
print(K)
K[:, :] = 0
K[2, 3] = 0

dt = 0.01
t_end = 1
steps = int(t_end / dt)  # np.linspace needs an integer sample count
# K[:,3] = 100
f, G = kuramoto(omega, K, N, 0.01)
from sdeint import itoint
tspan = np.linspace(0, t_end, steps)
solution = itoint(f, G, theta0, tspan)
from matplotlib import pyplot as plt
plt.figure(1)
plt.subplot(211)
plt.plot(tspan, solution)

solution = np.mod(solution, 2 * np.pi)
solution -= np.pi
plt.subplot(212)
plt.plot(tspan, solution)
plt.show()

import itertools

dt = 0.1
t_end = 10
Code Example #30
File: bslr.py Project: Veggente/one-shot-sampling
def eval_bslr_on_locke(sampling_times,
                       num_cond,
                       num_rep,
                       one_shot,
                       sigma_co,
                       sigma_bi,
                       write_file,
                       rand_seed=0,
                       sig_level=0.05,
                       output='',
                       num_integration_interval=100,
                       max_in_deg=3,
                       rep_avg=True,
                       diffusion_type='linear'):
    """Evaluate BSLR using network in Locke et al. MSB 2005.

    One environmental condition is modeled by the same set of
    initial conditions (values) of the gene expression levels,
    as well as the same condition-dependent nominal production
    variations.

    Args:
        sampling_times: array
            Sampling times as evenly spaced nonnegative
            numbers in an increasing order.
        num_cond: int
            Number of conditions.
        num_rep: int
            Number of replicates per single time.
        one_shot: bool
            True if one-shot, False if multi-shot.
        sigma_co: float
            Condition-dependent production variation level.
        sigma_bi: float
            Biological production variation level.
        write_file: bool
            Writes xml file if True.  Returns the adjacency
            matrix if False.
        rand_seed: int
            Random number generator seed.
        sig_level: float
            Significance level.
        output: str
            Output filename.
        num_integration_interval: int
            Number of integration intervals for the Ito
            integral, evenly spaced over
            [0, sampling_times[-1]].
        max_in_deg: int
            Maximum in-degree used in BSLR.
        rep_avg: bool
            Do replicate averaging if True.  Otherwise take
            replicates as different conditions.
        diffusion_type: str
            Diffusion type.  Can be 'linear' or
            'michaelis-menten'.

    Returns:
        Saves graph file or return adjacency matrix.
    """
    # Create a shallow copy of the default parameters.
    param_test = get_locke_params()
    param_test['sigma_co'] = sigma_co * np.ones((3, 4))
    param_test['sigma_bi'] = sigma_bi * np.ones((3, 4))
    param_test['diffusion_type'] = diffusion_type
    np.random.seed(rand_seed)
    # Generate data file.
    num_time = len(sampling_times)
    num_genes = 4
    mrna = np.empty((num_genes, num_cond * num_rep * num_time))
    tspan = np.linspace(0, sampling_times[-1], num_integration_interval + 1)
    if one_shot:
        num_rep_per_traj = num_rep * num_time
    else:
        num_rep_per_traj = num_rep
    for idx_cond in range(num_cond):
        # Generate the same 12-dimensional initial
        # conditions for all replicates.
        exp_init_per_rep = np.random.rand(12)
        exp_init = np.empty(12 * num_rep_per_traj)
        for idx_rep in range(num_rep_per_traj):
            exp_init[idx_rep +
                     np.arange(12) * num_rep_per_traj] = (exp_init_per_rep)
        # Entire solution over the fine tspan as a T-by-12R
        # matrix, where T = len(tspan) and R = num_rep_per_traj.
        exp_sol = sdeint.itoint(close(locke_drift, param_test),
                                close(diff_coeff, param_test), exp_init, tspan)
        # Sampled expression levels at the coarse times,
        # approximated by the closest time in tspan.
        exp_sampled = exp_sol[[
            int(round(x)) for x in np.asarray(sampling_times) /
            sampling_times[-1] * num_integration_interval
        ], :]
        # Reshape the array.
        for i in range(num_genes):
            for j in range(num_time):
                start = idx_cond * num_rep * num_time + j * num_rep
                if one_shot:
                    mrna[i, start:start + num_rep] = (
                        exp_sampled[j, i * num_rep_per_traj +
                                    j * num_rep:i * num_rep_per_traj +
                                    (j + 1) * num_rep])
                else:
                    mrna[i, start:start +
                         num_rep] = (exp_sampled[j, i * num_rep:(i + 1) *
                                                 num_rep])
    sample_ids = [
        'c{}_t{}_r{}'.format(k, i, j) for k in range(num_cond)
        for i in range(num_time) for j in range(num_rep)
    ]
    mrna_df = pd.DataFrame(data=mrna,
                           columns=sample_ids,
                           index=['G1', 'G2', 'G3', 'G4'])
    mrna_df.to_csv('exp-locke.csv')
    # Generate gene list file.
    np.savetxt('gene-list-locke.csv',
               [['G1', 'LHY'], ['G2', 'TOC1'], ['G3', 'X'], ['G4', 'Y']],
               fmt='%s',
               delimiter=',')
    # Generate condition list file.
    num_rep_alg = num_rep
    num_cond_alg = num_cond
    if rep_avg:
        conditions = list(range(num_cond))
    else:
        num_rep_alg = 1
        num_cond_alg = num_cond * num_rep
        conditions = list(range(num_cond_alg))
    json.dump([conditions, list(range(num_time))],
              open('cond-locke.json', 'w'),
              indent=4)
    # Generate design file.
    samples_df = pd.DataFrame(data=sample_ids)
    if rep_avg:
        samples_df['cond'] = samples_df[0].apply(lambda x: x.split('_')[0][1:])
    else:
        samples_df['cond'] = samples_df[0].apply(lambda x: int(
            x.split('_')[0][1:]) * num_rep + int(x.split('_')[2][1:]))
    samples_df['time'] = samples_df[0].apply(lambda x: x.split('_')[1][1:])
    samples_df.to_csv('design-locke.csv', header=False, index=False)

    if write_file:
        if not output:
            output = ('test-t{num_times}-c{num_cond}-bslr'
                      '-s{sig_level}-r{rand_seed}.xml'.format(
                          num_times=num_time,
                          sig_level=sig_level,
                          rand_seed=rand_seed,
                          num_cond=num_cond_alg))
        # Run BSLR.
        causnet.main('-c cond-locke.json '
                     '-i gene-list-locke.csv -g {output} '
                     '-x exp-locke.csv '
                     '-P design-locke.csv '
                     '-f {sig_level} '
                     '-m {max_in_deg}'.format(output=output,
                                              num_times=num_time,
                                              sig_level=sig_level,
                                              max_in_deg=max_in_deg).split())
        return
    else:
        parser_dict = causnet.load_parser('design-locke.csv')
        adj_mat_sign_rec = causnet.bslr(parser_dict, mrna_df, num_cond_alg,
                                        num_time, num_genes, num_rep_alg,
                                        max_in_deg, sig_level)
        return adj_mat_sign_rec
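
A usage sketch based on the docstring above (all argument values are illustrative; with write_file=False the recovered adjacency matrix is returned instead of an xml file):

import numpy as np

adj = eval_bslr_on_locke(sampling_times=np.arange(0, 10, 2),
                         num_cond=5,
                         num_rep=3,
                         one_shot=False,
                         sigma_co=0.1,
                         sigma_bi=0.05,
                         write_file=False)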
Code Example #31
File: xjw.py Project: voytekresearch/pacological
    r0 = [8, 12.0]  # intial rates (re, ri)
    tmax = 1000  # run time, ms
    dt = .1  # resolution, ms

    # Stim params
    d = 1  # drive rate (want 0-1)
    scale = .01 * d
    Istim = create_I(tmax, d, scale, seed=42)

    # Simulate
    times = linspace(0, tmax, int(tmax / dt))
    rs0 = asarray(r0 * 8 + [0])

    f = partial(xjw, Je_e=3.0, Je_i=3.0, Ji_e=0.0, Ji_i=0.0, k1=0.9, k2=1.2)
    g = partial(ornstein_uhlenbeck, sigma=0.5, loc=[0, 1, 6, 7])  # re/i locs
    rs = itoint(f, g, rs0, times)

    # -------------------------------------
    # Select some interesting vars and plot
    t = times
    Is = rs[:, 16]  # whole time series of the 17th state variable; rs[16] would pick a single time step
    re1 = rs[:, 0]
    ri1 = rs[:, 1]
    re2 = rs[:, 6]
    ri2 = rs[:, 7]

    # 1
    plt.figure(figsize=(14, 10))
    plt.subplot(411)
    plt.plot(t, [Istim(x) for x in t], 'k', label='1: Stim')
    plt.legend(loc='best')
Code Example #32
def run_mc(params):

    ind = params[0]
    pressure = params[1]
    drive_freq = params[2]
    drive_voltage = params[3]
    drive_voltage_noise = params[4]
    drive_phase_noise = params[5]
    init_angle = params[6]
    discretized_phase = params[7]

    beta_rot = pressure * np.sqrt(m0) / kappa
    drive_amp = np.abs(bu.trap_efield([0, 0, 0, drive_voltage, -1.0*drive_voltage, \
                                       0, 0, 0], nsamp=1)[0])
    drive_amp_noise = drive_voltage_noise * (drive_amp / drive_voltage)

    seed = seed_init * (ind + 1)

    xi_0 = np.array([np.pi/2.0, 0.0, 0.0, \
                     0.0, 2.0*np.pi*drive_freq, 0.0])

    time_constant = Ibead / beta_rot

    np.random.seed(seed)

    ### If desired, set a thermalization time equal to 10x the time constant
    ### for this particular pressure and Ibead combination
    if variable_thermalization:
        t_therm = np.min([10.0 * time_constant, 300.0])
        nthermfiles = int(t_therm / out_file_length) + 1
    else:
        t_therm = user_t_therm
        nthermfiles = user_nthermfiles

    values_to_save = {}
    values_to_save['mbead'] = mbead
    values_to_save['Ibead'] = Ibead
    values_to_save['kappa'] = kappa
    values_to_save['beta_rot'] = beta_rot
    values_to_save['p0'] = p0
    values_to_save['fsamp'] = fsamp
    values_to_save['fsim'] = fsim
    values_to_save['seed'] = seed
    values_to_save['xi_0'] = xi_0
    values_to_save['init_angle'] = init_angle
    values_to_save['pressure'] = pressure
    values_to_save['m0'] = m0
    values_to_save['drive_freq'] = drive_freq
    values_to_save['drive_amp'] = drive_amp
    values_to_save['drive_amp_noise'] = drive_amp_noise
    values_to_save['drive_phase_noise'] = drive_phase_noise
    values_to_save['discretized_phase'] = discretized_phase
    values_to_save['t_therm'] = t_therm

    if not TEST:
        base_filename = os.path.join(base, 'mc_{:d}/'.format(ind))

        bu.make_all_pardirs(os.path.join(base_filename, 'derp.txt'))

        param_path = os.path.join(base_filename, 'params.p')
        pickle.dump(values_to_save, open(param_path, 'wb'))


    def E_phi_func(t, t_therm=0.0, init_angle=0.0):
        raw_val = 2.0 * np.pi * drive_freq * (t + t_therm) + init_angle
        if discretized_phase:
            n_disc = int(raw_val / discretized_phase)
            return n_disc * discretized_phase
        else:
            return raw_val

    ### Matrix for the stochastic driving processes
    torque_noise = np.sqrt(4.0 * kb * T * beta_rot)
    # B = np.array([[0, 0,   0,   0],
    #               [0, 0,   0,   0],
    #               [0, 0, 1.0,   0],
    #               [0, 0,   0, 1.0]])
    B = np.array([[0, 0, 0,   0,   0,   0],
                  [0, 0, 0,   0,   0,   0],
                  [0, 0, 0,   0,   0,   0],
                  [0, 0, 0, 1.0,   0,   0],
                  [0, 0, 0,   0, 1.0,   0],
                  [0, 0, 0,   0,   0, 1.0]])
    B *= torque_noise / Ibead

    ### Define the system such that d(xi) = f(xi, t) * dt
    # @jit()
    def f(x, t):
        torque_theta = drive_amp * p0 * np.sin(0.5 * np.pi - x[0]) \
                            - 1.0 * beta_rot * x[3]

        c_amp = drive_amp
        E_phi = E_phi_func(t)
        if fterm_noise:
            c_amp += drive_amp_noise * np.random.randn()
            E_phi += drive_phase_noise * np.random.randn()

        torque_phi = c_amp * p0 * np.sin(E_phi - x[1]) * np.sin(x[0]) \
                            - 1.0 * beta_rot * x[4]

        torque_psi = -1.0 * beta_rot * x[5]

        return np.array([x[3], x[4], x[5], \
                         torque_theta / Ibead, \
                         torque_phi / Ibead, \
                         torque_psi / Ibead])

    ### Define the stochastic portion of the system
    # @jit()
    def G(x, t):
        newB = np.zeros((6,6))

        if gterm_noise:
            E_phi = E_phi_func(t)
            amp_noise_term = drive_amp_noise * p0 * np.sin(E_phi - x[1]) * np.sin(x[0])

            E_phi_rand = drive_phase_noise * np.random.randn()
            phase_noise_term = drive_amp * p0  * np.sin(E_phi_rand) * np.sin(x[0])
            newB[4,4] += amp_noise_term + phase_noise_term

        return B + newB


    ### Thermalize
    xi_init = np.copy(xi_0)
    for i in range(nthermfiles):
        t0 = i*out_file_length
        tf = (i+1)*out_file_length

        nsim = int(out_file_length * fsim)
        tvec = np.linspace(t0, tf, nsim+1)

        result = sdeint.itoint(f, G, xi_init, tvec).T
        xi_init = np.copy(result[:,-1])


    ### Redefine the system taking into account the thermalization time
    ### and the desired phase offset
    # @jit()
    def f(x, t):
        torque_theta = drive_amp * p0 * np.sin(0.5 * np.pi - x[0]) \
                            - 1.0 * beta_rot * x[3]

        c_amp = drive_amp
        E_phi = E_phi_func(t, t_therm=t_therm, init_angle=init_angle)
        if fterm_noise:
            c_amp += drive_amp_noise * np.random.randn()
            E_phi += drive_phase_noise * np.random.randn()

        torque_phi = c_amp * p0 * np.sin(E_phi - x[1]) * np.sin(x[0]) \
                            - 1.0 * beta_rot * x[4]

        torque_psi = -1.0 * beta_rot * x[5]

        return np.array([x[3], x[4], x[5], \
                         torque_theta / Ibead, \
                         torque_phi / Ibead, \
                         torque_psi / Ibead])


    # @jit()
    # def f(x, t):
    #     torque_theta = - 1.0 * beta_rot * x[2]
    #     torque_phi = - 1.0 * beta_rot * x[3]

    #     return np.array([x[2], x[3], torque_theta / Ibead, torque_phi / Ibead])

    ### Define the stochastic portion of the system
    def G(x, t):
        newB = np.zeros((6,6))
        if gterm_noise:
            E_phi = E_phi_func(t, t_therm=t_therm, init_angle=init_angle)
            amp_noise_term = drive_amp_noise * p0 * np.sin(E_phi - x[1]) * np.sin(x[0])

            E_phi_rand = drive_phase_noise * np.random.randn()
            phase_noise_term = drive_amp * p0  * np.sin(E_phi_rand) * np.sin(x[0])

            # keep the increment inside the if-block: the two terms are
            # undefined when gterm_noise is False (cf. the first G above)
            newB[4,4] += amp_noise_term + phase_noise_term

        return B + newB




    ### Run the simulation with the thermalized solution
    for i in range(nfiles):
        # start = time.time()
        t0 = i*out_file_length
        tf = (i+1)*out_file_length

        nsim = int(out_file_length * fsim)
        tvec = np.linspace(t0, tf, nsim+1)

        ### Solve!
        # print('RUNNING SIM')
        result = sdeint.itoint(f, G, xi_init, tvec).T
        xi_init = np.copy(result[:,-1])

        tvec = tvec[:-1]
        soln = result[:,:-1]

        # print('DOWNSAMPLING')
        nsamp = int(out_file_length * fsamp)
        # soln_ds, tvec_ds = signal.resample(soln, t=tvec, \
        #                                    num=nsamp, axis=-1)
        # soln_ds = signal.decimate(soln, int(upsamp))

        tvec_ds = tvec[::int(upsamp)]
        soln_ds = soln[:,::int(upsamp)]

        # plt.plot(tvec, soln[1])
        # plt.plot(tvec_ds, soln_ds[1])
        # plt.plot(tvec_ds, soln_ds_2[1])

        # plt.show()

        if not TEST:
            out_arr = np.concatenate( (tvec_ds.reshape((1, len(tvec_ds))), soln_ds) )

            filename = os.path.join(base_filename, 'outdat_{:d}.h5'.format(i)) 

            fobj = h5py.File(filename, 'w')
            fobj.create_dataset('sim_data', data=out_arr, compression='gzip', \
                                compression_opts=9)
            fobj.close()

        # stop = time.time()
        # print('Time for one file: {:0.1f}'.format(stop-start))

    return seed
Code Example #33
    def return_average_utility(self, time_vector=np.linspace(0, 10, 10000)):
        soln = sdeint.itoint(self.rate_of_experience, self.noise, self.experiences_of_choices, time_vector)
        return np.average(np.average(self.orbits_utility, 0))
Code Example #34
    def simulate(self, t, inputs=None):
        """
        The simulate function takes no, one or multiple inputs and simulates the
        system from the current state for all time marks in t. At these time the
        control is also updated!
        """
        outputDimension = self.system.outputOrder
        if outputDimension:
            output = np.zeros((t.size, outputDimension))

        t0 = t[0]
        index = 0
        tnew = t
        current_sample = 0
        num_samples = len(t)

        if 'jitter' in self.options:
            jitter_range = self.options['jitter']['range']
            if jitter_range > (t[1] - t[0]) / 2.:
                raise "Too large jitter range. Time steps could change order"
            tnew = t + (np.random.rand(t.size) - 0.5) * jitter_range
            print("With Jitter!")
            # print(t)
            # print(tnew)

        for timeInstance in tnew[1:]:
            # Store observations
            if outputDimension:
                output[index, :] = self.system.output(self.state)
                index += 1

            def f(x, t):
                """
                Compute the system derivative considering state control and input.
                """
                hom = np.dot(self.system.A, x.reshape(
                    (self.system.order, 1))).flatten()
                control = self.control.fun(t)
                input = np.zeros_like(hom)
                if inputs:
                    for signal in inputs:
                        input += signal.fun(timeInstance)

                return hom + control + input

            if "noise" in self.options:
                # Shock Noise
                noise_state = np.zeros(
                    (self.system.order, len(self.options["noise"])))
                for i, noiseSource in enumerate(self.options['noise']):
                    # print(noiseSource['std'])
                    if noiseSource['std'] > 0:
                        # std = np.sqrt(noiseSource["std"] ** 2 * (timeInstance - t0)) * noiseSource["steeringVector"]
                        std = noiseSource["std"] * noiseSource["steeringVector"]
                        noise_state[:, i] = std
                        # noise_state += (np.random.rand() - 0.5 ) * 2 * std
                def g(x, t):
                    return noise_state

            else:

                def g(x, t):
                    return np.zeros((self.system.order, 1))

            # Solve ordinary differential equation
            # self.state = odeint(derivate, self.state, np.array([t0, timeInstance]), mxstep=100, rtol=1e-13, hmin=1e-12)[-1, :]
            tspace = np.linspace(t0, timeInstance, 10)
            self.state = sdeint.itoint(f, g, self.state, tspace)[-1, :]
            # If thermal noise should be simulated
            # if "noise" in self.options:

            # # Shock Noise
            # noise_state = np.zeros(self.system.order)
            # for noiseSource in self.options['noise']:
            #     # print(noiseSource['std'])
            #     if noiseSource['std'] > 0:
            #         # std = np.sqrt(noiseSource["std"] ** 2 * (timeInstance - t0)) * noiseSource["steeringVector"]
            #         std = noiseSource["std"] * noiseSource["steeringVector"]
            #         noise_state += np.random.randn() * std
            #         # noise_state += (np.random.rand() - 0.5 ) * 2 * std
            # self.state += noise_state

            # # Thermal Noise Simulation
            # for noiseSource in self.options['noise']:
            #     if noiseSource['std'] > 0:
            #         def noiseDerivative(x, t):
            #             hom = np.dot(self.system.A, x.reshape((self.system.order,1))).flatten()
            #             noise = np.random.randn() * noiseSource["std"] * noiseSource["steeringVector"]
            #             return hom + noise
            #         noise_state = odeint(noiseDerivative, np.zeros_like(self.state), np.array([t0, timeInstance]))[-1, :]
            #         # print("Noise state %s" %noise_state)
            #         # print("state before ", self.state)
            #         self.state += noise_state
            #         # print("noise ", noise_state)
            #         # print("state after ", self.state)

            # Increase time
            t0 = timeInstance
            # Update control descisions
            # print(self.state)

            # Clip if state is out of bound
            if True:
                bound = 1.
                above = self.state > bound
                below = self.state < -bound

                oob_states = np.arange(self.system.order)[np.logical_or(
                    above, below)]
                if any(oob_states):
                    # self.log("STATE BOUND EXCEEDED! Sample #: {}".format(current_sample))
                    # self.log("X_{} = {}".format(oob_states, self.state[oob_states]))
                    self.num_oob += 1
                    #self.state[above] = bound
                    #self.state[below] = -bound

            # print(self.state)
            current_sample += 1
            self.control.update(self.state)

            # Print progress every 1e4 samples
            try:
                if current_sample % (num_samples // 1e4) == 0:
                    print("Simulation Progress: %.2f%%    \r" %
                          (100 * (current_sample / num_samples)),
                          end='',
                          flush=True)
            except ZeroDivisionError:
                pass

        # Return simulation object
        return {
            't': t,
            'control': self.control,
            'output': output,
            'system': self.system,
            'state': self.state,
            'options': self.options,
            'log': self.logstr,
            'num_oob': self.num_oob
        }
Code Example #35
import numpy as np
import matplotlib.pyplot as plt
import sdeint

a = 1.0
b = 0.8
tspan = np.linspace(0.0, 500.0, 3 * 5001)
x0 = 0.1


def f(x, t):
    #return -(a + x*b**2)*(1 - x**2)
    return 0.0


def g(x, t):
    #return b*(1 - x**2)
    return 1.0


count = 100
result = []
for i in range(count):
    if i % 5 == 0:
        print("{}/{}".format(i, count))
    x = sdeint.itoint(f, g, x0, tspan)[:, 0]
    plt.plot(tspan, x)
    result.append(x)
plt.show()
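
With f returning 0 and g returning 1, each of the 100 paths is a standard Wiener process started at x0 = 0.1, so the spread of the bundle grows like sqrt(t): the variance at time t is t, i.e. a standard deviation of about 22 at t = 500.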
Code Example #36
File: density.py Project: nmarzz/scores
    samples = base_density_1d(300).numpy()

    # Define an Ito SDE
    def f(x, t):
        return -x

    D = 1

    def g(x, t):
        return np.diag(np.sqrt(2 * D) * np.ones(len(x)))

    T = 2
    tspan = np.linspace(0.0, T, 200)

    # Solve the SDE defined above
    result = sdeint.itoint(f, g, samples, tspan)

    plt.plot(tspan, result)
    plt.title('Particle paths')
    plt.show()

    plt.hist(samples, density=True)
    plt.title('True Bimodal Base Density')
    plt.xlabel('x')
    plt.ylabel('Density')
    plt.show()

    plt.hist(result[-1], density=True)
    plt.title('Prior density ')
    plt.show()
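
The SDE here is dX = -X dt + sqrt(2*D) dW, an Ornstein-Uhlenbeck process whose stationary density is Gaussian with mean 0 and variance D; with D = 1 and T = 2 (two relaxation times) the histogram of result[-1] should be close to a standard normal, which is the 'prior density' plotted last.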
Code Example #37
    def return_area(self, time_vector=np.linspace(0, 10, 10000)):
        soln = sdeint.itoint(self.rate_of_experience, self.noise, self.experiences_of_choices, time_vector)
        return trapz(self.orbits_for_sum_pi_ei, range(0, len(self.orbits_for_sum_pi_ei)))
Code Example #38
    return B


result = np.zeros((num_sims, N, 6))
result_ese = np.zeros((num_sims, N, 6))

for i in range(0, num_sims):  # range(0, num_sims - 1) would leave the last pre-allocated row all zeros
    x0 = [
        np.random.randn(),
        np.random.randn(),
        np.random.randn(),
        np.random.randn(),
        np.random.randn(),
        np.random.randn()
    ]
    result[i] = sdeint.itoint(f, G, x0, tspan)
    result_ese[i] = sdeint.itoint(f_ese, G, x0, tspan)
    print(i)

print('Done with integrtion')

num_bins = 20
a = np.ceil(abs(max(np.ndarray.flatten(result[:][:][0]), key=abs))) / 3
step_size = 2 * a / num_bins


def counts(iterable, low, high, bins):
    step = (high - low + 0.0) / bins
    dist = collections.Counter((float(x) - low) // step for x in iterable)
    return [dist[b] for b in range(bins)]
Code Example #39
    def simulate_meso(self, method="BDF", atol=1.e-5, rtol=1.e-6, G=None):

        qgrid = self.qgrid
        dqgrid = self.dqgrid
        weights = self.weights

        inner = slice(1, -1)
        left = slice(0, -2)
        right = slice(2, None)

        # works inplace w.r.t. v!
        def upwind(v, rho, dqgrid):

            v_pos = 0.5 * (v[inner] + np.abs(v[inner]))
            v_neg = 0.5 * (v[inner] - np.abs(v[inner]))

            drho = np.zeros_like(rho)

            drho[inner] -= v_pos / dqgrid[0:-1] * (rho[inner] - rho[left]
                                                   )  # * (rho[inner] > 0.5)
            drho[inner] -= v_neg / dqgrid[1:] * (rho[right] - rho[inner]
                                                 )  # * (rho[inner] > 0.5)

            return drho

        def ode_fun_meso(t, y):

            r = y[0]
            dr = y[1]
            rho = y[2:]

            M = self.pms_sym.M_mod_meso(r, dr, rho, qgrid, weights)
            F = self.pms_sym.F_mod_meso(r, dr, rho, qgrid, weights)
            v = self.pms_sym.v_mod_meso(r, dr, qgrid)

            return np.concatenate(
                [np.array([dr, F / M]),
                 upwind(v, rho, dqgrid)])

        if G is not None:

            def G_func(y, t):
                r = y[0]
                dr = y[1]
                rho = y[2:]

                M = self.pms_sym.M_mod_meso(r, dr, rho, qgrid, weights)
                # v is needed by upwind() below; compute it as in ode_fun_meso
                v = self.pms_sym.v_mod_meso(r, dr, qgrid)

                return np.concatenate(
                    [np.array([0, G(y, t) / M]),
                     upwind(v, rho, dqgrid)])

            if self.t_eval is None:
                self.t_eval = np.linspace(0, self.t_end, 1000)

            res = si.itoint(lambda y, t: ode_fun_meso(t, y), G_func, self.y0,
                            self.t_eval)

            self.sol = lambda: 0
            self.sol.message = "SDE integrator finished."
            self.sol.y = res.T
            self.sol.t = self.t_eval

        else:
            self.sol = solve_ivp(ode_fun_meso, [0, self.t_end],
                                 self.y0_meso,
                                 method=method,
                                 atol=atol,
                                 rtol=rtol,
                                 t_eval=self.t_eval)

        self.r = self.sol.y[0, :]
        self.dr = self.sol.y[1, :]
        self.rho = self.sol.y[2:, :]