Example #1
    def __initial_time_step(y0, dy0, G, masses, nbodies):

        p = 15
        ###########   ESTIMATE INITIAL STEP SIZE
        # Compute scaling
        # sc =  abs(y0)*epsb
        # Evaluate function
        f0 = ODE.ode_n_body_second_order(y0, G, masses)
        d0 = max(abs(y0))
        d1 = max(abs(f0))

        if (d0 < 1e-5) or (d1 < 1e-5):
            dt0 = 1e-6
        else:
            dt0 = 0.01 * (d0 / d1)

        # Perform one Euler step
        y1 = y0 + dt0 * dy0
        dy1 = dy0 + dt0 * f0
        # Call function
        f1 = ODE.ode_n_body_second_order(y1, G, masses)
        d2 = max(abs((f1 - f0))) / dt0

        if max(d1, d2) <= 1e-15:
            dt1 = max([1e-6, dt0 * 1e-3])
        else:
            dt1 = (0.01 / max([d1, d2]))**(1.0 / (p + 1))

        dt = min([100 * dt0, dt1])
        return dt
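This is essentially the standard starting-step heuristic from Hairer, Nørsett and Wanner: form a first guess from the ratio of the state and derivative norms, probe the problem with one explicit Euler step, and cap the result with the order-dependent estimate. Below is a minimal standalone sketch of the same idea applied to a toy oscillator y'' = -y; the helper name accel and the default order p = 15 are illustrative only and not part of the original class.

import numpy as np

def accel(y):
    # Toy second-order right-hand side: y'' = -y
    return -y

def initial_time_step(y0, dy0, p=15):
    f0 = accel(y0)
    d0, d1 = np.max(np.abs(y0)), np.max(np.abs(f0))
    # First guess from the ratio of state and derivative magnitudes
    dt0 = 1e-6 if (d0 < 1e-5 or d1 < 1e-5) else 0.01 * d0 / d1
    # Probe how quickly the derivative changes over one Euler step
    y1 = y0 + dt0 * dy0
    d2 = np.max(np.abs(accel(y1) - f0)) / dt0
    if max(d1, d2) <= 1e-15:
        dt1 = max(1e-6, dt0 * 1e-3)
    else:
        dt1 = (0.01 / max(d1, d2)) ** (1.0 / (p + 1))
    return min(100 * dt0, dt1)

print(initial_time_step(np.array([1.0, 0.0]), np.array([0.0, 1.0])))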
Example #2
    def integrate(self, to_time=None):
        if self.__initialized is False:
            self.initialize()
            self.__initialized = True
        if to_time is not None:
            self.t_end = to_time
        # Allocate dense output
        npts = int(np.floor((self.t_end - self.t_start) / self.h) + 1)

        # Initial state
        x = np.concatenate((self.particles.positions, self.particles.velocities))
        # Vector of times
        sol_time = np.linspace(self.t_start, self.t_start + self.h * (npts - 1), npts)
        energy_init = self.calculate_energy()
        # Bootstrap: one explicit Euler step supplies the second point for the two-step scheme
        dxdt0 = ODE.ode_n_body_first_order(x, self.CONST_G, self.particles.masses)
        x = x + dxdt0 * self.h

        # Launch integration
        count = 2
        for t in sol_time[count:]:
            dxdt = ODE.ode_n_body_first_order(x, self.CONST_G, self.particles.masses)
            # Advance step
            x += 0.5 * self.h * (3 * dxdt - dxdt0)

            # Update
            dxdt0 = dxdt
            self.particles.positions = x[0:self.particles.N * 3]
            self.particles.velocities = x[self.particles.N * 3:]
            self._t = t
            self.store_state()
            energy = self.calculate_energy()
            print(('t = %f, dE/E0 = %g' % (self.t, np.abs(energy - energy_init) / energy_init)))
            count += 1
        self.buf.close()
        return 0
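The loop above is the explicit two-step Adams-Bashforth method: the single Euler step taken before the loop supplies the second starting point, and every later point is advanced with

    x_{n+1} = x_n + (h / 2) * (3 * f(x_n) - f(x_{n-1}))

which is why the previous derivative dxdt0 is kept and overwritten at the end of each iteration.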
Example #3

def solver_dampingForce(state, profile):
    '''
    state:   initial suspension state [Zb, Zt, Zb_dt, Zt_dt, Zb_dtdt, Zt_dtdt] at t=0
    profile: road profile [m], one sample per time interval of length dt

    The damper current `i` (a scalar, or a sequence with one value per
    sample) and the time step `dt` are taken from the enclosing scope.
    '''
    N = len(profile)
    # Array of [Zb, Zt, Zb_dt, Zt_dt, Zb_dtdt, Zt_dtdt] at each time interval
    x = np.zeros((N + 1, 6))
    # Set initial state of the system and initial profile
    x[0] = state  # [Zb, Zt, Zb_dt, Zt_dt, Zb_dtdt, Zt_dtdt] at t=0
    Zh, Zh_dt = profile[0], 0.
    for n in range(0, N - 1):
        # Advance the system one step with the differential equation
        if hasattr(i, '__len__'):  # a sequence gives one current per sample
            x[n + 1] = ODE(x[n][0], x[n][1], x[n][2], x[n][3], Zh, Zh_dt,
                           i[n], dt)
        else:                      # a scalar current is reused every step
            x[n + 1] = ODE(x[n][0], x[n][1], x[n][2], x[n][3], Zh, Zh_dt,
                           i, dt)
        # Get the next profile sample and its finite-difference derivative
        Zh = profile[n + 1]
        Zh_dt = (Zh - profile[n]) / dt
    return x
Example #4
def resp(x, t, s):
    """
        dx / dh = - t(x) * s(x)
    """
    def f(h, y):
        T = t(y)
        S = s(y)
        yprime = empty(shape(y))
        for i in range(len(y)):
            # FIXME: yes, with PLUS sign:
            yprime[i] = T[i] * S[i]
        return yprime

    print "resp: s(x0)=", s(x)

    #
    # This solves for X(t) such that dX / dt = f(t, X(t)), with X(0) =
    # x:
    #
    X = ODE(0.0, x, f)

    #
    # This computes X(inf):
    #
    x8 = limit(X)

    print "resp: s(x8)=", s(x8)
    return x8
Example #5
    def integrate_numpy(self, to_time=None):
        if to_time is not None:
            self.t_end = to_time
        # Allocate dense output
        npts = int(np.floor((self.t_end - self.t_start) / self.h) + 1)

        # Initial state
        x = np.concatenate(
            (self._particles.positions, self._particles.velocities))
        # Vector of times
        sol_time = np.linspace(self.t_start,
                               self.t_start + self.h * (npts - 1), npts)
        energy_init = self.calculate_energy()
        # Launch integration
        count = 1
        for t in sol_time[count:]:
            # Evaluate coefficients
            k1 = ODE.ode_n_body_first_order(x, self.CONST_G,
                                            self._particles.masses)
            k2 = ODE.ode_n_body_first_order(x + 0.5 * self.h * k1,
                                            self.CONST_G,
                                            self._particles.masses)
            k3 = ODE.ode_n_body_first_order(x + 0.5 * self.h * k2,
                                            self.CONST_G,
                                            self._particles.masses)
            k4 = ODE.ode_n_body_first_order(x + self.h * k3, self.CONST_G,
                                            self._particles.masses)

            # Advance the state
            x += (self.h * (k1 + 2 * k2 + 2 * k3 + k4) / 6.0)

            # Store step
            self.particles.positions = x[0:self._particles.N * 3]
            self.particles.velocities = x[self._particles.N * 3:]
            self._t = t
            self.store_state()
            energy = self.calculate_energy()
            print(('t = %f, dE/E0 = %g' %
                   (self.t, np.abs(energy - energy_init) / energy_init)))
            count += 1
        self.buf.close()
        return 0
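This is the classic fourth-order Runge-Kutta scheme: the four slope evaluations are combined as

    x_{n+1} = x_n + (h / 6) * (k1 + 2*k2 + 2*k3 + k4)

with k2 and k3 evaluated half a step ahead and k4 a full step ahead; since the N-body right-hand side has no explicit time dependence, only the state argument is shifted between evaluations.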
Example #6
    def __init__(self, X, G, B, H, tangents, lambdas):
        #
        # Function to integrate (t is "time", not "tangent"):
        #
        #   dg / dt = f(t, g)
        #
        def f(t, g):
            return gprime(t, g, H, G, X, tangents, lambdas)

        #
        # This Func can integrate to T for any T in the interval [0,
        # infinity). To get an idea: very approximately, for step
        # scale h = 1.0 the change of the gradients is G1 = G + 1.0 *
        # f(0.0, G). We will also need these H and G later:
        #
        self._slots = ODE(0.0, G, f), H, G
Example #7
    def integrate_numpy(self, to_time):
        # Some parameters
        epsb = self.tol  # recommended 1e-9
        fac = 0.25

        # For fixed step integration, choose exponent = 0
        # exponent = 0;
        exponent = 1. / 7

        y0 = self.particles.positions
        dy0 = self.particles.velocities
        # Dimension of the system
        dim = len(y0)

        # Tolerance Predictor-Corrector
        tolpc = 1e-18

        # Return Radau spacing
        [hs, nh] = self.__radau_spacing()

        ddy0 = ODE.ode_n_body_second_order(y0, self.CONST_G,
                                           self._particles.masses)

        # Initial time step
        self.h = self.__initial_time_step(y0, dy0, self.CONST_G,
                                          self._particles.masses,
                                          self.particles.N)

        # Initialize
        bs0 = np.zeros((nh - 1, dim))
        bs = np.zeros((nh - 1, dim))
        g = np.zeros((nh - 1, dim))
        self._t = self.t_start
        E = np.zeros((nh - 1, dim))
        ddys = np.zeros((nh, dim))

        r = self.__compute_rs()
        c = self.__compute_cs()

        integrate = True
        if to_time is not None:
            self.t_end = to_time
        imode = 0
        energy_init = self.calculate_energy()
        while integrate:
            # if self.t + self.h > self.t_end:
            #     self.h = self.t_end - self.t
            #     integrate = False

            # Advance one step and return:
            y, dy, ddy, self._t, dt, g, bs, E, bs0, istat, imode = self.gaus_radau15_step(
                y0, dy0, ddy0, ddys, self.h, self.t, self.t_end, nh, hs, bs0,
                bs, E, g, r, c, self.tol, exponent, fac, imode, self.CONST_G,
                self._particles.masses, self.particles.N)

            # Detect end of integration:
            if istat == 2:
                integrate = False

            # Update step
            y0 = y
            dy0 = dy
            ddy0 = ddy

            self.particles.positions = y
            self.particles.velocities = dy
            self.store_state()
            energy = self.calculate_energy()
            # print('t = %f, E/E0 = %g' % (self.t, np.abs(energy-energy_init)/energy_init))
        self.buf.close()
        return 0
Example #8
    def gaus_radau15_step(self, y0, dy0, ddy0, ddys, dt, t, tf, nh, hs, bs0,
                          bs, E, g, r, c, tol, exponent, fac, imode, G, masses,
                          nbodies):

        istat = 0
        while (True):
            # Variable number of iterations in PC
            for ipc in range(0, 12):
                ddys = ddys * 0
                # Advance along the Radau sequence
                for ih in range(0, nh):
                    # Estimate position and velocity with bs0 and current h
                    y = self.__approx_pos(y0, dy0, ddy0, hs[ih], bs, dt)
                    dy = self.__approx_vel(dy0, ddy0, hs[ih], bs, dt)
                    # Evaluate force function and store
                    ddys[ih, :] = ODE.ode_n_body_second_order(
                        y, self.CONST_G, self._particles.masses)
                    g = self.__compute_gs(g, r, ddys, ih)
                    bs = self.__compute_bs_from_gs(bs, g, ih, c)
                # Estimate convergence of PC
                db6 = bs[-1, :] - bs0[-1, :]
                if (max(abs(db6)) / max(abs(ddys[-1, :])) < 1e-16):
                    break
                bs0 = bs

            # Advance the solution:
            y = self.__approx_pos(y0, dy0, ddy0, 1., bs, dt)
            dy = self.__approx_vel(dy0, ddy0, 1., bs, dt)
            ddy = ODE.ode_n_body_second_order(y, self.CONST_G,
                                              self.particles.masses)

            # Estimate relative error
            estim_b6 = max(abs(bs[-1, :])) / max(abs(ddy))
            err = (estim_b6 / tol)**(exponent)

            # Step-size required for next step:
            dtreq = dt / err

            # Accept the step
            if (err <= 1):

                # Report accepted step:
                istat = 1

                # Advance time:
                t = t + dt

                # Update b coefficients:
                bs0 = bs

                # Refine predictor-corrector coefficients for next pass:
                [bs, E] = self.__refine_bs(bs, dtreq / dt, E, imode)

                # Normal refine mode:
                imode = 1

                # Check if tf was reached:
                if (t >= tf):
                    istat = 2

            # Step size for next iteration:
            if (dtreq / dt > 1.0 / fac):
                dt = dt / fac
            elif (dtreq < 1e-12):
                dt = dt * fac
            else:
                dt = dtreq

            # Correct overshooting:
            if (t + dt > tf):
                dt = tf - t
            # Return if the step was accepted:
            if (istat > 0):
                break

        return y, dy, ddy, t, dt, g, bs, E, bs0, istat, imode
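The step-size control at the bottom of the loop is the usual adaptive pattern: the last row of b coefficients serves as the error estimate, so with

    err   = (max|bs[-1]| / max|ddy| / tol) ** exponent    # exponent = 1/7 in the caller above
    dtreq = dt / err

a step is accepted when err <= 1, growth of dt is limited to a factor of 1/fac (4 for the fac = 0.25 used by the caller), and the final overshoot check clamps dt so the integration lands exactly on tf.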
Example #9
    bla.get_evdt_vs_M(fig_name='figs/dwell_evdt',
                      ntraj=32,
                      X0=np.array([0., .0]).astype(float),
                      h0=.5,
                      solver=['Taylor_2p0_additive', 'explicit_1p5_additive'],
                      exp_range=range(8))

if test_ode:
    x = sp.Symbol('x')
    y = sp.Symbol('y')
    z = sp.Symbol('z')
    rho = 28.
    sigma = 10.
    beta = 8. / 3
    lorenz_rhs = [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
    bla = ODE(x=[x, y, z], f=lorenz_rhs)
    X0 = 10 * np.random.random((3, 128))
    fig = plt.figure(figsize=(6, 6))
    ax = fig.add_subplot(111)
    for solver in [['Euler', 'Taylor2'], ['Heun', 'Taylor2'],
                   ['cRK', 'Taylor4']]:
        evdt = bla.get_evdt(X0=X0, solver=solver)
        ax.errorbar(evdt[:, 0],
                    evdt[:, 2],
                    yerr=[evdt[:, 1], evdt[:, 3]],
                    label='{0} vs {1}'.format(solver[0], solver[1]))
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.legend(loc='best')
    fig.savefig('figs/ode_evdt.pdf', format='pdf')
Example #10
    def __init__(self, X0, G0, B, H, tangents, lambdas):
        #
        # Function to integrate (h is "evolution time"):
        #
        #   dx / dh = f(h, x)
        #
        def xprime(h, x):
            """For the descent procedure return

              dx / dh = - H * ( g(x0 + x)  - lambda * t(x0 + x) )

            The Lagrange contribution parallel to the tangent t(x) is computed
            by the function lambdas(X, G, H, T).

            The gradient g(x) has the following relation to the coordinates:

                g - g0 = B * x

            with B being the inverse of H.

            Note that  this is equivalent  to the descent procedure

              dy / dh = - ( y  - lambda(y) * t(y) )

            for local coordinates  y (aka gradient g) defined  by a one-to-one
            relation:

              (x - x0) = H * (y - y0)

            By now y is the same as the gradient g, though the positive
            definite Hessian H may disturb this equivalence in some PES
            regions.

            The current form of xprime() may be used either to ensure
            orthogonality of dx / dh to the tangents or to preserve the
            image spacing, depending on the definition of the function
            lambdas() that delivers the Lagrangian factors. I am afraid
            one cannot satisfy both.

            NOTE: imaginary time variable "h" is not used anywhere.
            """

            # G = G(X), here B should represent PES well:
            G = G0 + B.app(x)

            # T = T(X(G)):
            T = tangents(X0 + x)

            # Compute Lagrange factors, here H should be non-negative:
            LAM = lambdas(X0 + x, G, H, T)

            #
            # Add Lagrange forces; only the component parallel to the tangents is
            # affected:
            #
            G2 = empty(shape(G))
            for i in xrange(len(G)):
                G2[i, ...] = G[i] - LAM[i] * T[i]

            # here H should be non-negative:
            return -H.inv(G2)
            # return dX1

        #
        # This Func can integrate to T for any T in the interval [0,
        # infinity). To get an idea: very approximately, for step
        # scale h = 1.0 the change of the gradients is G1 = G + 1.0 *
        # f(0.0, G). We will also need these H and G later:
        #
        self.ode = ODE(0.0, zeros(shape(X0)), xprime)
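The equivalence claimed in the docstring is just the chain rule: differentiating the one-to-one relation (x - x0) = H * (y - y0) with respect to the evolution time h gives

    dx / dh = H * dy / dh

so the plain descent dy / dh = - ( y - lambda(y) * t(y) ) in gradient coordinates y is the same motion as the H-preconditioned descent dx / dh = - H * ( g(x0 + x) - lambda * t(x0 + x) ) stated at the top, as long as the quadratic model g - g0 = B * x with B the inverse of H holds.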
Example #11
class Step1(Func):
    """
    A function of a scalar scale  parameter "h" in the interval [0, 1]
    that implements a "step towards the stationary solution". Think of
    a scaled Newton step dx = - h * H * g towards the stationary point
    but keep in mind that, due to constraints and path specifics, the
    way towards the stationary point is not necessarily the straight
    line implied by the Newton formula.

    To get a rough estimate of the (remaining) step one could use

        step = Step(X, G, B, H, tangents, lambdas)
        dX = (1.0 - h) * step.fprime(h)

    which, for the special case h = 0, involves no ODE integration.
    """
    def __init__(self, X0, G0, B, H, tangents, lambdas):
        #
        # Function to integrate (h is "evolution time"):
        #
        #   dx / dh = f(h, x)
        #
        def xprime(h, x):
            """For the descent procedure return

              dx / dh = - H * ( g(x0 + x)  - lambda * t(x0 + x) )

            The Lagrange contribution parallel to the tangent t(x) is computed
            by the function lambdas(X, G, H, T).

            The gradient g(x) has the following relation to the coordinates:

                g - g0 = B * x

            with B being the inverse of H.

            Note that  this is equivalent  to the descent procedure

              dy / dh = - ( y  - lambda(y) * t(y) )

            for local coordinates  y (aka gradient g) defined  by a one-to-one
            relation:

              (x - x0) = H * (y - y0)

            By now y is the same as the gradient g, though the positive
            definite Hessian H may disturb this equivalence in some PES
            regions.

            The current form of xprime() may be used either to ensure
            orthogonality of dx / dh to the tangents or to preserve the
            image spacing, depending on the definition of the function
            lambdas() that delivers the Lagrangian factors. I am afraid
            one cannot satisfy both.

            NOTE: imaginary time variable "h" is not used anywhere.
            """

            # G = G(X), here B should represent PES well:
            G = G0 + B.app(x)

            # T = T(X(G)):
            T = tangents(X0 + x)

            # Compute Lagrange factors, here H should be non-negative:
            LAM = lambdas(X0 + x, G, H, T)

            #
            # Add Lagrange forces; only the component parallel to the tangents is
            # affected:
            #
            G2 = empty(shape(G))
            for i in xrange(len(G)):
                G2[i, ...] = G[i] - LAM[i] * T[i]

            # here H should be non-negative:
            return -H.inv(G2)
            # return dX1

        #
        # This Func can integrate to T for any T in the interval [0,
        # infinity). To get an idea: very approximately, for step
        # scale h = 1.0 the change of the gradients is G1 = G + 1.0 *
        # f(0.0, G). We will also need these H and G later:
        #
        self.ode = ODE(0.0, zeros(shape(X0)), xprime)

    def taylor(self, h):
        #
        # Upper integration limit T (again "time", not "tangent"):
        #
        if h < 1.0:
            #
            # Asymptotically the gradients decay as exp[-t]
            #
            T = -log(1.0 - h)
            X, Xprime = self.ode.taylor(T)

            return X, Xprime / (1.0 - h)
        else:
            X = limit(self.ode)

            # FIXME:   how  to  approach   limit(self.ode.fprime(T)  *
            # exp(T))?
            return X, None
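The substitution in taylor() changes the independent variable: with T = -log(1 - h) one has dT/dh = 1 / (1 - h), so by the chain rule

    dX/dh = (dX/dT) * (dT/dh) = Xprime / (1 - h)

which is exactly the pair returned for h < 1. As h approaches 1 the upper limit T goes to infinity, so the h >= 1 branch returns the fixed point limit(self.ode) and None for the derivative (see the FIXME).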