Example #1
def taylor(f, a, n):
    if isinstance(f, Callable):
        coefs = mpmath.taylor(f, a, n)
    else:
        coefs = mpmath.taylor(sp.lambdify(x, f), a, n)
    coefs = np.array([float(i) for i in coefs])
    return coefs
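The snippet above leaves its imports implicit; a hedged usage sketch, assuming mpmath, numpy, sympy, a module-level symbol x and Callable from collections.abc:

import mpmath
import numpy as np
import sympy as sp
from collections.abc import Callable

x = sp.symbols('x')

# Callable branch: degree-4 Maclaurin coefficients of cos(x).
print(taylor(mpmath.cos, 0, 4))   # ~ [1.0, 0.0, -0.5, 0.0, 0.0417]

# Expression branch: taylor(sp.exp(x), 0, 3) lambdifies the expression first;
# lambdifying with the mpmath backend (sp.lambdify(x, expr, 'mpmath')) avoids
# losing precision during the numerical differentiation.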
 def get_feature_importance(self, x0):
     # Returns the feature importance for a prediction at x0
     importance_list = [self.eps for _ in range(self.dim_x)]
     for k in range(len(self.terms_list)):
         g_k, v_k, w_k = self.terms_list[k]
         x_k = np.dot(v_k, x0) / (np.sqrt(self.dim_x) * np.linalg.norm(v_k))
         if x_k > 0:
             coef_k = mpmath.chop(mpmath.taylor(g_k.math_expr, x_k, 1))
             for n in range(self.dim_x):
                 importance_list[n] += sympify(
                     w_k * coef_k[1] * v_k[n] / (np.sqrt(self.dim_x) * np.linalg.norm(v_k)))
     return importance_list
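The first-order Taylor coefficient used above is simply the derivative of the ridge function g_k at x_k; a standalone check of that identity with mpmath (tanh chosen arbitrarily):

import mpmath

g = mpmath.tanh
x_k = mpmath.mpf('0.3')

# taylor(g, x_k, 1) returns [g(x_k), g'(x_k)]; the second entry is the slope
# that get_feature_importance spreads over the input directions v_k.
slope = mpmath.chop(mpmath.taylor(g, x_k, 1))[1]
print(slope, mpmath.diff(g, x_k))   # both ~ 1 - tanh(0.3)**2 ~ 0.9151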
def pade(t, n):
    """ pade approximation of a time delay, as per mathworks' controls toolkit.
    supports arbitrary precision mathematics via mpmath and sympy"
    for more information, see:
        * http://home.hit.no/~hansha/documents/control/theory/pade_approximation.pdf 
        * http://www.mathworks.com/help/control/ref/pade.html
    """
    # e**(-s*t) -> Laplace transform of a time delay of duration 't'
    # e**x -> Taylor series
    taylor = mpmath.taylor(sympy.exp, 0, n * 2)
    (num, den) = mpmath.pade(taylor, n, n)
    num = sum([x * (-t * s)**y for y, x in enumerate(num[::-1])])
    den = sum([x * (-t * s)**y for y, x in enumerate(den[::-1])])
    return num / den
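The function body refers to a module-level SymPy symbol s; a hedged usage sketch (the delay and order values below are made up):

import mpmath
import sympy

s = sympy.symbols('s')   # Laplace variable used inside pade() above

H = pade(0.1, 3)         # 3rd-order Pade approximation of exp(-0.1*s)
# Sanity check at s = 1: the approximant should be close to exp(-0.1).
print(H.subs(s, 1).evalf(), sympy.exp(-0.1).evalf())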
Example #4
def collocation(n, w):
    t = sorted(
        np.squeeze(
            np.asarray(
                np.roots(chop(taylor(lambda x: chebyt(n, x), 0,
                                     n))[::-1]))))  # roots of the Chebyshev polynomial

    A = np.eye(n)
    B = np.ones((n, 1))
    for i in range(n):
        B[i] = f(t[i])
        for j in range(n):
            A[i][j] = sympy.lambdify(x, Lu(w[j]))(t[i])
    C = np.linalg.solve(A, B)
    mu = infnorm(A) * infnorm(np.linalg.inv(A))
    return A, B, C, mu
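The first statement recovers the Chebyshev nodes numerically; a standalone check against the closed form cos((2k - 1)*pi/(2n)), assuming chop, taylor and chebyt come from mpmath as the call pattern suggests:

import numpy as np
from mpmath import chop, taylor, chebyt

n = 5
coeffs = chop(taylor(lambda x: chebyt(n, x), 0, n))[::-1]
roots = np.sort(np.roots([float(c) for c in coeffs]))
exact = np.sort(np.cos((2*np.arange(1, n + 1) - 1)*np.pi/(2*n)))
print(np.allclose(roots, exact))   # True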
 def get_taylor(self, x0, approx_order):
     # Returns the Taylor expansion around x0 of order approx_order for our model
     expression = 0
     symbol_list = [Symbol("X" + str(k)) for k in range(self.dim_x)]
     for k in range(len(self.terms_list)):
         g_k, v_k, w_k = self.terms_list[k]
         x_k = np.dot(v_k, x0) / (np.sqrt(self.dim_x) * np.linalg.norm(v_k))
         if x_k > 0:
             P_k = 0
             for n in range(self.dim_x):
                 P_k += v_k[n] * symbol_list[n] / (np.sqrt(self.dim_x) * np.linalg.norm(v_k))
             coef_k = mpmath.chop(mpmath.taylor(g_k.math_expr, x_k, approx_order))
             for n in range(len(coef_k)):
                 if n > 0:
                     expression += w_k * coef_k[n] * (P_k - x_k) ** n
                 else:
                     expression += w_k * coef_k[n]
     return expression
Example #6
def inverse_taylor(f, x0, n):
    """
    Taylor polynomial coefficients of the inverse of f.

    Given a callable f, and a point x0, find the Taylor polynomial of degree n
    of the inverse of f at x0.

    If y0 = f(x0), and if the inverse of f is g, this function returns
    the Taylor polynomial coefficients of g(y) at y0.

    f'(x0) must be nonzero.

    Examples
    --------
    >>> import mpmath
    >>> mpmath.mp.dps = 40

    Compute the Taylor coefficients of the inverse of the sine function
    sin(x) at x=1.

    >>> inverse_taylor(mpmath.sin, 1, 5)
    [mpf('1.0'),
     mpf('1.850815717680925617911753241398650193470396'),
     mpf('2.667464736243829370645086306803786566557799'),
     mpf('8.745566949501434796799480049601499630239969'),
     mpf('34.55691117453807764026147509020588920253199'),
     mpf('152.9343377104818039879748855586655382173672')]

    Compare that to computing the Taylor polynomial coefficients of
    the arcsine function directly:

    >>> mpmath.taylor(mpmath.asin, mpmath.sin(1), 5)
    [mpf('1.0'),
     mpf('1.850815717680925617911753241398650193470396'),
     mpf('2.667464736243829370645086306803786566557799'),
     mpf('8.745566949501434796799480049601499630240153'),
     mpf('34.55691117453807764026147509020588920253199'),
     mpf('152.9343377104818039879748855586655382173702')]
    """
    x0 = mpmath.mpf(x0)
    c = mpmath.taylor(f, x0, n)
    r, c0 = revert(c)
    r[0] = x0
    return [mpmath.mpf(t) for t in r]
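As a quick sanity check on the leading behaviour: the linear coefficient of the inverse series must equal 1/f'(x0), which for the sine example above is 1/cos(1):

import mpmath
mpmath.mp.dps = 40
print(1 / mpmath.cos(1))
# ~ 1.8508157176809256..., matching the second coefficient printed above.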
Example #7
 def taylor_expand(expr, around=0):
     if expr.args == ():
         return expr
     args = [ODE.taylor_expand(a) for a in expr.args]
     if expr.func in ODE.TRANSCEDENTAL_FUNCS:
         if len(args) != 1:
             raise RuntimeError(
                 'Cannot create a taylor series '
                 'approximation of: ', expr)
         else:
             # XXX: Build the polynomial for arg
             coeffs = poly.taylor(expr.func, around, ODE.NUM_TERMS)
             # print(coeffs)
             coeffs = [(S.Mul(float(a), S.Mul(*[args[0]
                                                for i in range(c)])))
                       for c, a in enumerate(coeffs)][::-1]
             # print(coeffs)
             return S.Add(*coeffs)
     else:
         return expr.func(*args)
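Outside the ODE class this method is hard to run directly (it relies on ODE.TRANSCEDENTAL_FUNCS, ODE.NUM_TERMS and a poly.taylor helper), but the substitution it performs is easy to sketch on its own: replace a transcendental function with its truncated Maclaurin polynomial built from mpmath.taylor coefficients.

import mpmath
import sympy as S

NUM_TERMS = 5
y = S.Symbol('y')

# Truncated Maclaurin polynomial of sin, evaluated at the symbolic argument y.
coeffs = mpmath.chop(mpmath.taylor(mpmath.sin, 0, NUM_TERMS))
poly_sin = S.Add(*[S.Float(float(a)) * y**c for c, a in enumerate(coeffs)])
print(poly_sin)   # ~ 1.0*y - 0.1667*y**3 + 0.00833*y**5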
Example #8
def pade_coefs(*, pade_order, k0, dx, alpha=0):
    mpmath.mp.dps = 63

    if alpha == 0:

        def sqrt_1plus(x):
            return mpmath.mp.sqrt(1 + x)
    else:
        a_n, b_n = pade_sqrt_coefs(pade_order[1])

        def sqrt_1plus(x):
            return pade_sqrt(x, a_n, b_n, alpha)

    def propagator_func(s):
        return mpmath.mp.exp(1j * k0 * dx * (sqrt_1plus(s) - 1))

    t = mpmath.taylor(propagator_func, 0, pade_order[0] + pade_order[1] + 20)
    p, q = mpmath.pade(t, pade_order[0], pade_order[1])
    num_coefs = np.array([complex(v) for v in p])
    den_coefs = np.array([complex(v) for v in q])
    return num_coefs[::-1], den_coefs[::-1]
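A hedged usage sketch (the wavenumber k0 and step dx below are made up): because the returned arrays are reversed into degree-descending order, they can be passed directly to np.polyval to evaluate the rational approximation of exp(1j*k0*dx*(sqrt(1 + s) - 1)).

import cmath
import numpy as np

k0, dx = 2*np.pi, 0.5    # hypothetical wavenumber and range step
num, den = pade_coefs(pade_order=(7, 8), k0=k0, dx=dx)

s = 0.05
approx = np.polyval(num, s) / np.polyval(den, s)
exact = cmath.exp(1j*k0*dx*(cmath.sqrt(1 + s) - 1))
print(abs(approx - exact))   # expected to be very small for small s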
def gauss_lobatto_points(start, stop, num_points):
    """Get the node points for Gauss-Lobatto quadrature.

    Rather than using the optimizations in
    :func:`.dg1.gauss_lobatto_points`, this uses :mod:`mpmath` utilities
    directly to find the roots of :math:`P_n'(x)` (where :math:`n` is equal
    to ``num_points - 1``).

    :type start: :class:`mpmath.mpf` (or ``float``)
    :param start: The beginning of the interval.

    :type stop: :class:`mpmath.mpf` (or ``float``)
    :param stop: The end of the interval.

    :type num_points: int
    :param num_points: The number of points to use.

    :rtype: :class:`numpy.ndarray`
    :returns: 1D array, the interior quadrature nodes.
    """
    def leg_poly(value):
        """Legendre polynomial :math:`P_n(x)`."""
        return mpmath.legendre(num_points - 1, value)

    def leg_poly_diff(value):
        """Legendre polynomial derivative :math:`P_n'(x)`."""
        return mpmath.diff(leg_poly, value)

    poly_coeffs = mpmath.taylor(leg_poly_diff, 0, num_points - 2)[::-1]
    inner_roots = mpmath.polyroots(poly_coeffs)
    # Create result.
    start = mpmath.mpf(start)
    stop = mpmath.mpf(stop)
    result = [start]
    # Convert the inner nodes to the interval at hand.
    half_width = (stop - start) / 2
    for index in six.moves.xrange(num_points - 2):
        result.append(start + (inner_roots[index] + 1) * half_width)
    result.append(stop)
    return np.array(result)
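For example, five Gauss-Lobatto points on [-1, 1] are the endpoints together with the roots of P_4'(x), i.e. 0 and +/- sqrt(3/7) (the snippet assumes mpmath, numpy and six are importable):

import mpmath
import numpy as np
import six

mpmath.mp.dps = 30
print(gauss_lobatto_points(-1, 1, 5))
# contains -1, 1, 0 and +/- sqrt(3/7) ~ +/- 0.6546536707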
Example #10
    def __init__(self, n):
        self.n = n
        mpmath.mp.dps = 100

        def func(x):
            return mpmath.exp(-x) * mpmath.besseli(0, x)

        t = mpmath.taylor(func, 0, 2 * n + 1)
        self.p, self.q = mpmath.pade(t, n, n)
        # self.pade_coefs = list(zip_longest([-1 / complex(v) for v in mpmath.polyroots(p[::-1], maxsteps=2000)],
        #                               [-1 / complex(v) for v in mpmath.polyroots(q[::-1], maxsteps=2000)],
        #                               fillvalue=0.0j))
        #self.pade_roots_num = [complex(v) for v in mpmath.polyroots(self.p[::-1], maxsteps=5000)]
        #self.pade_roots_den = [complex(v) for v in mpmath.polyroots(self.q[::-1], maxsteps=5000)]
        self.pade_coefs_num = [complex(v) for v in self.p]
        self.pade_coefs_den = [complex(v) for v in self.q]
        self.taylor_coefs = [complex(v) for v in t]

        a = [self.q[-1]] + [b + c for b, c in zip(self.q[:-1:], self.p)]
        self.a_roots = [
            complex(v) for v in mpmath.polyroots(a[::-1], maxsteps=5000)
        ]
Example #11
def pade_propagator_coefs(*, pade_order, diff2, k0, dx, spe=False, alpha=0):
    """

    :param pade_order: order of Pade approximation, tuple, for ex (7, 8)
    :param diff2:
    :param k0:
    :param dx:
    :param spe:
    :param alpha: rotation angle, see F. A. Milinazzo et al. Rational square-root approximations for parabolic equation algorithms. 1997. Acoustical Society of America.
    :return:
    """

    mpmath.mp.dps = 63
    if spe:

        def sqrt_1plus(x):
            return 1 + x / 2
    elif alpha == 0:

        def sqrt_1plus(x):
            return mpmath.mp.sqrt(1 + x)
    else:
        a_n, b_n = pade_sqrt_coefs(pade_order[1])

        def sqrt_1plus(x):
            return pade_sqrt(x, a_n, b_n, alpha)

    def propagator_func(s):
        return mpmath.mp.exp(1j * k0 * dx * (sqrt_1plus(diff2(s)) - 1))

    t = mpmath.taylor(propagator_func, 0, pade_order[0] + pade_order[1] + 2)
    p, q = mpmath.pade(t, pade_order[0], pade_order[1])
    pade_coefs = list(
        zip_longest([
            -1 / complex(v) for v in mpmath.polyroots(p[::-1], maxsteps=2000)
        ], [-1 / complex(v) for v in mpmath.polyroots(q[::-1], maxsteps=2000)],
                    fillvalue=0.0j))
    return pade_coefs
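The zip of negative reciprocal roots suggests the returned (a_j, b_j) pairs are meant as factors of the rational approximant, propagator ~ prod_j (1 + a_j*s)/(1 + b_j*s); a rough consistency check under that reading (parameter values are made up, diff2 taken as the identity):

import cmath
import numpy as np

k0, dx = 2*np.pi, 0.5
coefs = pade_propagator_coefs(pade_order=(7, 8), diff2=lambda s: s,
                              k0=k0, dx=dx)

s = 0.05
approx = np.prod([(1 + a*s)/(1 + b*s) for a, b in coefs])
exact = cmath.exp(1j*k0*dx*(cmath.sqrt(1 + s) - 1))
print(abs(approx - exact))   # small if the factored reading is correct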
Example #12
def get_path_n(f, T=None):
    # TODO cite paper here
    """
    Get the formula to estimate the number of paths of a function, given its longest path.

    :param f: function addr
    :param T: edges matrix of the function f
    :return: formula to estimate the number of paths
    """

    g_z = get_generating_function(f, T)
    expr = g_z.as_numer_denom()[1]
    rs = sympy.roots(expr)
    D = len(set(rs.keys()))  # number of distinct roots
    d = sum(rs.values())  # number of roots, counted with multiplicity

    # get taylor coefficients
    f = sympy.utilities.lambdify(list(g_z.free_symbols), g_z)
    taylor_coeffs = mpmath.taylor(
        f, 0, d - 1)  # get the first d terms of taylor expansion

    #
    # calculate path_n
    #

    n = sympy.var('n')
    e_path_n = 0
    e_upper_n = 0
    coeff = []

    for i in range(1, D + 1):
        ri, mi = list(rs.items())[i - 1]
        for j in range(mi):
            c_ij = sympy.var('c_' + str(i) + str(j))
            coeff.append(c_ij)
            e_path_n += c_ij * (n**j) * ((1 / ri)**n)
            if ri.is_complex:
                ri = sympy.functions.Abs(ri)
            e_upper_n += c_ij * (n**j) * ((1 / ri)**n)
    equations = []

    for i, c in enumerate(taylor_coeffs):
        equations.append(sympy.Eq(e_path_n.subs(n, i), c))
    coeff_sol = sympy.linsolve(equations, coeff)

    # assert unique solution
    assert type(
        coeff_sol
    ) == sympy.sets.FiniteSet, "Zero or multiple solutions returned for path_n coefficients"
    coeff_sol = list(coeff_sol)[0]
    coeff_sol = [sympy.N(c, ROUND) for c in coeff_sol]

    for val, var in zip(coeff_sol, coeff):
        name = var.name
        e_path_n = e_path_n.subs(name, val)
        e_upper_n = e_upper_n.subs(name, val)

    return sympy.utilities.lambdify(list(e_path_n.free_symbols),
                                    e_path_n), sympy.utilities.lambdify(
                                        list(e_upper_n.free_symbols),
                                        e_upper_n)
Example #13
import mpmath


def f(x):
    return (mpmath.pi + x + mpmath.sin(x)) / (2*mpmath.pi)


# Note: 40 digits might be overkill; a few more digits than the default
# might be sufficient.
mpmath.mp.dps = 40
ts = mpmath.taylor(f, -mpmath.pi, 20)
p, q = mpmath.pade(ts, 9, 10)

p = [float(c) for c in p]
q = [float(c) for c in q]
print('p =', p)
print('q =', q)
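Since the expansion point is -pi, the rational approximation is evaluated in the shifted variable x + pi; continuing the script above:

import numpy as np

def pade_approx(x):
    t = x + float(mpmath.pi)              # shift to the expansion point -pi
    return np.polyval(p[::-1], t) / np.polyval(q[::-1], t)

print(pade_approx(-1.0), float(f(-1.0)))  # should agree to many digits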
def compute_alpha(n):
    """alpha_n from DLMF 8.12.13"""
    coeffs = mp.taylor(eta, 0, n - 1)
    return lagrange_inversion(coeffs)
Example #15
    def run_ESN(self,
                input_dat=None,
                around=0,
                order=None,
                time=200,
                init='zero',
                force_start=None):
        """
        Runs the reservoir computer for a given number of time steps, with or
        without input data.

        Args:
            self (simple_ESN object): the reservoir computer object.
            input_dat (time x K array or None): the input data. time is the
                number of time steps, K is the number of inputs.
            around (array or float): provides coefficients to neuron activation
                functions.
            order (str,int): 'power' gives a Taylor expansion with coefficients
                given by around, 'relu' gives a rectified linear activation
                function with input scaling around, an integer gives the Taylor
                expansion of tanh about around, while any non-integer, non-string
                value gives the standard tanh activation function.
            time (int): number of steps to run the reservoir.
            init (str): 'last' starts the run at the last iteration of the
                training run. 'zero' starts the run with all neurons set to
                zero, and any other value starts the run with neuron
                displacements randomized.
            force_start (anything): if force_start is not None, starts output
                at last value of the teacher data.
        Yields:
            self.states (timexN array): states of the reservoir during the run.
            self.outputs (timexL array): reservoir outputs for run.
        """
        if input_dat is None:
            input_dat = np.zeros([time, self.K])
        if init == 'last':
            x = self.M[-1, :]
        elif init == 'zero':
            x = np.zeros([self.N])
        else:
            x = np.random.random_sample([self.N])
        if order == 'power':
            coeffs = np.array(around)
            update = aux.taylor_exp
        elif order == 'relu':
            if around == 0:
                coeffs = 1
            else:
                coeffs = around
            update = aux.ReLU
        elif isinstance(order, int) is True:
            mpm.dps = 16
            mpm.pretty = True
            coeffs = np.array(mpm.chop(mpm.taylor(mpm.tanh, around, order)),
                              dtype=np.float64)
            update = aux.taylor_exp
        else:
            coeffs = None
            update = np.tanh
        if force_start is not None:
            y = force_start  #should be last time step of teacher
        else:
            y = np.zeros([self.L])
        a = self.a

        bias = self.bias

        states = np.zeros([time, self.N])
        outputs = np.zeros([time, self.L])

        for t in range(0, time):
            u = input_dat[t, :]
            x = (1 - a) * x + a * update(
                np.dot(self.W, x) + np.dot(self.W_in, u) +
                np.dot(self.W_fb, y) + bias, coeffs)
            y = np.dot(self.W_out, x)

            states[t, :] = x
            outputs[t, :] = y
        self.input = input_dat
        self.states = states
        self.outputs = outputs
        return states
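aux.taylor_exp and aux.ReLU are not shown in this listing. Given how coeffs is built (ascending-order Taylor coefficients of tanh from mpm.taylor), a purely hypothetical stand-in for the polynomial activation could look like this:

import numpy as np

def taylor_exp(x, coeffs):
    """Hypothetical stand-in for aux.taylor_exp: evaluate the polynomial
    sum_k coeffs[k] * x**k elementwise, with coeffs in ascending order."""
    return np.polyval(np.asarray(coeffs)[::-1], x)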
 def test_log(self):
     with mp.workdps(30):
         logcoeffs = mp.taylor(lambda x: mp.log(1 + x), 0, 10)
         expcoeffs = mp.taylor(lambda x: mp.exp(x) - 1, 0, 10)
         invlogcoeffs = lagrange_inversion(logcoeffs)
         mp_assert_allclose(invlogcoeffs, expcoeffs)
 def test_sin(self):
     with mp.workdps(30):
         sincoeffs = mp.taylor(mp.sin, 0, 10)
         asincoeffs = mp.taylor(mp.asin, 0, 10)
         invsincoeffs = lagrange_inversion(sincoeffs)
         mp_assert_allclose(invsincoeffs, asincoeffs, atol=1e-30)
Example #19
    def train_ESN(self,
                  input_dat=None,
                  teacher=None,
                  around=0,
                  order=None,
                  washout=100,
                  noise=None,
                  bias=0,
                  mp=True,
                  B=0):
        """
        Single shot training of a simple_ESN object. Generates W_out.

        Args:
            self (simple_ESN object): the reservoir computer object.
            input_dat (TxK array): the training input data. T is the temporal
                length of the data, K is the number of inputs.
            teacher (TxL array): the training output ("teacher") data. T is the
                temporal length of the data, L is the number of outputs.
            around (array or float): provides coefficients to neuron activation
                functions.
            order (str,int): 'power' gives a Taylor expansion with coefficients
                given by around, 'relu' gives a rectified linear activation
                function with input scaling around, an integer gives the Taylor
                expansion of tanh about around, while any non-integer,
                non-string value gives the standard tanh activation function.
            washout (int): number of discarded initial transient steps.
            noise (float): amplitude of the uniform noise added to each training update step.
            bias (float): bias added to each update step.
            mp (Boolean True/False): True for Moore-Penrose pseudoinverse,
                False for ridge regression.
            B (float): ridge regression parameter.
        Yields:
            self.W_out (LxN array): trained simple_ESN output weights.
        """
        time = teacher.shape[0]  #(time,L)
        if order == 'power':
            coeffs = np.array(around)
            update = aux.taylor_exp
        elif order == 'relu':
            if around == 0:
                coeffs = 1
            else:
                coeffs = around
            update = aux.ReLU
        elif isinstance(order, int) is True:
            mpm.dps = 16
            mpm.pretty = True
            coeffs = np.array(mpm.chop(mpm.taylor(mpm.tanh, around, order)),
                              dtype=np.float64)
            update = aux.taylor_exp
        else:
            coeffs = None
            update = np.tanh
        M = np.zeros([time - washout, self.N])  #state collecting matrix
        T = np.zeros([time - washout, self.L])  #target outputs
        x = np.zeros([self.N])
        y = np.zeros([self.L])

        a = self.a
        self.bias = bias
        for t in range(0, time):
            u = input_dat[t, :]
            if noise is not None:
                v = rn.uniform(-noise, noise)
            else:
                v = 0
            x = (1 - a) * x + a * update(
                np.dot(self.W, x) + np.dot(self.W_in, u) +
                np.dot(self.W_fb, y) + v + bias, coeffs)
            if t >= washout:
                k = t - washout
                M[k, :] = x  #just use internal states for now
                T[k, :] = teacher[t, :]
            y = teacher[t, :]

        #Set output weights
        self.M = M
        self.T = T
        if mp is True:  #Moore-Penrose pseudoinverse
            W_out = np.dot(np.linalg.pinv(M), T)
            W_out = np.transpose(W_out)
        else:  #Ridge Regression
            sq = np.dot(np.transpose(M), M)
            inv = np.linalg.inv(sq + B * np.identity(sq.shape[0]))
            W_out = np.dot(np.dot(np.transpose(T), M), inv)
        self.W_out = W_out
        return W_out
Example #21
from approx1D import *
import numpy as np 
import sympy as sm
import mpmath as mp

x = sm.Symbol('x')
f = sm.sin(x)
phi = [x**(2*j + 1) for j in range(5)]
Omega = [0,0]
for k in range(2, 13):
    Omega[1] = k*np.pi/2
    u = least_squares(f, phi, Omega)
    comparison_plot(f, u, Omega, filename="tmp{0}.pdf".format(k))
    figure()

ucoeff = mp.taylor(mp.sin, 0.0, 9)
u = 0.0
for i in range(len(ucoeff)):
    u = u + ucoeff[i]*x**i

comparison_plot(f,u,Omega, filename="taylor.pdf")
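The same Taylor polynomial can be obtained symbolically; a small alternative sketch using SymPy's own series expansion (the output filename is illustrative):

u_sym = sm.sin(x).series(x, 0, 10).removeO()
print(u_sym)   # terms x, -x**3/6, x**5/120, -x**7/5040, x**9/362880
comparison_plot(f, u_sym, Omega, filename="taylor_sympy.pdf")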