Example #1
def broyden (func, x1, x2, tol = 1e-8):
    """
    Implementation of Broyden's method for finding a root of a multivariable
    function.

    Inputs:
        func: The vector function you wish to find a root for
        x1: One initial guess vector
        x2: A second initial guess vector
        [tol]: optional. Tolerance level

    Returns:
        x such that func(x) = 0
    """
    if abs(func(x1)) < tol:
        return x1
    if abs(func(x2)) < tol:
        return x2

    x1Array = sp.array(x1)
    x2Array = sp.array(x2)
    jaco = Jacobian(func)

    f1 = func(x1)
    f2 = func(x2)

    deltax = (x2-x1)
    Jacob = jaco.jacobian(deltax)

    Jnew = Jacob + (f2- f1 - sp.dot(sp.dot(Jacob,deltax),deltax.T))\
    /(la.norm(deltax)**2)

    xnew = la.inv(Jnew) * (-f2) + x2

    x1 = x2
    x2 = xnew
    Jacob = Jnew

    i=0
    while abs(la.norm(func(xnew))) > tol:
        f1 = func(x1)
        f2 = func(x2)

        deltax = (x2-x1)

        Jnew = Jacob + (f2- f1 - sp.dot(sp.dot(Jacob,deltax),deltax.T))\
        /(la.norm(deltax)**2)

        xnew = la.inv(Jnew) * (-f2) + x2

        x1 = x2
        x2 = xnew
        Jacob = Jnew

        i += 1
        print(i)
    return xnew
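For reference, a minimal self-contained sketch of the same idea in plain NumPy, using numdifftools only for the initial Jacobian estimate; the 2-D test system fun2d is made up for illustration and is not part of the example above.

import numpy as np
from numdifftools import Jacobian

def broyden_sketch(fun, x0, tol=1e-8, maxiter=100):
    """Good Broyden update: J <- J + outer(dF - J @ dx, dx) / (dx @ dx)."""
    x = np.asarray(x0, dtype=float)
    f = fun(x)
    J = Jacobian(fun)(x)                      # numerical Jacobian as the initial estimate
    for _ in range(maxiter):
        dx = -np.linalg.solve(J, f)           # quasi-Newton step
        x_new = x + dx
        f_new = fun(x_new)
        if np.linalg.norm(f_new) < tol:
            return x_new
        J = J + np.outer(f_new - f - J @ dx, dx) / (dx @ dx)
        x, f = x_new, f_new
    raise RuntimeError("did not converge in %d iterations" % maxiter)

# hypothetical 2-D test system with a root at (1, 1)
fun2d = lambda v: np.array([v[0]**2 - 1.0, v[0]*v[1] - 1.0])
print(broyden_sketch(fun2d, [2.0, 2.0]))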
Example #2
File: newtons.py Project: monadplus/TOML
def newtons(objFun, x0, jacobian=None, hessian=None, accuracy=1e-4):
    """Newton's Method"""
    # For some reason, Jacobian returns the jacobian in matrix form i.e. [[]]
    if jacobian is None: jacobian = Jacobian(objFun)
    if hessian is None: hessian = Hessian(objFun)

    def getLambdaSquare(x):
        j = jacobian(x).reshape(-1)
        hinv = inv(hessian(x))
        return j.T @ hinv @ j

    def getStep(x):
        j = jacobian(x).reshape(-1)
        hinv = inv(hessian(x))
        return -hinv @ j

    steps = 0
    x = x0
    d = getStep(x)
    l = getLambdaSquare(x)
    # TODO l may be negative...
    while (l / 2 > accuracy):
        t = backtrackingLineSearch(objFun, x, jacobian=jacobian)
        x = x + t * d
        d = getStep(x)
        l = getLambdaSquare(x)
        steps += 1
    return (x, objFun(x), steps, l)
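The quantity `l` here is the squared Newton decrement, lambda^2(x) = grad f(x)^T [hess f(x)]^(-1) grad f(x); for the quadratic model, lambda^2 / 2 estimates the remaining suboptimality, which is why the loop stops once l / 2 drops below `accuracy`. A quick standalone check of that quantity on a toy quadratic (the objective below is made up; analytically lambda^2 = 14 at this point):

import numpy as np
from numpy.linalg import inv
from numdifftools import Jacobian, Hessian

f = lambda x: x[0]**2 + 3*x[1]**2           # toy convex objective
x = np.array([2.0, -1.0])
g = Jacobian(f)(x).reshape(-1)              # gradient, flattened from the (1, 2) matrix form
lam2 = g.T @ inv(Hessian(f)(x)) @ g         # squared Newton decrement
print(lam2)                                 # ~14.0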
Example #3
def approx_hessian(point, vars=None, model=None):
    """
    Returns an approximation of the Hessian at the current chain location.

    Parameters
    ----------
    model : Model (optional if in `with` context)
    point : dict
    vars : list
        Variables for which Hessian is to be calculated.
    """
    from numdifftools import Jacobian

    model = modelcontext(model)
    if vars is None:
        vars = model.cont_vars

    point = Point(point, model=model)

    bij = DictToArrayBijection(ArrayOrdering(vars), point)
    dlogp = bij.mapf(model.fastdlogp(vars))

    def grad_logp(point):
        return np.nan_to_num(dlogp(point))

    '''
    Find the jacobian of the gradient function at the current position
    this should be the Hessian; invert it to find the approximate
    covariance matrix.
    '''
    return -Jacobian(grad_logp)(bij.map(point))
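The comment above relies on the identity that the Jacobian of the gradient is the Hessian; a small standalone check of that identity with numdifftools on a toy function (the function f below is made up):

import numpy as np
from numdifftools import Jacobian, Gradient, Hessian

f = lambda x: x[0]**2 * x[1] + np.sin(x[1])   # toy scalar function
x = np.array([1.5, -0.3])
# Jacobian of the gradient should match the Hessian up to numerical error
print(np.allclose(Jacobian(Gradient(f))(x), Hessian(f)(x)))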
Example #4
def backtrackingLineSearch(objFun, x, alpha=0.25, beta=0.85, jacobian=None):
    """Backtracking Line Search"""
    t = 1
    if jacobian is None: jacobian = Jacobian(objFun)
    j = jacobian(x)
    # TODO do you also use jacobian for newton's ?
    while(objFun(x + t*(-j)) >= objFun(x) + alpha*t*j.T*(-j)):
        t *= beta
    return t
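The loop shrinks t until the Armijo (sufficient decrease) condition holds along the negative-gradient direction. For comparison, a minimal standalone sketch of the same rule written for a plain gradient vector; all names below (backtrack, quad) are illustrative and not part of the example:

import numpy as np
from numdifftools import Gradient

def backtrack(f, x, d, grad, alpha=0.25, beta=0.85):
    """Shrink t until f(x + t*d) <= f(x) + alpha * t * grad(x).dot(d)."""
    t = 1.0
    g = grad(x)
    while f(x + t * d) > f(x) + alpha * t * g.dot(d):
        t *= beta
    return t

quad = lambda x: (x[0] - 1.0)**2 + (x[1] + 2.0)**2   # toy convex objective
x = np.array([4.0, 4.0])
g = Gradient(quad)
print(backtrack(quad, x, -g(x), g))                  # step length along steepest descent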
Example #5
def test_first_derivative_jacobian_richardson(
        example_function_jacobian_fixtures):
    f = example_function_jacobian_fixtures["func"]
    fprime = example_function_jacobian_fixtures["func_prime"]

    true_grad = fprime(np.ones(3))
    numdifftools_grad = Jacobian(f, order=2, n=3, method="central")(np.ones(3))
    grad = first_derivative(f, np.ones(3), n_steps=3, method="central")

    aaae(numdifftools_grad, grad)
    aaae(true_grad, grad)
Example #6
def broyden(func, x1, x2, tol=1e-5, maxiter=50):
    """Calculate the zero of a multi-dimensional function using Broyden's method"""
    
    def isscalar(x):
        if isinstance(x, sp.ndarray):
            if x.size == 1:
                return x.flatten()[0]
            else:
                return x
        else:
            return x

    def update_Jacobian(preJac, ch_x, ch_F):
        """Update Jacobian from preX to newX
        preX and newX are assumed to be array objects of the same shape"""
                
        frac = (ch_F-(preJac.dot(ch_x)))/(la.norm(ch_x)**2)

        Jac = preJac+sp.dot(isscalar(frac),ch_x.T)
        return Jac
        
    # reshape the initial guesses into column vectors
    x1 = sp.vstack(x1.flatten())
    x2 = sp.vstack(x2.flatten())
    
    fx1 = func(x1)
    fx2 = func(x2)
    
    #check our original points for zeros
    if abs(fx1) < tol:
        return x1
    elif abs(fx2) < tol:
        return x2

    #Calculate initial Jacobian matrix
    jac = Jacobian(func)(x1)
    mi = maxiter
    while abs(fx2) > tol and mi > 0:        
        fx1 = func(x1)
        fx2 = func(x2)
        ch_x=x2-x1
        ch_F=fx2-fx1
        
        jac = update_Jacobian(jac, ch_x, ch_F)
        y = la.lstsq(jac, sp.array([-fx2]))[0]
        xnew = y+x2
        x1 = x2
        x2 = xnew
        mi -= 1
    
    if mi==0:
        raise ValueError("Did not converge in %d iterations" % maxiter)
    else:
        return x2, maxiter-mi
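The rank-one term in update_Jacobian is chosen so that the secant condition J_new · Δx = ΔF holds exactly after each step; a quick standalone check of that property, written with np.outer (the arrays below are arbitrary illustration data):

import numpy as np

rng = np.random.default_rng(0)
J = rng.standard_normal((3, 3))      # previous Jacobian estimate
dx = rng.standard_normal(3)          # change in x
dF = rng.standard_normal(3)          # change in F(x)
J_new = J + np.outer(dF - J @ dx, dx) / (dx @ dx)
print(np.allclose(J_new @ dx, dF))   # secant condition holds: True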
Example #7
def compute_control_trajectory(f, x, nu):
    """ Compute the control trajectory from the state trajectory and
    the function that describes the system dynamics. """
    #assert len(x.shape) == 2, "x must have 2 dimensions"
    # Allocate memory for the control trajectory.
    N = x.shape[1]
    u = zeros([nu, N - 1])
    # Calculate the control input estimate, u_hat, for the first time step.
    u_hat = zeros(nu)
    dt = 1.0  # until there's a reason to use something else
    dx = x[:, 1] - x[:, 0]
    dfdu = Jacobian(lambda u: f(x[:, 0], u))
    u_hat = pinv(dfdu(u_hat)).dot(dx / dt - f(x[:, 0], u_hat))
    for k in range(N - 1):
        dfdu = Jacobian(lambda u: f(x[:, k], u))
        # find the change in u that makes f(x,u)dt close to dx
        dx = x[:, k + 1] - x[:, k]
        du = pinv(dfdu(u_hat)).dot(dx / dt - f(x[:, k], u_hat))
        u_hat += du
        u[:, k] += u_hat
    return u
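Each inner step solves the linearized equation (∂f/∂u)·Δu ≈ Δx/dt − f(x, û) in the least-squares sense via the pseudoinverse. A toy standalone illustration of one such step (the dynamics function f and the numbers below are made up):

import numpy as np
from numpy.linalg import pinv
from numdifftools import Jacobian

dt = 0.1
f = lambda x, u: np.array([x[1], -x[0] + u[0]])   # toy dynamics with one control input
x0 = np.array([1.0, 0.0])
x1 = np.array([1.0, -0.05])                       # next state on the trajectory
u_hat = np.zeros(1)
dfdu = Jacobian(lambda u: f(x0, u))               # shape (2, 1)
du = pinv(dfdu(u_hat)).dot((x1 - x0) / dt - f(x0, u_hat))
print(u_hat + du)                                 # control that makes f(x,u)*dt match dx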
Example #8
def systemNewton(function, x0, fPrime = None, tol = 0.0001):
    epsilon = 1e-5
    xArray = sp.array(x0, dtype = float)
    funArray = sp.array(function)
    numEqns = funArray.size
    numVars = xArray.size
    if numVars==1:
        if fPrime:
            return myNewNewton(function, x0, fPrime)
        else:
            return myNewNewton(function, x0)
    else:
        xold = xArray
        jacfun = Jacobian(function)
        jac0 = sp.mat(jacfun.jacobian(xold))
        xnew = xold - la.solve(jac0,function(xold))
        while max(abs(xnew-xold))> tol:
            xold = xnew
            jacNew = sp.mat(jacfun.jacobian(xold))
            xnew = xold - la.solve(jacNew,function(xold))
        return xnew
Example #9
    def optimize(self, goal, mass):
        if goal == 'fuel':
            func = self.func_fuel
        elif goal == 'time':
            func = self.func_time
        else:
            raise RuntimeError('Optimization goal [%s] not available.' % goal)

        x0 = self.x0 * self.normfactor
        res = minimize(
            func,
            x0,
            args=(mass, ),
            bounds=self.bounds,
            jac=lambda x, m: Jacobian(lambda x: func(x, m))(x),
            options={'maxiter': 200},
            constraints=(
                {
                    'type': 'ineq',
                    'args': (mass, ),
                    'fun': lambda x, m: self.func_cons_thrust(x, m),
                    'jac': lambda x, m: Jacobian(
                        lambda x: self.func_cons_thrust(x, m))(x),
                },
                {
                    'type': 'ineq',
                    'args': (mass, ),
                    'fun': lambda x, m: self.func_cons_lift(x, m),
                    'jac': lambda x, m: Jacobian(
                        lambda x: self.func_cons_lift(x, m))(x),
                },
            ))
        return res
Example #10
def check(eta, negated):

    d = eta.shape[0]

    # deg = .15 * np.ones(d)
    # deg = np.arange(1, d + 1, dtype=np.double)
    deg = np.ones(d)
    costs = np.random.RandomState(0).uniform(0, 3, size=d)
    g, f = _make_factor(eta, negated, fn, B=2, costs=costs)
    u, _ = f.solve_qp_adjusted(eta, [], degrees=deg)

    J_ = np.zeros((d, d))
    for j, v in enumerate(np.eye(d)):
        du, _ = f.jacobian_vec(v)
        J_[j] = np.array(du)

    xor = lambda x : np.array(f.solve_qp(x, [])[0])
    print(u)
    J = Jacobian(xor, step=.000001)(eta)
    print(J)
    print(J_)
    print(np.linalg.norm(J - J_))
Example #11
File: gdm.py Project: monadplus/TOML
def gdm(objFun, x0, jacobian=None, accuracy=1e-4):
    """Gradient Descent Method.
    """
    if jacobian is None:
        jacobian = Jacobian(objFun)

    def getStep(x):
        return -jacobian(x)

    def getEta(d):
        return LA.norm(d, ord=2)

    steps = 0  # number of iterations
    x = x0
    d = getStep(x)
    eta = getEta(d)
    while (eta > accuracy):
        t = backtrackingLineSearch(objFun, x, jacobian=jacobian)
        x = x + t * d
        d = getStep(x)
        eta = getEta(d)
        steps += 1
    return (x, objFun(x), steps, eta)
Example #12
def fun_der(x, a):
    return Jacobian(lambda x: fun(x, a))(x).ravel()
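This Jacobian(...)(x).ravel() pattern recurs in several of the examples here: for a scalar objective, numdifftools returns the derivative as a (1, n) row matrix, and .ravel() flattens it into the 1-D gradient that SciPy's optimizers expect. A short standalone illustration (the quadratic objective below is made up):

import numpy as np
from numdifftools import Jacobian

fun = lambda x, a: np.sum((x - a)**2)         # hypothetical scalar objective
x = np.array([1.0, 2.0, 3.0])
raw = Jacobian(lambda x: fun(x, 2.0))(x)      # row matrix of shape (1, 3)
print(raw.shape, raw.ravel())                 # (1, 3) [-2.  0.  2.]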
Example #13
def solver(fobj,
           x0,
           lb=None,
           ub=None,
           options={},
           method='lmmcp',
           jac='default',
           verbose=False):

    in_shape = x0.shape

    ffobj = lambda x: fobj(x.reshape(in_shape)).flatten()

    if not isinstance(jac, str):
        pp = np.prod(in_shape)

        def Dffobj(t):
            tt = t.reshape(in_shape)
            dval = jac(tt)
            return dval.reshape((pp, pp))

    elif jac == 'precise':
        from numdifftools import Jacobian
        Dffobj = Jacobian(ffobj)
    else:
        Dffobj = MyJacobian(ffobj)

    if method == 'fsolve':
        import scipy.optimize as optimize
        factor = options.get('factor')
        factor = factor if factor else 1
        [sol, infodict, ier, msg] = optimize.fsolve(ffobj,
                                                    x0.flatten(),
                                                    fprime=Dffobj,
                                                    factor=factor,
                                                    full_output=True,
                                                    xtol=1e-10,
                                                    epsfcn=1e-9)
        if ier != 1:
            print(msg)
    elif method == 'anderson':
        import scipy.optimize as optimize
        sol = optimize.anderson(ffobj, x0.flatten())
    elif method == 'newton_krylov':
        import scipy.optimize as optimize
        sol = optimize.newton_krylov(ffobj, x0.flatten())
    elif method == 'lmmcp':
        from dolo.numeric.extern.lmmcp import lmmcp, Big
        if lb is None:
            lb = -Big * np.ones(len(x0.flatten()))
        else:
            lb = lb.flatten()
        if ub is None:
            ub = Big * np.ones(len(x0.flatten()))
        else:
            ub = ub.flatten()

        sol = lmmcp(ffobj,
                    Dffobj,
                    x0.flatten(),
                    lb,
                    ub,
                    verbose=verbose,
                    options=options)

    return sol.reshape(in_shape)
Example #14
 def f_jacobian(self, a):  # Numerical differentiation for Jacobian
     return Jacobian(lambda x: self.objective(x))(
         a).ravel()  # ravel (from numpy) flattens the array
Example #15
I0 = 1000.0

# The time points.
times = [0.0, 1.0, 2.0, 3.0, 4.0]

# The intensities for the above I0 and R.
I = [1000.0, 367.879441171, 135.335283237, 49.7870683679, 18.3156388887]

# The intensity errors.
errors = [10.0, 10.0, 10.0, 10.0, 10.0]

# The variance weighting matrix.
W = eye(len(errors))/(10.0**2)

# Set up the Jacobian function.
jacobian = Jacobian(func)

# The covariance matrix at the minimum.
print("\n\nOn-minimum:\n")
J = jacobian([R, I0])
covar = inv(dot(transpose(J), dot(W, J)))
print("The covariance matrix at %s is:\n%s" % ([R, I0], covar))
print("The parameter errors are therefore:")
print("    sigma_R:   %25.20f" % sqrt(covar[0, 0]))
print("    sigma_I0:  %25.20f" % sqrt(covar[1, 1]))

# The covariance matrix off the minimum.
print("\n\nOff-minimum:\n")
R = 2.0
I0 = 500.0
J = jacobian([R, I0])
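For context, a self-contained version of the same covariance estimate, covar = (Jᵀ W J)⁻¹, using the exponential decay model implied by the intensities above, I(t) = I0·exp(−R·t) with R = 1 and I0 = 1000 at the minimum (the model function here is reconstructed for illustration):

import numpy as np
from numpy.linalg import inv
from numdifftools import Jacobian

times = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
errors = np.full(5, 10.0)
W = np.eye(5) / errors**2                     # variance weighting matrix

def model(params):
    R, I0 = params
    return I0 * np.exp(-R * times)            # predicted intensities

J = Jacobian(model)([1.0, 1000.0])            # Jacobian at the minimum
covar = inv(J.T @ W @ J)
print(np.sqrt(np.diag(covar)))                # sigma_R, sigma_I0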
Example #16
 def _hess(*args, **kwargs):
     foo = Jacobian(grad)(*args, **kwargs)
     return (foo + foo.T) / 2
Example #17
def func_der(x, func=himmelblau):
    return Jacobian(lambda x: func(x))(x).ravel()
Example #18
def gradient_lin(args, x, y):
    return Jacobian(lambda args: linear(args, x, y))(args).ravel()
Example #19
import math
import numpy as np
from scipy.optimize import minimize
from timeit import timeit
from numdifftools import Jacobian, Hessian

def obj_fun(x):
    return math.exp(x[0])*(4*x[0]**2 + 2*x[1]**2 + 4*x[0]*x[1] + 2*x[1] + 1)

def fun_jac(x):
    dx = math.exp(x[0])*(4*x[0]**2 + 4*x[0]*(x[1] + 2) + 2*x[1]**2 + 6*x[1] + 1)
    dy = math.exp(x[0])*(4*x[0] + 4*x[1] + 2)
    return np.array([dx, dy])

# Slow, it seems it is computed each time..
fun_jac_true = Jacobian(obj_fun)

def fun_hess(x, a):
    dxx = math.exp(x[0])*(4*x[0]**2 + 4*x[0]*(x[1] + 2)*2*x[1]**2 + 10*x[1] + 9)
    dxy = math.exp(x[0])*(4*x[0] + 4*x[1] + 6)
    dyx = 2*math.exp(x[0])*(2*x[0] + 2*x[1] + 3)
    dyy = 4*math.exp(x[0])
    return np.array([[dxx, dxy],[dyx, dyy]])

ineq_cons = {'type': 'ineq',
             'fun' : lambda x: np.array([-x[0]*x[1] + x[0] + x[1] - 1.5,
                                         x[0]*x[1] + 10])
            }

x0s = np.array([[.0, .0],[10.0,20.0],[-10.0, 1.0],[-30.0,-30.0]])
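The snippet stops short of the actual solver call; one plausible way these pieces would be combined with SLSQP (this call is an illustration and not part of the original snippet):

res = minimize(obj_fun, x0s[0], method='SLSQP', jac=fun_jac,
               constraints=[ineq_cons], options={'maxiter': 200})
print(res.x, res.fun)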
Example #20
 def _hess(*args, **kwargs):
     foo = Jacobian(grad, **self._hess_options)(*args, **kwargs)
     return (foo + foo.T) / 2
Example #21
 def cost_fun_jac(params, x, y):
     return Jacobian(lambda p: cost_fun(p, x, y))(params).ravel()
Example #22
def gradient(args, x, y):
    return Jacobian(lambda args: F(args, x, y))(args).ravel()
Example #23

#%%
#  Newton's Method for optimization (SLSQP solver from Scipy uses this method)

import time
from scipy.optimize import minimize
import numpy as np
from numdifftools import Jacobian, Hessian

print('\nSOLVING USING SCIPY\n')

# First function to optimize
fun = lambda x: 2 * x**2 - 0.5  # objective function
fun_Jac = lambda x: Jacobian(lambda x: fun(x))(x).ravel()  # Jacobian
fun_Hess = lambda x: Hessian(lambda x: fun(x))(x)  # Hessian
cons = ()  # unconstrained
bnds = ((None, None), ) * 1  # unbounded

# initial guess
x0 = 3.0

start_time = time.time() * 1000
res = minimize(fun,
               x0,
               bounds=bnds,
               constraints=cons,
               jac=fun_Jac,
               hess=fun_Hess)
end_time = time.time() * 1000
Example #24
 def _compute_standard_errors(self, coefficients, X_data, y_data):
     hessian_function = Jacobian(self._gradient, method='forward')
     H = hessian_function(coefficients, X_data, y_data)
     P = self._compute_basis_change()
     return np.sqrt(np.diagonal(P.dot(inv(H)).dot(P.T)))
Example #25
def test_PSD():
    from numdifftools import Jacobian
    x = np.random.randn(6)
    J = Jacobian(lambda x: PSD.P(x))

    assert np.allclose(J(x), PSD.J(x))
Example #26
def linearize_and_quadratize(f, F, g, G, h, l, x, u):
    """ Linearize the system dynamics and quadratize the costs around the state
    and control trajectories described by x and u for a system governed by the
    following equations.
        dx = f(x,u)dt + F(x,u)dw(t)
        dy = g(x,u)dt + G(x,u)dv(t)
        J(x) = E(h(x(T)) + integral over t from 0 to T of l(t,x,u))
    Where J(x) is the cost to go.
    We are using kalman_lqg as the 'inner loop' of this algorithm and it does
    not explicitly support linear state and control costs so we will augment
    the state vector to include the control inputs and a constant term. The
    augmented state vector, xa, is shown below.

    xa[k] = (x[k] u[k-1] 1).T

    This requires augmentation of the matrices A, B, C0, H, D, and Q. The
    augmentation of C0, H, and D is trivial as it simply involves adding zeros.
    The augmentation of A contains an additional 1 for the constant term added
    to the state vector.  The augmentation of B contains an identity submatrix
    which enables the addition of the control inputs to the state vector. The
    augmentation of Q is the most interesting.

    Qa[k] = [[ Q[k]    0         q[k]/2   ]
             [ 0       R[k-1]    r[k-1]/2 ]
             [ q[k]/2  r[k-1]/2  qs[k]    ]]
    
    Since the control costs are now state costs the control costs passed to
    kalman_lqg are zero, i.e. Ra = 0.
    """
    dt = 1.0  # until there's a reason to use something else
    nx = x.shape[0]
    nxa = x.shape[0] + u.shape[0] + 1  # for state augmentation
    nu = u.shape[0]
    szC0 = F(x[:, 0], u[:, 0]).shape[1]
    ny = g(x[:, 0], u[:, 0]).shape[0]
    szD0 = G(x[:, 0], u[:, 0]).shape[1]
    N = x.shape[1]
    system = {}

    # build the vector for the initial augmented state
    x0a = [0.0 for i in range(nx)]
    u0a = [0.0 if i != nu else 1.0 for i in range(nu + 1)]
    system['X1'] = array(x0a + u0a)
    S1 = identity(nxa)
    S1[-1, -1] = 0.0
    system['S1'] = S1
    system['A'] = zeros([nxa, nxa, N - 1])
    system['B'] = zeros([nxa, nu, N - 1])
    system['C0'] = zeros([nxa, szC0, N - 1])
    system['C'] = zeros([nu, nu, szC0, N - 1])
    system['H'] = zeros([ny, nxa, N - 1])
    system['D0'] = zeros([ny, szD0, N - 1])
    system['D'] = zeros([ny, nxa, szD0, N - 1])
    system['Q'] = zeros([nxa, nxa, N])
    system['R'] = zeros([nu, nu, N - 1])
    for k in range(N - 1):
        dfdx = Jacobian(lambda x: f(x, u[:, k]))
        A = dfdx(x[:, k]) + identity(nx)
        dfdu = Jacobian(lambda u: f(x[:, k], u))
        B = dfdu(u[:, k])
        C0 = sqrt(dt) * F(x[:, k], u[:, k])
        dFdu = Jacobian(lambda u: F(x[:, k], u))
        # dFdu is x by w by u, BC is x by u by w, so swap last 2 dimensions
        # and multiply by pinv(B) to get C
        # !! Dont swap dFdu is x by u by w
        #C = sqrt(dt)*einsum('hi,ijk', pinv(B), swapaxes(dFdu(u[:,k]), -1, -2))
        C = sqrt(dt) * einsum('hi,ijk', pinv(B), dFdu(u[:, k]))
        #print(B.shape)
        #print(dFdu(u[:,k]).shape)
        #print(C.shape)
        dgdx = Jacobian(lambda x: g(x, u[:, k]))
        H = dgdx(x[:, k])
        system['D0'][:, :, k] = G(x[:, k], u[:, k]) / sqrt(dt)
        dGdx = Jacobian(lambda x: G(x, u[:, k]))
        # !!!!!!!!!!!!!!! print(dGdx(x[:,k]).shape)
        # dGdx is y by v by x, D is y by x by v, so swap last 2 dimensions  !!
        #D = swapaxes(dGdx(x[:,k]), -1, -2)/sqrt(dt)
        D = dGdx(x[:, k]) / dt
        # State cost, constant, linear, quadratic terms
        qs = dt * l(x[:, k], u[:, k], k)
        dldx = Jacobian(lambda x: l(x, u[:, k], k))
        q = dt * dldx(x[:, k])
        d2ldx2 = Hessian(lambda x: l(x, u[:, k], k))
        Q = dt * d2ldx2(x[:, k])
        if k == 0:
            # Due to state augmentation, the cost for control at k=0 will be
            # paid when k=1 so r[0] and R[0] are all zeros.
            r = zeros(nu)
            R = zeros([nu, nu])
        else:
            dldu = Jacobian(lambda u: l(x[:, k - 1], u, k))
            d2ldu2 = Hessian(lambda u: l(x[:, k - 1], u, k))
            r = dt * dldu(u[:, k - 1])
            R = dt * d2ldu2(u[:, k - 1])
        # augment matrices to accommodate linear state and control costs
        Aa = zeros([nxa, nxa])
        Aa[0:nx, 0:nx] = A
        Aa[-1, -1] = 1.0
        system['A'][:, :, k] = Aa
        Ba = zeros([nxa, nu])
        Ba[0:nx, 0:nu] = B
        Ba[nx:nx + nu, 0:nu] = identity(nu)
        system['B'][:, :, k] = Ba
        C0a = zeros([nxa, szC0])
        C0a[0:nx, 0:szC0] = C0
        system['C0'][:, :, k] = C0a
        Ha = zeros([ny, nxa])
        Ha[0:ny, 0:nx] = H
        system['H'][:, :, k] = Ha
        for j in range(D.shape[2]):
            Da = zeros([ny, nxa])
            #print(D.shape)
            #print(ny)
            #print(nxa)
            Da[0:ny, 0:nx] = D[:, :, j]
            system['D'][:, :, j, k] = Da
        Qa = zeros([nxa, nxa])
        Qa[0:nx, 0:nx] = Q
        Qa[0:nx, nx + nu] = q / 2
        Qa[nx + nu, 0:nx] = q / 2
        Qa[nx:nx + nu, nx:nx + nu] = R
        Qa[nx:nx + nu, nx + nu] = r / 2
        Qa[nx + nu, nx:nx + nu] = r / 2
        Qa[-1, -1] = qs
        system['Q'][:, :, k] = Qa
        # Control costs are built into the augmented Q matrix, Qa so R=0.
        system['R'][:, :, k] = zeros([nu, nu])

    # last time point, use h(x) for the state cost
    qs = h(x[:, N - 1])
    dhdx = Jacobian(lambda x: h(x))
    q = dhdx(x[:, N - 1])
    d2hdx2 = Hessian(lambda x: h(x))
    Q = d2hdx2(x[:, N - 1])
    dldu = Jacobian(lambda u: l(x[:, N - 2], u, k))
    r = dt * dldu(u[:, N - 2])
    d2ldu2 = Hessian(lambda u: l(x[:, N - 2], u, k))
    R = dt * d2ldu2(u[:, N - 2])
    Qa = zeros([nxa, nxa])
    Qa[0:nx, 0:nx] = Q
    Qa[0:nx, nx + nu] = q / 2
    Qa[nx + nu, 0:nx] = q / 2
    Qa[nx:nx + nu, nx:nx + nu] = R
    Qa[nx:nx + nu, nx + nu] = r / 2
    Qa[nx + nu, nx:nx + nu] = r / 2
    Qa[-1, -1] = qs
    system['Q'][:, :, N - 1] = Qa
    # iLQG does not accommodate noise added to the state estimate
    system['E0'] = zeros([1, 1, N])
    return system
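A compact way to see the Qa block layout described in the docstring, built with np.block from toy Q, R, q, r and qs values (shapes nx = 2, nu = 1 are purely illustrative):

import numpy as np

nx, nu = 2, 1
Q = np.eye(nx)                          # quadratic state cost
R = np.eye(nu)                          # quadratic control cost from step k-1
q = np.array([0.2, -0.4])               # linear state cost
r = np.array([0.1])                     # linear control cost
qs = 3.0                                # constant cost term

Qa = np.block([
    [Q,                       np.zeros((nx, nu)),     (q / 2).reshape(nx, 1)],
    [np.zeros((nu, nx)),      R,                      (r / 2).reshape(nu, 1)],
    [(q / 2).reshape(1, nx),  (r / 2).reshape(1, nu), np.array([[qs]])],
])
print(Qa)                               # (nx + nu + 1) x (nx + nu + 1), here 4 x 4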
Example #27
def gradient_rat(args, x, y):
    return Jacobian(lambda args: rational(args, x, y))(args).ravel()
 def fun_der(self, x, n):
     return Jacobian(lambda x: self.test_gen.objective(x, n))(x).ravel()