Example #1
def approx_cov(model, chain_state, vars=None):
    """
    returns an approximation of the hessian at the current chain location 
    """
    if vars is None:
        vars = model.vars
    dlogp = model_dlogp(model, vars)
    mapping = DASpaceMap(vars)

    def grad_logp(x):
        return np.nan_to_num(-dlogp(mapping.rproject(x, chain_state)))

    #find the jacobian of the gradient function at the current position
    #this should be the hessian; invert it to find the approximate covariance matrix
    return np.linalg.inv(nd.Jacobian(grad_logp)(mapping.project(chain_state)))
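For intuition, a minimal self-contained sketch of the same idea on an assumed 2-D Gaussian (toy values, not PyMC internals): the Jacobian of the gradient of -logp is the Hessian, and its inverse approximates the covariance.

import numpy as np
import numdifftools as nd

true_cov = np.array([[2.0, 0.3], [0.3, 0.5]])     # assumed covariance of the target density
prec = np.linalg.inv(true_cov)
grad_neg_logp = lambda x: prec @ x                # analytic gradient of -logp = 0.5 * x' P x

hess = nd.Jacobian(grad_neg_logp)(np.zeros(2))    # Jacobian of the gradient = Hessian
print(np.linalg.inv(hess))                        # ~ true_cov
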
Example #2
def fisher(data_time,q,u0,te,t0,xp,yp,fs,fb,flux,fs_err,fb_err,flux_err,d):
    func = lambda c: binary_function_4_fisher(c[0],t,u0,c[1],t0,xp,yp,d)
    Fisher = [[0, 0], [ 0,  0]]
    for i in range(len(data_time)):
        f = flux[i]
        f_err = flux_err[i]
        t = data_time[i]
        if t > t0-te and t < t0+te:
            sigma = np.sqrt((fb_err/fs)**2+((f-fb)*fs_err/fs**2)**2+(f_err/fs)**2)
            jac1 = nd.Jacobian(func)
            jac = jac1([q,te])
            f = 1/sigma**2 * (np.transpose(jac) @ jac)
            Fisher = np.add(Fisher,f)
    Fisher_inv = inv(Fisher)
    return Fisher_inv
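The same accumulation pattern, sketched on an assumed toy model f(t; a, b) = a + b*t (all names and values are illustrative only): sum J.T @ J / sigma**2 over the data points, then invert to get a parameter covariance.

import numpy as np
import numdifftools as nd

ts = np.linspace(0.0, 1.0, 20)                             # assumed observation times
sigma = 0.1                                                # assumed per-point uncertainty
model = lambda c, t: np.array([c[0] + c[1] * t])

c0 = np.array([1.0, 2.0])                                  # evaluation point for the parameters
fisher_mat = np.zeros((2, 2))
for t in ts:
    jac = np.atleast_2d(nd.Jacobian(lambda c: model(c, t))(c0))   # 1x2 row of derivatives
    fisher_mat += (jac.T @ jac) / sigma ** 2

print(np.linalg.inv(fisher_mat))                           # approximate parameter covariance
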
Example #3
def f_geneigvector( psd, bin_div=bin_div, moments=moments ):
    ## Calculate the counts
    global bin_mid, bin_diff
    count = f_count(psd,bin_mid,bin_diff,200*5)
    dchi = f_delta_chisquare( psd, count, moments, bin_mid, bin_diff, mobs )

    jac = np.matrix(nd.Jacobian(f_sum_chisquare,step=np.array([1,1,1e-3])*1e-3)( nml, moments, bin_mid, bin_diff, mobs ))
    hes = np.matrix(nd.Hessian(f_sum_chisquare,step=np.array([1,1,1e-3])*1e-3)( nml, moments, bin_mid, bin_diff, mobs ))

    # HV = Vd = VD
    d, V = scipy.linalg.eigh(hes)
    # D = np.asmatrix(np.diag(d))
    V = np.asmatrix(V)
    centroid = nml
    return centroid, V, d, dchi # sync1
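A stripped-down sketch of the Hessian-plus-eigendecomposition step on an assumed quadratic chi-square surface (the matrix A and the step size are made up):

import numpy as np
import scipy.linalg
import numdifftools as nd

A = np.array([[4.0, 1.0], [1.0, 3.0]])           # assumed curvature of the surface
chisq = lambda x: x @ A @ x                      # toy chi-square with minimum at the origin

hes = nd.Hessian(chisq, step=1e-4)(np.zeros(2))  # numerical Hessian at the minimum
d, V = scipy.linalg.eigh(hes)                    # eigenvalues and principal directions
print(d)                                         # ~ eigenvalues of 2*A
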
Example #4
def test_two_dimentional_jacobian():
    """Check the two-dimensional sparsemax Jacobian against a numerical approximation."""
    t = np.linspace(-2, 2, 100)
    z = np.vstack([t, np.zeros(100)]).T

    Jacobian_approx = numdifftools.Jacobian(
        lambda z_i: sparsemax.forward(z_i[np.newaxis, :])[0])

    jacobian_exact = sparsemax.jacobian(z)

    for i, z_i in enumerate(z):
        with warnings.catch_warnings(record=True) as w:
            # numdifftools causes a warning, a better filter could be made
            np.testing.assert_almost_equal(jacobian_exact[i, :, :],
                                           Jacobian_approx(z_i))
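A generic version of this kind of check, comparing a hand-derived Jacobian of a toy map (assumed here, not sparsemax) against numdifftools:

import numpy as np
import numdifftools as nd

f = lambda x: np.array([x[0] * x[1], np.sin(x[0]) + x[1] ** 2])          # toy map
analytic = lambda x: np.array([[x[1], x[0]], [np.cos(x[0]), 2 * x[1]]])  # its hand-derived Jacobian

x = np.array([0.3, -1.2])
np.testing.assert_allclose(nd.Jacobian(f)(x), analytic(x), rtol=1e-6)
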
Example #5
    def build_jacobian_fun(self, inputs):
        DiffX = lambda X: np.tile(X, (2, 1)) - np.transpose(np.tile(X, (2, 1)))
        dx = lambda X, Y: - X + (
                    (self.A - pow(X, 2) - pow(Y, 2)) * X - self.w * Y + self.G * (np.sum(self.C * DiffX(X), axis=1))) \
                          + self.dsig * np.random.randn(2)
        DiffY = lambda Y: np.tile(Y, (2, 1)) - np.transpose(np.tile(Y, (2, 1)))
        dy = lambda X, Y: - Y + (
                    (self.A - pow(X, 2) - pow(Y, 2)) * Y + self.w * X + self.G * (
                np.sum(self.C * DiffY(Y), axis=1))) + self.dsig * np.random.randn(2)

        def fun(X):
            x, y = X[:2], X[2:]
            # stack both derivative blocks so nd.Jacobian returns the full 4x4 matrix
            return np.concatenate((dx(x, y), dy(x, y)))
        jac_fun = nd.Jacobian(fun)
        return jac_fun
Example #6
File: ekf.py  Project: lizizatt/EKF
 def __init__(self, initialObservation, numSensors, sensorNoise,
              stateTransitionFunction, sensorTransferFunction, processNoise,
              processNoiseCovariance):
     self.numSensors = numSensors
     self.estimate = initialObservation  #current estimate, initialized with first observation
     self.previousEstimate = initialObservation  #previous state's estimate, initialized with first observation
     self.gain = np.identity(
         numSensors
     )  #tradeoff of system between estimation and observation, initialized arbitrarily at identity
     self.previousGain = np.identity(
         numSensors)  #previous gain, again arbitrarily initialized
     self.errorPrediction = np.identity(
         numSensors
     )  #current estimate of the signal error; starts as identity arbitrarily
     self.previousErrorPrediction = np.identity(
         numSensors)  #previous signal error, again arbitrary initialization
     self.sensorNoiseProperty = sensorNoise  #variance of sensor noise
     self.f = stateTransitionFunction  #state-transition function, from user input
     self.fJac = nd.Jacobian(self.f)  #jacobian of f
     self.h = sensorTransferFunction  #sensor transfer function, from user input
     self.hJac = nd.Jacobian(self.h)  #jacobian of h
     self.processNoise = processNoise
     #process noise
     self.Q = processNoiseCovariance  #process noise covariance
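A minimal sketch (separate from the class above, with all models and values assumed) of how numerically differentiated f and h Jacobians feed one EKF predict/update cycle:

import numpy as np
import numdifftools as nd

f = lambda x: np.array([x[0] + 0.1 * x[1], x[1]])      # assumed state-transition function
h = lambda x: np.array([x[0]])                         # assumed measurement function
F, H = nd.Jacobian(f), nd.Jacobian(h)                  # numerical Jacobians, as in the class above

x, P = np.array([0.0, 1.0]), np.eye(2)                 # state estimate and covariance
Q, R = 0.01 * np.eye(2), np.array([[0.1]])             # assumed noise covariances
z = np.array([0.12])                                   # one (made-up) measurement

x_pred = f(x)                                          # predict
P_pred = F(x) @ P @ F(x).T + Q
S = H(x_pred) @ P_pred @ H(x_pred).T + R               # update
K = P_pred @ H(x_pred).T @ np.linalg.inv(S)
x = x_pred + K @ (z - h(x_pred))
P = (np.eye(2) - K @ H(x_pred)) @ P_pred
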
Example #7
def compute_cell_error(cell, curl_x, curl_y, curl_z, ef_x, ef_y, ef_z):
    """Computes the error in a given cell of a mesh"""
    def ef_interpolator(x):
        return np.array([ef_x(*x), ef_y(*x), ef_z(*x)])

    jacobian = nd.Jacobian(ef_interpolator, order=2)(cell)
    jacobian[np.isnan(jacobian)] = 0  # handle NaN-values in the jacobian
    curl = np.array([
        jacobian[2, 1] - jacobian[1, 2], jacobian[0, 2] - jacobian[2, 0],
        jacobian[1, 0] - jacobian[0, 1]
    ])

    curl_field = np.array([curl_x(*cell), curl_y(*cell), curl_z(*cell)])
    error = np.linalg.norm(curl_field - curl)
    return error
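A quick sanity check of the curl-from-Jacobian construction on a field with a known curl: F(x, y, z) = (-y, x, 0) has curl (0, 0, 2) everywhere.

import numpy as np
import numdifftools as nd

field = lambda p: np.array([-p[1], p[0], 0.0])
J = nd.Jacobian(field)(np.array([0.3, -0.7, 1.1]))
curl = np.array([J[2, 1] - J[1, 2], J[0, 2] - J[2, 0], J[1, 0] - J[0, 1]])
print(curl)    # ~ [0, 0, 2]
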
Example #8
    def __call__(self, f, x0, finite_differences_stepsize):
        if x0.ndim != 1:
            raise ValueError("x must be a 1d array")
        if self.jacobian == "numdifftools":
            jacobian_obj = ndt.Jacobian(f, step=finite_differences_stepsize)
            jacobian_func = lambda x, f_x: jacobian_obj(x)
        else:
            jacobian_func = lambda x, f_x: self.jacobian(
                f, x, finite_differences_stepsize, f_x)
        step, f_x, i, jacobian = np.inf, f(x0), 0, None

        while not self.convergence_rule(step, f_x) and i < self.max_iter:
            i += 1
            if jacobian is None:
                jacobian = jacobian_func(x0, f_x)
            update_result = self.update_rule(f, x0, f_x, jacobian)
            try:
                x0, step, f_x, jacobian = update_result
            except ValueError:
                x0, step, f_x = update_result
                jacobian = None
            if self.printout:
                print(
                    "Iteration: ",
                    i,
                    "\nFunction value:\n",
                    f_x,
                    "\nNew solution:\n",
                    x0,
                    "\n",
                )

    ######################### TODO n_fev is wrong; why?
    ### Split up into an initial fev and a step-fev, with initial fev defined in __init__
        n_fev = ((i + 1 + x0.size + (i - 1) * self._nfev_multiplier * x0.size)
                 if self.jacobian != "numdifftools" else -1)
        if self._nfev_multiplier == 2:
            n_fev += x0.size

    ######################### TODO n_fev is wrong; why?
        return {
            "nfev": n_fev,
            "nit": i,
            "success": self.convergence_rule(step, f_x),
            "x": x0,
            "fun": f_x,
            "fjac": jacobian,
        }
Example #9
 def calcDiff(self, data, x, u=None):
     if u is None:
         u = self.unone        
     x = m2a(x)
     
     def function(x):
         x = torch.tensor(x, dtype = torch.float32)
         x = x.resize_(1, 3)
         jacobian = self.net.jacobian(x).squeeze().numpy()
         return np.array(jacobian).reshape(3,)
     
     j = function(x)
     hessian = nd.Jacobian(function)
     h = hessian(x)
     data.Lx = a2m(j)
     data.Lxx = a2m(h)
Example #10
def jacobian_of_tps():
    import numdifftools as ndt
    D = 3
    pts0 = np.random.randn(100, 3)
    pts_eval = np.random.randn(20, D)
    lin_ag, trans_g, w_ng, x_na = np.random.randn(
        D, D), np.random.randn(D), np.random.randn(len(pts0), D), pts0

    def eval_partial(x_ma_flat):
        x_ma = x_ma_flat.reshape(-1, 3)
        return tps.tps_eval(x_ma, lin_ag, trans_g, w_ng, pts0)

    for i in range(len(pts_eval)):
        rots = ndt.Jacobian(eval_partial)(pts_eval[i])
        rots1 = tps.tps_grad(pts_eval[i:i + 1], lin_ag, trans_g, w_ng, pts0)
        assert np.allclose(rots1, rots)
Example #11
def confidence_interval(model, minimizer, attr, *args, **kwargs):
    def pred_func(params):
        old_params = copy.deepcopy(model.parameters)
        model.set_parameters(params)
        result = getattr(model, attr)(*args, **kwargs)
        model.set_parameters(old_params)
        return result

    bf_params = minimizer.np_values()

    cov = minimizer.np_matrix(skip_fixed=False)
    jac = numdifftools.Jacobian(pred_func)(bf_params)
    err = scipy.stats.chi2.ppf(0.68, df=1) * np.sqrt(
        np.matmul(jac, np.matmul(cov, jac.T)).diagonal())
    cent = pred_func(bf_params)
    return cent - err, cent + err
Example #12
def test_hessian_2():
    n = 3
    seqs = [
        [1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2,
         2, 1, 1, 1, 1, 2, 3, 3, 3, 3]]

    model = PESContinuousTimeMSM().fit(seqs)
    print(model.timescales_)
    print(model.uncertainty_timescales())
    theta = model.theta_
    C = model.countsmat_
    print(C)

    C_flat = (C + C.T)[np.triu_indices_from(C, k=1)]
    print(C_flat)
    print('theta', theta, '\n')
    inds = np.where(theta != 0)[0]

    hessian1 = _ratematrix_PES.hessian(theta, C, inds=inds)
    hessian2 = nd.Jacobian(lambda x: _ratematrix_PES.loglikelihood(x, C)[1])(theta)
    hessian3 = nd.Hessian(lambda x: _ratematrix_PES.loglikelihood(x, C)[0])(theta)

    np.set_printoptions(precision=3)

    # H1 = hessian1[np.ix_(active, active)]
    # H2 = hessian2[np.ix_(active, active)]
    # H3 = hessian2[np.ix_(active, active)]

    print(hessian1, '\n')
    print(hessian2, '\n')
    # print(hessian3)
    print('\n')

    info1 = np.zeros((len(theta), len(theta)))
    info2 = np.zeros((len(theta), len(theta)))
    info1[np.ix_(inds, inds)] = scipy.linalg.pinv(-hessian1)
    info2[np.ix_(inds, inds)] = scipy.linalg.pinv(-hessian2[np.ix_(inds, inds)])

    print('Inverse Hessian')
    print(info1)
    print(info2)
    # print(scipy.linalg.pinv(hessian2))
    # print(scipy.linalg.pinv(hessian1)[np.ix_(last, last)])
    # print(scipy.linalg.pinv(hessian2)[np.ix_(last, last)])

    print(_ratematrix_PES.sigma_pi(info1, theta, n))
    print(_ratematrix_PES.sigma_pi(info2, theta, n))
Example #13
File: gmm.py  Project: huning2009/mygmm
    def __approx_dmoment(self, theta, **kwargs):
        """Approxiamte derivative of the moment function numerically.

        Parameters
        ----------
        theta : (nparams,) array
            Parameters

        Returns
        -------
        (nmoms, nparams) array
            Derivative of the moment function

        """
        with np.errstate(divide='ignore'):
            return nd.Jacobian(lambda x: self.momcond(x, **kwargs)[0].mean(0))(
                theta)
Example #14
    def propagate_parameter_uncertainties(
        self, result_params: Parameters
    ) -> np.ndarray:
        """Propagates the post fit uncertainty of the fit parameters
        to the bin counts via Gaussian error propagation and returns
        the bin-by-bin post fit covariance matrix.
        """

        # create a helper function to calculate bin counts purely from parameter values
        def exp_bin_counts(param_values):
            yields, nui_params = self.split_parameter_array(param_values)
            return self._expected_evts_per_bin(yields, nui_params)

        jac = nd.Jacobian(exp_bin_counts)(result_params.values)
        bin_by_bin_cov = jac @ result_params.covariance @ np.transpose(jac)

        return bin_by_bin_cov
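A cross-check of the jac @ cov @ jac.T propagation against Monte Carlo sampling, sketched on an assumed nonlinear map of two correlated parameters:

import numpy as np
import numdifftools as nd

rng = np.random.default_rng(0)
mu = np.array([1.0, 0.5])                              # assumed best-fit parameters
cov = np.array([[0.02, 0.005], [0.005, 0.01]])         # assumed parameter covariance
g = lambda p: np.array([p[0] * np.exp(p[1]), p[0] + p[1] ** 2])   # toy derived quantities

jac = nd.Jacobian(g)(mu)
lin_cov = jac @ cov @ jac.T                            # linearized (delta-method) covariance
samples = rng.multivariate_normal(mu, cov, 50_000)
mc_cov = np.cov(np.apply_along_axis(g, 1, samples).T)  # Monte Carlo covariance
print(lin_cov, mc_cov, sep="\n")                       # should be close for small cov
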
Example #15
def get_jacobian(function, point, minima, maxima):

    wrapper, scaled_deltas, scaled_point, orders_of_magnitude, n_dim = _get_wrapper(
        function, point, minima, maxima)

    # Compute the Jacobian matrix at best_fit_values
    jacobian_vector = nd.Jacobian(wrapper, scaled_deltas,
                                  method='central')(scaled_point)

    # Transform it to numpy matrix

    jacobian_vector = np.array(jacobian_vector)

    # Now correct back the Jacobian for the scales

    jacobian_vector /= orders_of_magnitude

    return jacobian_vector[0]
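A small sketch of why the result is divided by the scale factors: differentiating in scaled coordinates and undoing the scale via the chain rule recovers the raw-parameter Jacobian (toy function and magnitudes assumed).

import numpy as np
import numdifftools as nd

scale = np.array([1e6, 1e-3])                 # assumed orders of magnitude of the parameters
f = lambda x: np.array([x[0] * x[1]])         # toy function of the raw parameters
x0 = np.array([2e6, 5e-3])

g = lambda s: f(s * scale)                    # the same function in scaled coordinates
jac_scaled = nd.Jacobian(g)(x0 / scale)       # d f / d s
jac = jac_scaled / scale                      # chain rule: d f / d x = (d f / d s) / scale
print(jac, nd.Jacobian(f)(x0))                # the two should agree (approximately)
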
Example #16
def jacobian(
    func, params, method="central", extrapolation=True, func_args=None, func_kwargs=None
):
    """
    Calculate the jacobian of *func*.

    Args:
        func (function): A function that maps params_sr into a numpy array
                        or pandas Series.

        params (DataFrame): see :ref:`parmas_df`
        method (string): The method for the computation of the derivative. Default is
                         central as it gives the highest accuracy.
        extrapolation (bool): Whether to use Richardson extrapolation. Default is True.
        func_args (list): additional positional arguments for func.
        func_kwargs (dict): additional keyword arguments for func.

    Returns:
        DataFrame: The Jacobian. Its rows are indexed by the index of the Series
        returned by func (or 0, 1, 2, ... if func returns a numpy array); its
        columns are indexed by the index of params.

    """
    if method not in ["central", "forward", "backward"]:
        raise ValueError("Method has to be in ['central', 'forward', 'backward']")

    func_args = [] if func_args is None else func_args
    func_kwargs = {} if func_kwargs is None else func_kwargs

    f_x0 = func(params, *func_args, **func_kwargs)

    internal_func = _create_internal_func(func, params, func_args, func_kwargs)
    params_value = params["value"].to_numpy()

    if extrapolation:
        jac_np = nd.Jacobian(internal_func, method=method)(params_value)
    else:
        jac_np = _no_extrapolation_jacobian(internal_func, params_value, method)

    if isinstance(f_x0, pd.Series):
        return pd.DataFrame(index=f_x0.index, columns=params.index, data=jac_np)
    else:
        return pd.DataFrame(columns=params.index, data=jac_np)
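For reference, a toy comparison of the finite-difference method options numdifftools accepts (the function and evaluation point here are assumed):

import numpy as np
import numdifftools as nd

f = lambda x: np.array([np.exp(x[0]), x[0] * x[1]])    # toy function
x = np.array([0.5, 2.0])
for method in ["central", "forward", "backward"]:
    print(method, nd.Jacobian(f, method=method)(x))
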
Example #17
def test_hessian_1():
    n = 3
    seqs = [[
        1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2,
        1, 1, 1, 1, 2, 3, 3, 3, 3
    ]]

    model = ContinuousTimeMSM().fit(seqs)
    theta = model.theta_
    C = model.countsmat_

    hessian1 = _ratematrix.hessian(theta, C)
    Hfun = nd.Jacobian(lambda x: _ratematrix.loglikelihood(x, C)[1])
    hessian2 = Hfun(theta)

    # not sure what the cutoff here should be (see plot_test_hessian)
    assert np.linalg.norm(hessian1 - hessian2) < 1e-6

    print(_ratematrix.sigma_pi(-scipy.linalg.pinv(hessian1), theta, n))
Example #18
def Jacobian(x,y,xk):
    m=int(x.shape[0])
    f=np.empty([1,],dtype=object)
    i=0
    while i < m:    
        # bind the current i as a default argument so each residual keeps its own index
        g=[lambda z, i=i: y[0,i]-z[0]*np.cos(z[1]*x[i])-z[1]*np.sin([z[0]*x[i]])]
        i=i+1
        f=np.concatenate((f,g),axis=0)
    f=f[1:]  
    n=len(xk)  
    i=0
    J=np.empty([0,n])
    row=np.empty([1,n])
    while i+1<= m:
        grd= nd.Jacobian(f[i])
        row[0:n-1] = grd(xk)
        i=i+1
        J=np.concatenate((J,row),axis=0)
    return J
Example #19
 def jacobian(self, t, r, u, trained=True):
     """ The jacobian matrix of the untrained reservoir ode w.r.t. r. That is, if
             dr/dt = F(t, r, u)
             Jij = dF_i/dr_j
         Parameters:
             t (float): Time value
             r (ndarray): Array of length `self.res_sz` reservoir node state
             u (callable): function that accepts `t` and returns an ndarray of length `self.signal_dim`
         Returns:
             Jnum (callable): Accepts a node state r (ndarray of length `self.res_sz`) and returns a
             (`self.res_sz` x `self.res_sz`) array of partial derivatives (Computed numerically
             with finite differences). See `numdifftools.Jacobian`
     """
     if trained:
         f = lambda r: self.trained_res_ode(t, r, u)
     else:
         f = lambda r: self.res_ode(t, r, u)
     Jnum = ndt.Jacobian(f)
     return Jnum
Example #20
    def __init__(self,
                 covariance_matrix_target = (np.matrix([[1, 0], [0, 1]])),
                 init = [0, 0],
                 n_samples = 200,
                 L = 25,
                 epsilon_range = [0.20, 0.25]):

        self.cov_mat_target = covariance_matrix_target
        self.n_samples = n_samples
        self.init = init
        self.epsilon_range = epsilon_range
        self.L = L
        self.sample = 'No sample yet, run the sampler first'
        self.acceptance_rate = 'No sample yet, run the sampler first'
        self.effective_n = 'No sample yet, run the sampler first'
        self.type_of_sampler = 'hmc'
        self.grad_U = nd.Jacobian(self.U)
        self.trace_mode = False
        self.trace = 'No trace yet, run trace_hmc_quantities()'
Example #21
 def testAngularVelocityBodyFrameJacobian(self):
     r = sm.EulerAnglesZYX();
     for order in range(2,7):
         bsp = bsplines.BSplinePose(order,r)
         T_n_0 = bsp.curveValueToTransformation(numpy.random.random(6))
         T_n_1 = bsp.curveValueToTransformation(numpy.random.random(6))
             
         # Initialize the curve.
         bsp.initPoseSpline(0.0,1.0,T_n_0, T_n_1)
             
         for t in numpy.linspace(bsp.t_min(), bsp.t_max(), 4):
             
             oJI = bsp.angularVelocityBodyFrameAndJacobian(t);
             #print "TJI: %s" % (TJI)
             je = nd.Jacobian(lambda c: bsp.setLocalCoefficientVector(t,c) or bsp.angularVelocityBodyFrame(t))
             estJ = je(bsp.localCoefficientVector(t))
             J = oJI[1]
             
             self.assertMatricesEqual(J, estJ, 1e-9,"omega Jacobian")
Example #22
def nonrigidity_gradient():
    import numdifftools as ndt
    D = 3
    pts0 = np.random.randn(10, D)
    pts_eval = np.random.randn(3, D)

    def err_partial(params):
        lin_ag = params[0:9].reshape(3, 3)
        trans_g = params[9:12]
        w_ng = params[12:].reshape(-1, 3)
        return tps.tps_nr_err(pts_eval, lin_ag, trans_g, w_ng, pts0)

    lin_ag, trans_g, w_ng = np.random.randn(
        D, D), np.random.randn(D), np.random.randn(len(pts0), D)
    J1 = tps.tps_nr_grad(pts_eval, lin_ag, trans_g, w_ng, pts0)
    J = ndt.Jacobian(err_partial)(np.r_[lin_ag.flatten(),
                                        trans_g.flatten(),
                                        w_ng.flatten()])
    assert np.allclose(J1, J)
Example #23
    def build_jacobian_fun(self):
        first_layer, second_layer, third_layer, fourth_layer = self.build_numpy_submodelto()
        alpha_fun, beta_fun = self.build_numpy_submodelfrom()

        def sigmoid(x):
            return 1 / (1 + np.exp(-x))

        act_state_fun = lambda x: self.act_deterministic(alpha_fun(x), beta_fun(x)) @ self.act_state_weights
        f_fun = lambda x: fourth_layer(third_layer(second_layer(first_layer(act_state_fun(x)))))

        z_fun = lambda x: sigmoid(x @ self.U_z + f_fun(x) @ self.W_z + self.b_z)
        r_fun = lambda x: sigmoid(x @ self.U_r + f_fun(x) @ self.W_r + self.b_r)
        g_fun = lambda x: np.tanh(r_fun(x) * (x @ self.U_h) + f_fun(x) @ self.W_h + self.b_h)

        def dynamical_system(x):
            return - x + z_fun(x) * x + (1 - z_fun(x)) * g_fun(x)
        jac_fun = nd.Jacobian(dynamical_system)

        return jac_fun
Example #24
File: solve.py  Project: TB-IKP/Truss
def break_ALM(self):

    #first, the gradient of the objective function (nabla*f)
    #and the non-relaxed constraints
    #in order to obtain the non-augmented objective function,
    #set method_ALM to False and immediately back to true

    self.method_ALM = False

    KKT_lagrangian = self.gradient(self.par_ALM['x'])

    self.method_ALM = True

    KKT_lagrangian += np.dot(
        self.par_ALM['mult_sub'].T,
        self.jacobian(self.par_ALM['x']).reshape(self.par_ALM['m'],
                                                 self.par_ALM['n']))

    #the relaxed constraints (eta*nabla*GH)
    KKT_lagrangian += np.dot(self.par_ALM['eta'],
                             nd.Jacobian(self.vanishing)(self.par_ALM['x']))

    #the bounds
    try:
        KKT_lagrangian -= self.par_ALM['mult_lb']
        KKT_lagrangian += self.par_ALM['mult_ub']

    except KeyError:
        pass

    #complementarity test for relaxed constraint
    KKT_complement = np.min(
        (-self.vanishing(self.par_ALM['x']), self.par_ALM['eta']), axis=0)

    if self.verbose:
        print('KKT_lagrangian', np.max(np.abs(KKT_lagrangian)))
        print('KKT_complement', np.max(np.abs(KKT_complement)))

    if np.max(np.abs(KKT_lagrangian)) < self.par_ALM['stop_crit']:
        if np.max(np.abs(KKT_complement)) < self.par_ALM['stop_crit']:
            return True

    return False
Example #25
    def fit(self, A):
        A_mean = np.mean(A)
        A_variance = np.var(A)
        A_skew = scipy.stats.skew(A)
        A_kurtosis = scipy.stats.kurtosis(A)

        array_moments = array([A_mean, A_variance, A_skew, A_kurtosis])

        def objetivo(f):
            mu = f[0]
            sigma = f[1]
            alpha = f[2]

            if alpha > 4:
                pass

            moments = array([
                self._mean(mu, sigma, alpha),
                self._variance(mu, sigma, alpha),
                self._skew(mu, sigma, alpha),
                self._kurtosis(mu, sigma, alpha)
            ])

            aux = array_moments - moments

            return aux.dot(aux)

        jac_objetivo = lambda f: nd.Jacobian(objetivo)(f).ravel()

        f0 = array([0.0, 1.0, 5.0])

        f = minimize(objetivo,
                     f0,
                     method='Newton-CG',
                     jac=jac_objetivo,
                     tol=1e-8)

        self._mu = f.x[0]
        self._sigma = f.x[1]
        self._alpha = f.x[2]

        return f
Example #26
def make_lv(every):
    ########## Parameters adapted from "Chaos in low-dimensional ...."
    r = np.array([1., 0.72, 1.53, 1.27])
    A = np.matrix([[1. * r[0], 1.09 * r[0], 1.52 * r[0], 0. * r[0]],
                   [0. * r[1], 1. * r[1], 0.44 * r[1], 1.36 * r[1]],
                   [2.33 * r[2], 0. * r[2], 1. * r[2], 0.47 * r[2]],
                   [1.21 * r[3], 0.51 * r[3], 0.35 * r[3], 1. * r[3]]])

    ###### Initial conditions and time steps #######
    x0 = 0.2
    y0 = 0.2
    z0 = 0.3
    k0 = 0.3
    ####### At T=1015 the model explodes
    T = 1000
    dt = 0.01
    n_steps = int(T / dt)
    t = np.linspace(0, T, n_steps)
    X_f1 = np.array([x0, y0, z0, k0])

    ################################################
    ################################################
    def dX_dt(X, t=0):
        dydt = np.array([
            X[s] * (r[s] - np.sum(np.dot(A, X)[0, s]))
            for s in range(0, len(X))
        ])
        return (dydt)

    ################################################
    ts = integrate.odeint(dX_dt, X_f1, t)
    X_f1 = ts[ts.shape[0] - 1, :]
    ts = integrate.odeint(dX_dt, X_f1, t)
    jacobiano = []
    dat = []
    for i in range(0, ts.shape[0]):
        if i % every == 0:
            f_jacob = nd.Jacobian(dX_dt)(np.squeeze(np.asarray(ts[i, :])))
            jacobiano.append(f_jacob)
            dat.append(ts[i, :])

    return (np.stack(dat), jacobiano)
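A typical follow-up on such trajectory Jacobians, sketched here on an assumed two-species system rather than the model above: the eigenvalues of the Jacobian at an equilibrium characterize local stability.

import numpy as np
import numdifftools as nd

dX_dt = lambda X: np.array([X[0] * (1 - X[0] - 0.5 * X[1]),
                            X[1] * (1 - 0.5 * X[0] - X[1])])   # toy competitive system
x_eq = np.array([2 / 3, 2 / 3])                                # its interior equilibrium
J = nd.Jacobian(dX_dt)(x_eq)
print(np.linalg.eigvals(J))    # all real parts negative -> locally stable
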
Example #27
    def compute_numerical_jacobian(self, u, step=1.0e-7):
        """
        Computes finite difference approximation to the Jacobian. Not intended to be used
        for simulation runs since it's quite slow, but it's useful for debugging.

        Parameters
        ----------
        u: (N,) ndarray
            Array of ice velocity in m/yr.
        step: float, optional
            Step size for finite differences. Default: 1.0e-7.

        Returns
        -------
        J: (N+2, N+2) ndarray
            2D Jacobian array.
        """
        import numdifftools as nd
        jacfun = nd.Jacobian(self.compute_pde_values, step=step)
        return jacfun(u)
Example #28
def test_goal_depth_model_jacobian():
    x = np.random.rand(STATES, 1)
    # x = np.zeros((STATES, 1))
    # x[3] = 0.1
    # x[4] = -0.3
    # x[5] = 1.2
    # x[16] = -0.45
    # x[17] = 1.23
    # x[18] = 0.1
    jac_func = nd.Jacobian(goal_depth_meas_model)

    meas = goal_depth_meas_model(x)
    # print('x = {}'.format(x))
    # print('meas = {}'.format(meas))
    jac = jac_func(x)
    analytical_jac = analytical_goal_depth_jac(x)
    # print('jac = {}'.format(jac))

    res = np.isclose(jac, analytical_jac)
    assert(np.all(res)), res
Example #29
 def testInverseOrientationJacobian(self):
     rvs = (sm.RotationVector(), sm.EulerAnglesZYX(), sm.EulerRodriguez())
     for r in rvs:
         for order in range(2,7):
             bsp = bsplines.BSplinePose(order,r)
             T_n_0 = bsp.curveValueToTransformation(numpy.random.random(6))
             T_n_1 = bsp.curveValueToTransformation(numpy.random.random(6))
             
             # Initialize the curve.
             bsp.initPoseSpline(0.0,1.0,T_n_0, T_n_1)
             
             for t in numpy.linspace(bsp.t_min(), bsp.t_max(), 4):
                 # Create a random homogeneous vector
                 v = numpy.random.random(3)
                 CJI = bsp.inverseOrientationAndJacobian(t);
                 #print "TJI: %s" % (TJI)
                 je = nd.Jacobian(lambda c: bsp.setLocalCoefficientVector(t,c) or numpy.dot(bsp.inverseOrientation(t), v))
                 estJ = je(bsp.localCoefficientVector(t))
                 JT = CJI[1]
                 J = numpy.dot(sm.crossMx(numpy.dot(CJI[0],v)), JT)                    
                 self.assertMatricesEqual(J, estJ, 1e-8,"C_n_0")
Example #30
    def numericElasticities(model, y0, rate):
        """Numerically approximates elasticisties for a rate

        Parameters
        ----------
        y0 : list or numpy.array
            State vector
        rate : str
            Name of the rate for which elasticities are calculated

        Returns
        -------
        epsilon : numpy.ndarray
            Array of elasticities for the rate
        """
        def vi(y):
            v = model.rates(y)
            return v[rate]
        jac = nd.Jacobian(vi, step=y0.min()/100)
        epsilon = jac(y0)
        return epsilon
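A toy elasticity check on an assumed Michaelis-Menten rate (not the model above): the scaled derivative d ln v / d ln S should equal Km / (Km + S).

import numpy as np
import numdifftools as nd

Vmax, Km = 2.0, 0.5                                            # assumed kinetic constants
rate = lambda y: np.array([Vmax * y[0] / (Km + y[0])])         # toy Michaelis-Menten rate

y0 = np.array([1.5])
epsilon = nd.Jacobian(rate, step=y0.min() / 100)(y0)           # dv/dS, as in the method above
scaled = epsilon * y0 / rate(y0)                               # elasticity d ln v / d ln S
print(scaled, Km / (Km + y0[0]))                               # the two should match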