Example #1
    def checkStability(self):
        """Computes the Jacobian matrix of partial derivatives and evaluates
        it at steady state, and then calculates the eigenvalues and 
        eigenvectors of the Jacobian.  

        In order for the steady state to be dynamically stable, we need
        to have one stable eigenvalue (i.e., one eigenvalue less than unity) 
        and one unstable eigenvalue (i.e., one eigenvalue greater than unity).

        Returns: A list containing...

            1. jacobian: a (2, 2) array of the evaluated partial derivatives.
            2. eigenvalues: the eigenvalues of the Jacobian matrix.
            3. eigenvectors: the eigenvectors of the Jacobian matrix.
            
        """
        # want to evaluate partial derivatives at steady state
        SS = (self.SS_dict['k_star'], self.SS_dict['c_star'])
            
        # calculate partial derivatives
        capital_c = mp.diff(f=self.capital, x=SS, n=(0, 1))
        capital_k = mp.diff(f=self.capital, x=SS, n=(1, 0))
        euler_c   = mp.diff(f=self.euler, x=SS, n=(0, 1))
        euler_k   = mp.diff(f=self.euler, x=SS, n=(1, 0))
        
        # define the Jacobian
        jacobian = np.array([[capital_k, capital_c], 
                             [euler_k, euler_c]], dtype='float')
        
        # calculate eigenvalues/vectors
        eigenvalues, eigenvectors = np.linalg.eig(jacobian)
        
        return [jacobian, eigenvalues, eigenvectors]
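A minimal standalone sketch of the saddle-path check described in the docstring, using a made-up 2x2 Jacobian in place of the model class (which is not shown here):

import numpy as np

# Hypothetical Jacobian of a two-equation system, evaluated at its steady state.
jacobian = np.array([[1.2, 0.1],
                     [0.0, 0.8]])

eigenvalues, eigenvectors = np.linalg.eig(jacobian)

# Saddle-path stability: exactly one eigenvalue inside the unit circle (stable)
# and one outside (unstable), matching one state and one jump variable.
stable = np.abs(eigenvalues) < 1.0
print("eigenvalues:", eigenvalues)
print("saddle-path stable:", stable.sum() == 1)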
Example #2
    def checkStability(self):
        """Computes the Jacobian matrix of partial derivatives and evaluates
        it at steady state, and then calculates the eigenvalues and 
        eigenvectors of the Jacobian.  

        In order for the steady state to be dynamically stable, we need
        to have one stable eigenvalue (i.e., one eigenvalue less than unity) 
        and one unstable eigenvalue (i.e., one eigenvalue greater than unity).

        Returns: A list containing...

            1. jacobian: a (2, 2) array of the evaluated partial derivatives.
            2. eigenvalues: the eigenvalues of the Jacobian matrix.
            3. eigenvectors: the eigenvectors of the Jacobian matrix.
            
        """
        # want to evaluate partial derivatives at steady state
        SS = (self.SS_dict['k_star'], self.SS_dict['c_star'])

        # calculate partial derivatives
        capital_c = mp.diff(f=self.capital, x=SS, n=(0, 1))
        capital_k = mp.diff(f=self.capital, x=SS, n=(1, 0))
        euler_c = mp.diff(f=self.euler, x=SS, n=(0, 1))
        euler_k = mp.diff(f=self.euler, x=SS, n=(1, 0))

        # define the Jacobian
        jacobian = np.array([[capital_k, capital_c], [euler_k, euler_c]],
                            dtype='float')

        # calculate eigenvalues/vectors
        eigenvalues, eigenvectors = np.linalg.eig(jacobian)

        return [jacobian, eigenvalues, eigenvectors]
Example #3
    def hplus(self, L, r, E, prime=0):
        """Calculates the Hankel Function H+ in terms of the regular (coulombf) and irregular (coulombg) Coulomb functions: H(+)=G+iF

        Parameters
        ----------
        L: int
            Angular momentum of the scattering state
        r: float
            Position of the boundary match condition
        E: int or float
            Energy of the scattering wavefunction
        prime: int, 0 or 1
            Whether to calculate the derivative
        Returns
        -------
        H+: complex
            The Hankel function H+ or its derivative
        
        """

        if prime == 0:
            return mp.coulombg(
                L, 0,
                cm.sqrt(self.Const * E) *
                r) + complex(0, 1) * mp.coulombf(L, 0,
                                                 cm.sqrt(self.Const * E) * r)
        elif prime == 1:
            return mp.diff(
                lambda x: mp.coulombg(L, 0,
                                      cm.sqrt(self.Const * E) * x),
                r) + complex(0, 1) * mp.diff(
                    lambda x: mp.coulombf(L, 0,
                                          cm.sqrt(self.Const * E) * x), r)
Example #4
    def checkStability(self):
        """Computes the Jacobian matrix of partial derivatives and 
        evaluates it at the deterministic steady state, and then 
        calculates the eigenvalues and eigenvectors of the Jacobian.  

        In order for the steady state to be dynamically stable, we 
        need to have:

            1. Same number of stable eigenvalues as pre-determined 
               variables (i.e., state variables).
            2. Same number of unstable eigenvalues as control (i.e., 
               jump) variables.

        Returns: A list containing...

            jacobian:     Array of the evaluated partial derivatives.
            eigenvalues:  The eigenvalues of the Jacobian matrix.
            eigenvectors: The eigenvectors of the Jacobian matrix.  
            
        """
        if self.timing == 'discrete':
            # define symbolic variables
            k = sp.var('k')
            c = sp.var('c')
        
            # consumption depends on next period's capital!
            kplus = self.get_nextCapital(k, c)
            ramseySystem = sp.Matrix([self.get_nextCapital(k, c), \
                                      self.get_nextConsumption(kplus, c)])

            # define the Jacobian
            evalDict = {k:self.SS_dict['k_bar'], c:self.SS_dict['c_bar']}
            jac = ramseySystem.jacobian([k, c]).evalf(n=12, subs=evalDict)
            jacobian = np.array(jac).astype('float')

        elif self.timing == 'continuous':
            # define the Jacobian
            SS = (self.SS_dict['k_bar'], self.SS_dict['c_bar'])
            capital_c = mp.diff(self.get_nextCapital, x=SS, n=(0, 1))
            capital_k = mp.diff(self.get_nextCapital, x=SS, n=(1, 0))
            euler_c   = mp.diff(self.get_nextConsumption, x=SS, n=(0, 1))
            euler_k   = mp.diff(self.get_nextConsumption, x=SS, n=(1, 0))

            jacobian = np.array([[capital_k, capital_c], 
                                 [euler_k, euler_c]], dtype='float')
            
        # calculate eigenvalues/vectors
        eigenvalues, eigenvectors = np.linalg.eig(jacobian)

        # which is the eigenvector for the stable eigenvalue
        if eigenvalues[0] < 1:
            index = 0
        elif eigenvalues[1] < 1:
            index = 1
        else:
            raise Exception('No stable eigenvalue!')
         
        return [jacobian, eigenvalues, eigenvectors, index]
Example #5
    def compute_scaling_factor(self, dim, offset, verbose=False):
        # compute the scaling factor by minimizing the KL-divergence between
        # two multivariate t distributions of dimension `dim` where one has
        # covariance I and dof lambda + offset and the other has covariance
        # alpha * I and dof lambda, against alpha.

        # We use mpmath here because the high dimensionality leads to precision
        # errors in scipy.

        def _integrand(v, alpha, m, lmd, d):
            return (mp.power(v / (1 + v), m / 2) * 1.0 /
                    (v * mp.power(1 + v, (lmd + d) / 2.0)) *
                    mp.log(1 + (lmd + d) / (alpha * lmd) * v))

        def _H(alpha, m, lmd, d):
            H2 = mp.beta(m / 2, (lmd + d) / 2) * mp.log(alpha)
            Q = mp.quad(lambda v: _integrand(v, alpha, m, lmd, d), [0, mp.inf])
            H3 = (1 + lmd / m) * Q
            return H2 + H3

        m = mp.mpf(dim)
        lmd = mp.mpf(self.lambda0)
        d = mp.mpf(offset)
        F = lambda alpha: _H(alpha, m, lmd, d)
        dF = lambda alpha: mp.diff(F, alpha)
        alpha_star = mp.findroot(dF, mp.mpf(1.0), verbose=verbose)
        return float(alpha_star)
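The pattern used above, finding a minimizer by running mp.findroot on the numerical derivative produced by mp.diff, can be sketched on a toy convex objective (the function below is illustrative, not from the original code):

import mpmath as mp

# Toy objective with a known minimizer at alpha = 1.
F = lambda alpha: alpha - mp.log(alpha)
dF = lambda alpha: mp.diff(F, alpha)

alpha_star = mp.findroot(dF, mp.mpf('2.0'))
print(alpha_star)  # ~ 1.0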
Example #6
	def __init__(self, fun, n, err=15):
		# store the primary attributes
		self.fun = fun
		self.n = n
		self.error = []
		self.xlist = []
		# iteration counts are kept in a separate list so the target value self.n is not overwritten
		self.nlist = []
		i = 0
		# pick a random starting point for x
		x = rd.random()
		# iterative loop: Newton's method for solving fun(x) = n
		while True:
			i += 1
			x = x - (fun(x) - n) / mp.diff(fun, x)
			self.xlist.append(x)
			# error between the requested value and the current iterate
			e = fun(x) - n
			# record the error and the step count (for inspection or plotting)
			self.error.append(e)
			self.nlist.append(i)
			# stop once the error magnitude is within the accepted tolerance
			if abs(e) < 10**(-err):
				break
		self.x = x
Example #7
 def __call__(self, Xi, Xj, ni, nj, hyper_deriv=None, symmetric=False):
     """Evaluate the covariance between points `Xi` and `Xj` with derivative order `ni`, `nj`.
     
     Parameters
     ----------
     Xi : :py:class:`Matrix` or other Array-like, (`M`, `N`)
         `M` inputs with dimension `N`.
     Xj : :py:class:`Matrix` or other Array-like, (`M`, `N`)
         `M` inputs with dimension `N`.
     ni : :py:class:`Matrix` or other Array-like, (`M`, `N`)
         `M` derivative orders for set `i`.
     nj : :py:class:`Matrix` or other Array-like, (`M`, `N`)
         `M` derivative orders for set `j`.
     hyper_deriv : Non-negative int or None, optional
         The index of the hyperparameter to compute the first derivative
         with respect to. If None, no derivatives are taken. Hyperparameter
         derivatives are not supported at this point. Default is None.
     symmetric : bool, optional
         Whether or not the input `Xi`, `Xj` are from a symmetric matrix.
         Default is False.
     
     Returns
     -------
     Kij : :py:class:`Array`, (`M`,)
         Covariances for each of the `M` `Xi`, `Xj` pairs.
     
     Raises
     ------
     NotImplementedError
         If the `hyper_deriv` keyword is not None.
     """
     if hyper_deriv is not None:
         raise NotImplementedError("Hyperparameter derivatives have not been implemented!")
     n_cat = scipy.asarray(scipy.concatenate((ni, nj), axis=1), dtype=int)
     X_cat = scipy.asarray(scipy.concatenate((Xi, Xj), axis=1), dtype=float)
     n_cat_unique = unique_rows(n_cat)
     k = scipy.zeros(Xi.shape[0], dtype=float)
     # Loop over unique derivative patterns:
     if self.num_proc > 1:
         pool = multiprocessing.Pool(processes=self.num_proc)
     for n_cat_state in n_cat_unique:
         idxs = scipy.where(scipy.asarray((n_cat == n_cat_state).all(axis=1)).squeeze())[0]
         if (n_cat_state == 0).all():
             k[idxs] = self.cov_func(Xi[idxs, :], Xj[idxs, :], *self.params)
         else:
             if self.num_proc > 1 and len(idxs) > 1:
                 k[idxs] = scipy.asarray(
                     pool.map(_ArbitraryKernelEval(self, n_cat_state), X_cat[idxs, :]),
                     dtype=float
                 )
             else:
                 for idx in idxs:
                     k[idx] = mpmath.chop(mpmath.diff(self._mask_cov_func,
                                                      X_cat[idx, :],
                                                      n=n_cat_state,
                                                      singular=True))
     
     if self.num_proc > 0:
         pool.close()
     return k
Example #8
 def __call__(self, Xi, Xj, ni, nj, hyper_deriv=None, symmetric=False):
     """Evaluate the covariance between points `Xi` and `Xj` with derivative order `ni`, `nj`.
     
     Parameters
     ----------
     Xi : :py:class:`Matrix` or other Array-like, (`M`, `D`)
         `M` inputs with dimension `D`.
     Xj : :py:class:`Matrix` or other Array-like, (`M`, `D`)
         `M` inputs with dimension `D`.
     ni : :py:class:`Matrix` or other Array-like, (`M`, `D`)
         `M` derivative orders for set `i`.
     nj : :py:class:`Matrix` or other Array-like, (`M`, `D`)
         `M` derivative orders for set `j`.
     hyper_deriv : Non-negative int or None, optional
         The index of the hyperparameter to compute the first derivative
         with respect to. If None, no derivatives are taken. Hyperparameter
         derivatives are not supported at this point. Default is None.
     symmetric : bool, optional
         Whether or not the input `Xi`, `Xj` are from a symmetric matrix.
         Default is False.
     
     Returns
     -------
     Kij : :py:class:`Array`, (`M`,)
         Covariances for each of the `M` `Xi`, `Xj` pairs.
     
     Raises
     ------
     NotImplementedError
         If the `hyper_deriv` keyword is not None.
     """
     if hyper_deriv is not None:
         raise NotImplementedError("Hyperparameter derivatives have not been implemented!")
     n_cat = scipy.asarray(scipy.concatenate((ni, nj), axis=1), dtype=int)
     X_cat = scipy.asarray(scipy.concatenate((Xi, Xj), axis=1), dtype=float)
     n_cat_unique = unique_rows(n_cat)
     k = scipy.zeros(Xi.shape[0], dtype=float)
     # Loop over unique derivative patterns:
     if self.num_proc > 1:
         pool = multiprocessing.Pool(processes=self.num_proc)
     for n_cat_state in n_cat_unique:
         idxs = scipy.where(scipy.asarray((n_cat == n_cat_state).all(axis=1)).squeeze())[0]
         if (n_cat_state == 0).all():
             k[idxs] = self.cov_func(Xi[idxs, :], Xj[idxs, :], *self.params)
         else:
             if self.num_proc > 1 and len(idxs) > 1:
                 k[idxs] = scipy.asarray(
                     pool.map(_ArbitraryKernelEval(self, n_cat_state), X_cat[idxs, :]),
                     dtype=float
                 )
             else:
                 for idx in idxs:
                     k[idx] = mpmath.chop(mpmath.diff(self._mask_cov_func,
                                                      X_cat[idx, :],
                                                      n=n_cat_state,
                                                      singular=True))
     
     if self.num_proc > 0:
         pool.close()
     return k
Example #9
def main():
    func = lambda x: mpmath.exp(mpmath.power(x, 2))
    precision = sys.argv[1].split('**')
    precision = math.pow(int(precision[0]), int(precision[1]))
    x = mpmath.mpf(float(sys.argv[2]))

    print "expected value = %f" % mpmath.quad(func, [0, x])
    print "precision = %f" % precision
    print "x = %f" % x
    print "max Taylor degree to try = %s" % sys.argv[3]
    print ""

    upperbound = int(sys.argv[3])
    lowerbound = 0
    lowestn = 0

    # find the degree logarithmically, this is usually faster than trying 0..n
    while lowerbound < upperbound:
        n = (lowerbound + upperbound) // 2  # integer midpoint

        # estimate the remainder
        diff = mpmath.diff(func, x, n)
        rn = diff / mpmath.factorial(n + 1)
        rn = rn * mpmath.power(x, n + 1)

        # is it good enough?
        if rn < precision:
            upperbound = n
            lowestn = n
        else:
            lowerbound = n + 1

    if lowestn:
        print "lowest Taylor degree needed = %d" % lowestn
        coefficients = []

        # find the coefficients of our Taylor polynomial
        for k in reversed(range(lowestn + 1)):
            if k > 0:
                coefficients.append(mpmath.diff(func, 0, k - 1) / mpmath.factorial(k))

        # compute the value of the polynomial (add 0 for the free variable, the value of the indefinite integral at 0)
        p = mpmath.polyval(coefficients + [0], x)
        print "computed value = %f" % p
    else:
        print "max n is too low"
Example #10
def zetac_series(N):
    coeffs = []
    with mpmath.workdps(100):
        coeffs.append(-1.5)
        for n in range(1, N):
            coeff = mpmath.diff(mpmath.zeta, 0, n) / mpmath.factorial(n)
            coeffs.append(coeff)
    return coeffs
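A small usage sketch (not from the original source): the list holds the Taylor coefficients of zeta(s) - 1 about s = 0, so summing them against powers of a small s should closely reproduce mpmath.zeta(s) - 1.

import mpmath

coeffs = zetac_series(10)
s = mpmath.mpf('0.05')
series_value = sum(c * s**k for k, c in enumerate(coeffs))
print(series_value)
print(mpmath.zeta(s) - 1)  # should agree closely for small s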
Example #11
def cutoff(s, b):
    return abs(
        im(
            w(
                mp.findroot(
                    lambda y: diff(lambda x: im(w(x - j * s / 4, s, b)), y, 1),
                    1 / 2,
                    tol=10**(-10)) - j * s / 4, s, b)))
Example #12
def zetac_series(N):
    coeffs = []
    with mpmath.workdps(100):
        coeffs.append(-1.5)
        for n in range(1, N):
            coeff = mpmath.diff(mpmath.zeta, 0, n)/mpmath.factorial(n)
            coeffs.append(coeff)
    return coeffs
Example #13
def basc_Newton_Raphson(f, x):
    step = 0
    last_x = x
    while np.abs(f(x)) > eps or np.abs(x - last_x) > eps:
        last_x = x
        x -= f(x) / diff(f, x)
        step += 1
        print(f'Step {step}: x = {x}, f(x) = {f(x)}')
    return x
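A possible call, assuming the module-level names the function relies on (eps, np as numpy, and diff as mpmath.diff) are set up as below:

import numpy as np
from mpmath import diff

eps = 1e-10

# Solve x**2 - 2 = 0 starting from x = 1; converges to sqrt(2).
root = basc_Newton_Raphson(lambda x: x**2 - 2, 1.0)
print(root)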
Example #14
 def RF_efield(self, xeval, *p):
     """
     Derivative of RF_potential
     :param xeval:
     :param p: [a0, a1] --> diff(a0 + a1 * x)
     :return: diff(a0 + a1 * x)
     """
     a0, a1 = p
     return float(mpmath.diff(lambda y: a0 + a1 * y, xeval))
Example #15
def d2DedekindEtaA(tau):
    ''' Compute the second derivative of the Dedekind Eta function for imaginary
        argument tau, numerically.'''
    try:
        import mpmath as mp
        mpmath_loaded = True
    except ImportError:
        mpmath_loaded = False 
    
    return mp.diff(lambda tau:dDedekindEtaA(tau),tau,1)
Example #16
def accuracy_condition(point, rf):
    der_tuple = [0]*len(point)
    point_condition = 0.0
    for i in range(len(point)):
        # use a fresh copy so earlier 1-entries do not accumulate across iterations
        temp_tuple = list(der_tuple)
        temp_tuple[i] = 1
        point_condition += diff(rf, tuple(point), tuple(temp_tuple)) * bf.getulp(point[i])
    ulp_point = bf.getulp(rf(*point))
    point_condition = point_condition/ulp_point
    return math.fabs(point_condition)
Example #17
 def __call__(self, X_cat_row):
     """Return the covariance function of object evaluated at the given `X_cat_row`.
     
     Parameters
     ----------
     X_cat_row : Array-like, (2,)
         The `Xi` and `Xj` point to evaluate at.
     """
     return mpmath.chop(mpmath.diff(self.obj._mask_cov_func,
                                    X_cat_row,
                                    n=self.n_cat_state,
                                    singular=True))
Example #19
 def _generate_poly_coefficients(self, function, center, degree):
     """Private method which generates coefficients for the Taylor polynomial based on given degree and center."""
     for i in range(0, degree + 1):
         # Each "i" term contains the function's ith derivative value at x=center,
         # divded by the ith factorial.
         # This is because when taking the ith derivative of the Taylor polynomial-
         # power rule results in additional terms being multiplied into the ith coefficient (i.e 1*2*3*4...*i)
         # (Note: the ith coefficient becomes a constant term)
         # These multiples need to be cancelled out-
         # so that the constant term is the "function"'s actual derivative value, hence dividing by i! factorial.
         self.polynomial.append(
             round(mpmath.diff(function, center, i), 6) / factorial(i))
Example #20
def _fprime(x, fx, method=None, extrap=False, **kwds):
    '''find gradient of fx at x, where fx is a function z=fx(x)

    Input:
      x: an array of shape (npts, dim) or (npts,)
      fx: a function, z = fx(x)
      method: string for kind of gradient method
      extrap: if True, extrapolate a bounding box (can reduce # of nans)
      new: if True, include the extrapolated points in the output

    Output:
      array of dimensions x.shape, gradient of the points at (x,fx)

    NOTE:
      if method is 'approx' (the default) use mystic's approx_fprime,
      which uses a local gradient approximation; other choices are
      'symbolic', which uses mpmath.diff if installed.

    NOTE:
      if extrap is True, extrapolate using interpf with method='thin_plate'
      (or 'rbf' if scipy is not found). Alternately, any one of ('rbf',
      'linear','cubic','nearest','inverse','gaussian','multiquadric',
      'quintic','thin_plate') can be used. If extrap is a cost function
      z = f(x), then directly use it in the extrapolation.
    '''
    slc = slice(None, None) if kwds.get('new', False) else slice(None, len(x))
    import numpy as np
    if extrap:
        x = extrapolate(x)
    x, i = _unique(x, index=True)
    if method is None or method == 'approx':
        from mystic._scipyoptimize import approx_fprime, _epsilon
        err = np.seterr(all='ignore')  # silence warnings (division by nan)
        #fx = _to_objective(fx) # conform to gradient interface
        x, s = np.atleast_2d(x), x.shape
        z = np.array([approx_fprime(xi, fx, _epsilon) for xi in x]).reshape(*s)
        np.seterr(**err)
        return z[i][slc]
    try:  #XXX: mpmath.diff is more error prone -- don't use it?
        from mpmath import diff
    except ImportError:
        return _fprime(x, fx, method=None)[i][slc]
    err = np.seterr(all='ignore')  # silence warnings (division by nan)
    #fx = _to_objective(fx) # conform to gradient interface
    x, s = np.atleast_2d(x), x.shape  # mirror the 'approx' branch above; `s` is used below
    k = range(s[-1])
    z = np.array([[
        diff(lambda *x: fx(_swapvals(x, j)), xk[_swapvals(k, j)], (1, ))
        for j in k
    ] for xk in x],
                 dtype=x.dtype).reshape(*s)
    np.seterr(**err)
    return z[i][slc]
Example #21
def damp_Newton_Raphson(f, x):
    step = 0
    while True:
        tmp = f(x) / diff(f, x)
        lambd = 1
        while lambd > 0 and np.abs(f(x - lambd * tmp)) >= np.abs(f(x)):
            lambd /= 2
        x -= lambd * tmp
        step += 1
        print(f'Step {step}: x = {x}, f(x) = {f(x)}')
        if lambd <= 0:
            break
    return x
Example #22
def fmin_gd(f,
            f_dx=None,
            x_0=1,
            alpha=0,
            error=1e-10,
            max_iter=1e+5,
            alpha_mul=1,
            disp=True):
    '''
	Uses gradient descent algorithm to find a *univariate* function's minimum
	Based on mpmath
	returns x where f(x) is minimized

	f - function to minimize. must only take in x as a parameter
	f_dx - optional, first derivative of f
	x_0 - optional, initial value of x
	alpha - optional, learning rate
	error - optional, acceptable error threshold
	max_iter - optional, maximum iterations
	alpha_mul - optional, multiplier to heuristically determined alpha
	disp - optional, prints out Iterations and Final Step if True

	Reference: https://en.wikipedia.org/wiki/Gradient_descent#Python
	'''
    if f_dx is None: f_dx = lambda x: mp.diff(f, x)

    # heuristically determine learning rate
    if alpha == 0:
        try:
            alpha = alpha_mul * mp.power(
                10, -2 - int(mp.log10(abs(f(x_0)) / abs(x_0))))
        except:
            alpha = alpha_mul * mp.power(
                10, -2 - int(mp.log10(abs(f(x_0 + 0.1)) / abs((x_0 + 0.1)))))

    cur_x = x_0
    step = alpha * f_dx(cur_x)
    ctr = 0
    while abs(step) > abs(error):
        step = alpha * f_dx(cur_x)
        cur_x -= step
        ctr += 1
        if ctr >= max_iter:
            warnings.warn(
                "Gradient Descent exited due to max iterations %i.\nReview alpha or x_0."
                % (max_iter))
            break
    if disp: print('Iterations: %i\nFinal Step Size: %.2e' % (ctr, step))
    return cur_x
Example #23
    def checkStability(self):
        """Computes the Jacobian matrix of partial derivatives and evaluates
        it at steady state, and then calculates the eigenvalues and 
        eigenvectors of the Jacobian.  

        In order for the steady state to be dynamically stable, we need
        to have one stable eigenvalue (i.e., one eigenvalue less than unity).

        Returns: A list containing the Jacobian evaluated at steady state.
            
        """
        # compute the Jacobian
        capital_k = mp.diff(f=self.capital, x=(self.SS_dict["k_star"]), n=(1))
        jacobian = np.array([capital_k])

        return [jacobian]
Example #24
 def train_bp_neuron(neuron, error, iteration, signals):
     # Ugly!
     summed = math.fsum(map(lambda entry: entry[0] * entry[1], zip(signals, neuron.weights))) - neuron.bias
     derivative = mpmath.diff(neuron.activation_func, summed)
     old_weights = neuron.weights
     if hasattr(neuron, 'old_weights'):
         # weight update with a momentum term based on the previous weights
         neuron.weights = [
             w + learning_rate(iteration) * error * derivative * s
             + momentum_rate(iteration) * (w - w_old)
             for w, s, w_old in zip(neuron.weights, signals, neuron.old_weights)
         ]
     else:
         neuron.weights = [
             w + learning_rate(iteration) * error * derivative * s
             for w, s in zip(neuron.weights, signals)
         ]
     neuron.old_weights = old_weights
Example #25
    def checkStability(self):
        """Computes the Jacobian matrix of partial derivatives and evaluates
        it at steady state, and then calculates the eigenvalues and 
        eigenvectors of the Jacobian.  

        In order for the steady state to be dynamically stable, we need
        to have one stable eigenvalue (i.e., one eigenvalue less than unity).

        Returns: A list containing the Jacobian evaluated at steady state.
            
        """
        # compute the Jacobian
        capital_k = mp.diff(f=self.capital, x=(self.SS_dict['k_star']), n=(1))
        jacobian = np.array([capital_k])

        return [jacobian]
Example #26
    def activationFunctionDerivative(self, x):
        '''
        Derivative of the sigmoid activation function.

        Parameters
        ----------
        x : float or array
            Point(s) at which to evaluate the derivative.

        Returns
        -------
        float or array
            The value of dsigmoid(x)/dx.

        '''
        afunction = self.activationFuncition

        return float(mpmath.diff(afunction, x))
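A standalone check of the same idea (independent of the class above): differentiate a sigmoid numerically with mpmath.diff and compare against the analytic derivative s(x) * (1 - s(x)).

import mpmath

sigmoid = lambda x: 1 / (1 + mpmath.exp(-x))

x = 0.7
numeric = float(mpmath.diff(sigmoid, x))
analytic = float(sigmoid(x) * (1 - sigmoid(x)))
print(numeric, analytic)  # both ~ 0.2217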
Example #27
 def _compute_dy_dtau(self, tau, b, r2l2):
     r"""Evaluate the derivative of the inner argument of the Matern kernel.
     
     Uses Faa di Bruno's formula to take the derivative of
     
     .. math::
     
         y = \sqrt{2 \nu \sum_i(\tau_i^2 / l_i^2)}
     
     Parameters
     ----------
     tau : :py:class:`Matrix`, (`M`, `N`)
         `M` inputs with dimension `N`.
     b : :py:class:`Array`, (`P`,)
         Block specifying derivatives to be evaluated.
     r2l2 : :py:class:`Array`, (`M`,)
         Precomputed anisotropically scaled distance.
     
     Returns
     -------
     dy_dtau: :py:class:`Array`, (`M`,)
         Specified derivative at specified locations.
     """
     deriv_partitions = generate_set_partitions(b)
     dy_dtau = scipy.zeros_like(r2l2, dtype=float)
     non_zero_idxs = (r2l2 != 0)
     for p in deriv_partitions:
         dy_dtau[non_zero_idxs] += self._compute_dy_dtau_on_partition(tau[non_zero_idxs], p, r2l2[non_zero_idxs])
     
     # Case at tau=0 is handled with mpmath for now.
     # TODO: This is painfully slow! Figure out how to do this analytically!
     derivs = scipy.zeros(tau.shape[1], dtype=int)
     for d in b:
         derivs[d] += 1
     dy_dtau[~non_zero_idxs] = mpmath.chop(
         mpmath.diff(
             self._compute_y_wrapper,
             scipy.zeros(tau.shape[1], dtype=float),
             n=derivs,
             singular=True,
             direction=1
         )
     )
     return dy_dtau
Example #28
    def _compute_dy_dtau(self, tau, b, r2l2):
        r"""Evaluate the derivative of the inner argument of the Matern kernel.
        
        Uses Faa di Bruno's formula to take the derivative of
        
        .. math::
        
            y = \sqrt{2 \nu \sum_i(\tau_i^2 / l_i^2)}
        
        Parameters
        ----------
        tau : :py:class:`Matrix`, (`M`, `D`)
            `M` inputs with dimension `D`.
        b : :py:class:`Array`, (`P`,)
            Block specifying derivatives to be evaluated.
        r2l2 : :py:class:`Array`, (`M`,)
            Precomputed anisotropically scaled distance.
        
        Returns
        -------
        dy_dtau: :py:class:`Array`, (`M`,)
            Specified derivative at specified locations.
        """
        deriv_partitions = generate_set_partitions(b)
        dy_dtau = scipy.zeros_like(r2l2, dtype=float)
        non_zero_idxs = (r2l2 != 0)
        for p in deriv_partitions:
            dy_dtau[non_zero_idxs] += self._compute_dy_dtau_on_partition(
                tau[non_zero_idxs], p, r2l2[non_zero_idxs])

        # Case at tau=0 is handled with mpmath for now.
        # TODO: This is painfully slow! Figure out how to do this analytically!
        derivs = scipy.zeros(tau.shape[1], dtype=int)
        for d in b:
            derivs[d] += 1
        dy_dtau[~non_zero_idxs] = mpmath.chop(
            mpmath.diff(self._compute_y_wrapper,
                        scipy.zeros(tau.shape[1], dtype=float),
                        n=derivs,
                        singular=True,
                        direction=1))
        return dy_dtau
Example #29
def newton(f, x0, damp=False):
    eps = 1e-8
    k = 0  # iteration step
    l = 0.9  # initial damp
    x = last_x = x0
    while np.abs(f(x)) > eps or np.abs(x - last_x) > eps:
        s = f(x) / np.float64(diff(f, x))
        last_x = x
        x = last_x - s
        k += 1
        print('Step {:2d}: s = {:.7f}, x = {:.7f}, f(x) = {:.7f}'.format(
            k, s, x, f(x)))
        if damp:
            i = 0
            while np.abs(f(x)) > np.abs(f(last_x)):
                l_n = l * (0.5**i)  # lambda_i = l * 0.5**i
                x = last_x - l_n * s
                i += 1
                print(
                    '- Damp with factor {:.5f}, s = {:.7f}, x = {:.7f}, f(x) = {:.7f}'
                    .format(l_n, l_n * s, x, f(x)))
    return x
Example #30
    def nth_derivative(self, nth: int) -> Func:
        """Create the nth-derivative of a function.

        If the nth-derivative has already been found, return that.
        Otherwise, numerically estimate an arbitrary derivative of
        the function.

        Parameters
        ----------
        nth
            The derivative desired.

        Returns
        -------
        nth_derivative : Func
            The nth-derivative of the function.

        """
        try:
            return self.derivatives[nth]
        except KeyError:
            if not nth:
                return self.func
            return lambda x_val: mp.diff(self.func, x_val, n=nth)
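The fallback branch relies on mp.diff's n keyword for arbitrary-order derivatives; a standalone illustration, independent of the class above:

import mpmath as mp

# Third derivative of sin is -cos, so at x = 0 it is -1.
print(mp.diff(mp.sin, 0, n=3))  # ~ -1.0
# Second derivative of exp at x = 1 is e.
print(mp.diff(mp.exp, 1, n=2))  # ~ 2.71828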
Example #31
def deriv(x, n):
    return mp.diff(f, x, n)
Example #32
def lambertw_pade():
    derivs = [mpmath.diff(mpmath.lambertw, 0, n=n) for n in range(6)]
    p, q = mpmath.pade(derivs, 3, 2)
    return p, q
Example #33
def lambertw_pade():
    derivs = []
    for n in range(6):
        derivs.append(mpmath.diff(mpmath.lambertw, 0, n=n))
    p, q = mpmath.pade(derivs, 3, 2)
    return p, q
Example #34
def deriv(x, n):  # finding derivative by mpmath package
    return mp.diff(f, x, n)
def Dw(v,s,b):
    """
    derivative of w. We use this function to transform UV cutoff.
    """
    return diff(lambda x: w(x,s,b), v, 1)
def cutoffD(s,b):
    """
    compute quench cutoff a for double splitting quench from s and b.
    """
    return im(w(mp.findroot(lambda y: diff(lambda x: im(w(x-j*s/4,s,b)) ,y,1),1/2)-j*s/4,s,b))
Example #37
#!/usr/bin/env python

import mpmath


# A function with no parameters
def f(x, y):
    return mpmath.exp(3.0 * x - y)


# A function with a parameter (alpha)
def g(x, y, alpha):
    return mpmath.exp(alpha * x - y)


# Point where derivatives are evaluated
point = (1.0, 1.0)

# Order of x and y derivatives
order = (2, 1)

print(mpmath.diff(f, point, order))
print(mpmath.diff(lambda x, y: g(x, y, 3.0), point, order))
# <codecell>

# assign resulting symbolic expression to a variable
symbolic_capital_k = sympy.diff(capital(k), k)

# <codecell>

# Evaluate the symbolic derivative at k_star and compare to the analytic solution.
print('Symbolic derivative of capital() w.r.t. k evaluated at k*:', symbolic_capital_k.evalf(n=12, subs={k: numeric_k_star}))
print('Analytic derivative of capital() w.r.t. k evaluated at k*:', analytic_capital_k(numeric_k_star))

# <codecell>

# Method 3: Numerical differentiation using Python (numerical differentiation and evaluation all in one step!)
numeric_capital_k = mpmath.diff(f=capital, x=(numeric_k_star), n=(1))

# print results
print('Numeric derivative of capital() w.r.t. k evaluated at k*: ', numeric_capital_k)
print('Symbolic derivative of capital() w.r.t. k evaluated at k*:', symbolic_capital_k.evalf(n=12, subs={k: numeric_k_star}))
print('Analytic derivative of capital() w.r.t. k evaluated at k*:', analytic_capital_k(numeric_k_star))

# <markdowncell>

# Task 5: Graphical analysis of the Solow model using Matplotlib
# --------------------------------------------------------------
# 
# In this task you will learn how to recreate some of the basic diagrams used to analyze the Solow model using the Python library matplotlib.  First, we will create the standard Solow diagram; second, we will create a phase plot for the Solow model.  Both of these diagrams should be very familiar to you from both the textbook and your lectures.

# <codecell>
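# A minimal sketch of the standard Solow diagram described in the markdown cell
# above; the production function f(k) = k**alpha and the parameter values are
# illustrative assumptions, not taken from the notebook itself.
import numpy as np
import matplotlib.pyplot as plt

alpha, s, n, g, delta = 0.33, 0.2, 0.01, 0.02, 0.05
k_grid = np.linspace(0.01, 10, 200)

plt.plot(k_grid, s * k_grid**alpha, label='actual investment, s*f(k)')
plt.plot(k_grid, (n + g + delta) * k_grid, label='break-even investment, (n+g+d)*k')
plt.xlabel('capital per effective worker, k')
plt.legend()
plt.show()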
 def leg_poly_diff(value):
     """Legendre polynomial derivative :math:`P_n'(x)`."""
     return mpmath.diff(leg_poly, value)
Example #41
 def train_bp_neuron(neuron, error, iteration, signals):
     # Ugly!
     summed = math.fsum(map(lambda entry: entry[0] * entry[1], zip(signals, neuron.weights))) - neuron.bias
     derivative = mpmath.diff(neuron.activation_func, summed)
     # materialize as a list so the updated weights can be reused on the next pass
     neuron.weights = [w + learning_rate(iteration) * error * derivative * s
                       for w, s in zip(neuron.weights, signals)]
 def f2(x0):
     return mpmath.diff(lambda y: func(y), x0, 2)
Example #43
 def _compute_dk_dy(self, y, n):
     r"""Evaluate the derivative of the outer form of the Matern kernel.
     
     Uses the general Leibniz rule to compute the n-th derivative of:
     
     .. math::
     
         f(y) = \frac{2^{1-\nu}}{\Gamma(\nu)} y^\nu K_\nu(y)
     
     Notice that this is very poorly-behaved at :math:`x=0`. There, the
     value is approximated using :py:func:`mpmath.diff` with the `singular`
     keyword. This is rather slow, so if you require a fixed value of `nu`
     you may wish to consider implementing the appropriate kernel separately.
     
     Parameters
     ----------
     y : :py:class:`Array`, (`M`,)
         `M` inputs to evaluate at.
     n : non-negative scalar int.
         Order of derivative to compute.
     
     Returns
     -------
     dk_dy : :py:class:`Array`, (`M`,)
         Specified derivative at specified locations.
     """
     warnings.warn("The Matern kernel has not been verified for derivatives. Consider using MaternKernelArb.")
     
     dk_dy = scipy.zeros_like(y, dtype=float)
     non_zero_idxs = (y != 0)
     for k in range(0, n + 1):
         dk_dy[non_zero_idxs] += (scipy.special.binom(n, k) *
                                  scipy.special.poch(1 - k + self.nu, k) *
                                  (y[non_zero_idxs])**(-k + self.nu) *
                                  scipy.special.kvp(self.nu, y[non_zero_idxs], n=n-k))
     
     # Handle the cases at y=0.
     # Compute the appropriate value using mpmath's arbitrary precision
     # arithmetic. This is potentially slow, but seems to behave pretty
     # well. In cases where the value should be infinite, very large
     # (but still finite) floats are returned with the appropriate sign.
     if n >= 2 * self.nu:
         warnings.warn("n >= 2*nu can yield inaccurate results.", RuntimeWarning)
     
     # Use John Wright's expression for n < 2 * nu:
     if n < 2.0 * self.nu:
         if n % 2 == 1:
             dk_dy[~non_zero_idxs] = 0.0
         else:
             m = n / 2.0
             dk_dy[~non_zero_idxs] = (
                 (-1.0)**m *
                 2.0**(self.nu - 1.0 - n) *
                 scipy.special.gamma(self.nu - m) *
                 scipy.misc.factorial(n) / scipy.misc.factorial(m)
             )
     else:
         # Fall back to mpmath to handle n >= 2 * nu:
         core_expr = lambda x: x**self.nu * mpmath.besselk(self.nu, x)
         deriv = mpmath.chop(mpmath.diff(core_expr, 0, n=n, singular=True, direction=1))
         dk_dy[~non_zero_idxs] = deriv
     
     dk_dy *= 2.0**(1 - self.nu) / (scipy.special.gamma(self.nu))
     
     return dk_dy  
Example #44
    def shape_func(x, y, z):
        p = np.array([x, y, z])
        L1 = volume_of_tetra(p, mc[1], mc[2], mc[3]) / v
        L2 = volume_of_tetra(mc[0], p, mc[2], mc[3]) / v
        L3 = volume_of_tetra(mc[0], mc[1], p, mc[3]) / v
        L4 = volume_of_tetra(mc[0], mc[1], mc[2], p) / v

        if m == 1:
            L = [L1, L2, L3, L4]
            return L[n]
        if m == 2:
            L = [
                2 * (L1 - 1) * L1, 2 * (L2 - 1) * L2, 2 * (L3 - 1) * L3,
                2 * (L4 - 1) * L4, 4 * L1 * L2, 4 * L1 * L3, 4 * L1 * L4,
                4 * L2 * L3, 4 * L3 * L4, 4 * L2 * L4
            ]
            return L[n]

    return shape_func


if __name__ == "__main__":
    mc = np.array([[0, 0, 0], [0, -1., 0.], [1., -1., 0], [0, -1., 1.]])

    def f(x, y, z):
        return volume_of_tetra(np.array([x, y, z]), mc[1], mc[2], mc[3])

    def fx(x, y, z):
        return x * y + z

    a = mp.diff(fx, (0.1, 1, 0.5), (0, 1, 0))
Example #45
 def _derivate(x, deriv=0):
     ret = mp.diff(func, x, deriv)
     return float(ret)
Example #46
 def _derivate(x, deriv=(0, 0, 0)):
     ret = mp.diff(func, (x[0], x[1], x[2]), deriv)
     return float(ret)
Example #47
0
def lambertw_pade():
    derivs = []
    for n in range(6):
        derivs.append(mpmath.diff(mpmath.lambertw, 0, n=n))
    p, q = mpmath.pade(derivs, 3, 2)
    return p, q