Example #1
def test_jacobian_hessian():
    x = Symbol('x')
    y = Symbol('y')
    L = Matrix(1,2,[x**2*y, 2*y**2 + x*y])
    syms = [x,y]
    assert L.jacobian(syms) == Matrix([[2*x*y, x**2],[y, 4*y+x]])

    L = Matrix(1,2,[x, x**2*y**3])
    assert L.jacobian(syms) == Matrix([[1, 0], [2*x*y**3, x**2*3*y**2]])

    f = x**2*y
    syms = [x,y]
    assert hessian(f, syms) == Matrix([[2*y, 2*x], [2*x, 0]])

    f = x**2*y**3
    assert hessian(f, syms) == Matrix([[2*y**3, 6*x*y**2],[6*x*y**2, 6*x**2*y]])
Example #2
def stepSizeSteepestDescent(f, x0, g0):
    """
    Description : Calculates the minimizing step size for the steepest descent algorithm using Newton's 1D method

    Parameters:
               1. f   : symbolic representation of the function f
               2. x0  : list of independent variables for the function
               3. g0  : gradient value at point x0 as a numpy matrix

    Output:
               the minimizing step size as a float
    """
    phi, alpha = symbols('phi alpha')
    # X, fValue and epsilon are module-level names in the source project
    Q = hessian(f, X)
    if (poly(f).is_quadratic):
        return float(g0*g0.transpose()/matrix2numpy(g0*Q*g0.transpose()))
    else:
        xStar = x0 - squeeze(asarray(alpha*g0))
        def alphaValue(phi, a0): return phi.subs([(alpha, a0)])
        a0 = 0.
        phi = fValue(f, xStar)
        while (True):
            a = a0
            a0 = a0 - alphaValue(diff(phi, alpha), a0)/alphaValue(diff(diff(phi, alpha), alpha), a0)
            if (abs(a - a0) < epsilon): return a0
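In the quadratic branch above, the exact step size along -g0 is alpha = (g0 g0^T)/(g0 Q g0^T). A minimal self-contained sketch of that formula, with the test function, point and gradient made up for illustration:

import numpy as np
from sympy import symbols, hessian, matrix2numpy

x1, x2 = symbols('x1 x2')
f = x1**2 + 2*x2**2                  # illustrative quadratic
Q = matrix2numpy(hessian(f, [x1, x2]), dtype=float)
g0 = np.array([2.0, 4.0])            # gradient of f at the point (1, 1)
alpha = float(g0 @ g0) / float(g0 @ Q @ g0)
print(alpha)                         # exact minimizer of f(x0 - alpha*g0)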
Example #3
def kk_ver2(Pos, SpCons, Len):
    'Joint optimization over all 2n variables'
    t_start = time()
    nodes, dim = Pos.shape
    const = (SpCons, Len)
    P = sp.IndexedBase('P')
    K = sp.IndexedBase('K')
    L = sp.IndexedBase('L')
    X = sp.IndexedBase('X')

    i, j, d = [
        sp.Idx(*spec) for spec in [('i', nodes), ('j', nodes), ('d', dim)]
    ]
    i_range, j_range, d_range = [(idx, idx.lower, idx.upper)
                                 for idx in [i, j, d]]

    # prepare the potential function
    print('reserving Potential function')
    dist = sp.sqrt(sp.Sum((P[i, d] - P[j, d])**2, d_range)).doit()
    E = sp.Sum(K[i, j] * (dist - L[i, j])**2, i_range, j_range)

    # prepare the Jacobian and Hessian
    print('reserving jacobian and hessian')
    varP = [P[i, d] for i in range(nodes) for d in range(dim)]
    E_jac = sp.Matrix([E]).jacobian(varP)
    E_hes = sp.hessian(E, varP)

    print('generating derivative equation')
    varX = [X[i] for i in range(nodes * dim)]
    PX = np.array([X[i * dim + j] for i in range(nodes)
                   for j in range(dim)]).reshape(nodes, dim)

    E_X = E.replace(K, SpCons).replace(L, Len).replace(P, PX).doit()
    E_jac_X = sp.Matrix([E_X]).jacobian(varX)
    E_hes_X = sp.hessian(E_X, varX)

    print('generating derivative function')
    F, G, H = [sp.lambdify(X, f) for f in [E_X, E_jac_X, E_hes_X]]

    print('fitting')
    res = minimize(F,
                   Pos,
                   jac=lambda x: np.array([G(x)]).flatten(),
                   hess=H,
                   method='trust-ncg')

    print('[time:', time() - t_start, 's]')
    return res.x.reshape(nodes, dim)
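A hypothetical call of kk_ver2, assuming the imports the snippet relies on (sympy as sp, numpy as np, scipy.optimize.minimize, time.time) and a toy two-node spring system; the arrays are made up for illustration:

import numpy as np

Pos = np.array([[0.0, 0.0], [0.3, 0.0]])     # initial coordinates
SpCons = np.array([[0.0, 1.0], [1.0, 0.0]])  # spring constants K[i, j]
Len = np.array([[0.0, 1.0], [1.0, 0.0]])     # natural lengths L[i, j]
new_pos = kk_ver2(Pos, SpCons, Len)          # nodes should end up about 1 apart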
Example #4
    def stepSizeSteepestDescent(f, x0, g0):
        """
        Description : Calculates the minimizing step size for the steepest descent algorithm using Newton's 1D method

        Parameters:
                   1. f   : symbolic representation of the function f
                   2. x0  : list of independent variables for the function
                   3. g0  : gradient value at point x0 as a numpy matrix

        Output:
                   the minimizing step size as a float
        """
        phi, alpha = symbols('phi alpha')
        Q = hessian(f, X)
        if (poly(f).is_quadratic):
            return float(g0 * g0.transpose() /
                         matrix2numpy(g0 * Q * g0.transpose()))
        else:
            xStar = x0 - squeeze(asarray(alpha * g0))

            def alphaValue(phi, a0):
                return phi.subs([(alpha, a0)])

            a0 = 0.
            phi = fValue(f, xStar)
            while (True):
                a = a0
                a0 = a0 - alphaValue(diff(phi, alpha), a0) / alphaValue(
                    diff(diff(phi, alpha), alpha), a0)
                if (abs(a - a0) < epsilon): return a0
Example #5
def newton(f, x0, X):
    """
    Description : Returns the minimizer using Newton's Algorithm
    
    Inputs: 
           f  = symbolic representation of the function to be minimized
           x0 = Initial guess of the minimizer
           X  = list of the function's dependent variables
    
    Outputs: 
           Displays the minimizer on the screen
    """
    def printFunction(f, x0):
        """
        Description : Prints the minimizer of the function on the screen
        
        Inputs: 
               f  = symbolic representation of the function to be minimized
               x0 = Initial guess of the minimizer
        """
        print "\n+++++++++++++++++++++++++++++NEWTONS METHOD+++++++++++++++++++++++++++++"
        print "\nThe minimizer of the function\n\nf = %s \nis at \nx = %s\n\nThe value of f is \nf(x) = %f\n " % (
            f, x0, fValue(f, x0, X))
        print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
        return 0

    gradF = [f.diff(x) for x in X]
    hessianF = hessian(f, X)
    while (True):
        g0 = gradValue(gradF, x0, X)
        if (all(abs(i) < epsilon for i in squeeze(asarray(g0, 'float')))):
            return printFunction(f, x0)
        F0 = hessianValue(hessianF, x0, X)
        x0 = x0 - squeeze(asarray((inv(F0) * g0.transpose()).transpose()))
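gradValue, hessianValue, fValue and epsilon are helpers from the surrounding project. A self-contained sketch of the same Newton iteration using only sympy and numpy, with the test function and starting point made up:

import numpy as np
from sympy import symbols, hessian, Matrix, lambdify

x1, x2 = symbols('x1 x2')
f = (x1 - 1)**2 + 10*(x2 - x1**2)**2
X = [x1, x2]
grad = lambdify(X, Matrix([f]).jacobian(X), 'numpy')
hess = lambdify(X, hessian(f, X), 'numpy')

x0 = np.zeros(2)
for _ in range(50):
    g = np.asarray(grad(*x0), dtype=float).ravel()
    if np.all(np.abs(g) < 1e-10):
        break
    # Newton step: solve H d = g, then x <- x - d
    x0 = x0 - np.linalg.solve(np.asarray(hess(*x0), dtype=float), g)
print(x0)   # converges to the minimizer (1, 1)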
Example #6
def question_1_c():
    sp.init_printing(use_unicode=True)
    x1, x2 = sp.symbols('x1 x2', real=True)
    x = sp.Matrix([[0], [0]])

    def evaluate(function, x):
        return function.evalf(subs={x1: x[0], x2: x[1]})

    f = (x1**2 + x2 - 11)**2 + (x1 + x2**2 - 7)**2
    hessian = sp.hessian(f, (x1, x2))

    is_positive_definite = evaluate(hessian, x).is_positive_definite
    print('\nThe hessian of the function is:')
    print(hessian)
    print(
        '\nTo show whether the function is convex, we determine whether the Hessian is positive definite.'
    )
    print(
        'To do that we fix x = (x1, x2) = (0, 0); the value of the hessian at that point is:'
    )
    print(evaluate(hessian, x))
    print(
        '\nSince the value is a matrix, we call the property "is_positive_definite" of a sympy matrix, which returns the following value:'
    )
    print(is_positive_definite)
    print('So, we can affirm that this function is not convex.')
Example #7
def get_hessian(func_str, all_vals, eval_func_map=FUNC_MAP, math_func_map=MATH_FUNC_MAP):
    """
    Returns the hessian matrix of the input function over the input variables
    :param func_str: A string of input math function
    :param all_vals: A list of real scalar values
    :param eval_func_map: A mapping of math expression to python's math function, used when evaluating derivatives numerically
    :param math_func_map: A mapping of math function names to python functions, merged into the eval() namespace
    :return: The D x D Hessian matrix as a numpy array
    """
    # Assume func is a valid expression
    D = len(all_vals)
    assert D > 0, "There should be at least 1 variable!"

    H = np.zeros((D, D))
    if D == 1:
        H[0][0] = get_ord2_der(func_str, all_vals, 0, eval_func_map)
    else:
        var_map = {"x%d" % i: val for i, val in enumerate(all_vals)}
        var_map.update(math_func_map)
        f = sympify(func_str)
        vs = f.free_symbols
        hess = hessian(f, list(ordered(vs)))
        # print(hess)
        for i in range(D):
            for j in range(D):
                didj_func = hess[i * D + j]
                H[i][j] = eval(str(didj_func), var_map)
    return H
Example #8
def newton(f,x0,X):
    """
    Description : Returns the minimizer using Newton's Algorithm
    
    Inputs: 
           f  = symbolic representation of the function to be minimized
           x0 = Initial guess of the minimizer
           X  = list of the function's dependent variables
    
    Outputs: 
           Displays the minimizer on the screen
    """
    
    def printFunction(f,x0):
        """
        Description : Prints the minimizer of the function on the screen
        
        Inputs: 
               f  = symbolic representation of the function to be minimized
               x0 = Initial guess of the minimizer
        """
        print "\n+++++++++++++++++++++++++++++NEWTONS METHOD+++++++++++++++++++++++++++++"
        print "\nThe minimizer of the function\n\nf = %s \nis at \nx = %s\n\nThe value of f is \nf(x) = %f\n "%(f,x0,fValue(f,x0,X))
        print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
        return 0
    gradF = [f.diff(x) for x in X]
    hessianF = hessian(f,X)
    while(True):
        g0 = gradValue(gradF,x0,X)
        if(all(abs(i) < epsilon for i in squeeze(asarray(g0,'float')))): return printFunction(f,x0)
        F0 = hessianValue(hessianF,x0,X)
        x0 = x0 - squeeze(asarray((inv(F0)*g0.transpose()).transpose()))
Example #9
def test_jacobian_hessian():
    x = Symbol('x')
    y = Symbol('y')
    L = Matrix(1,2,[x**2*y, 2*y**2 + x*y])
    syms = [x,y]
    assert L.jacobian(syms) == Matrix([[2*x*y, x**2],[y, 4*y+x]])

    L = Matrix(1,2,[x, x**2*y**3])
    assert L.jacobian(syms) == Matrix([[1, 0], [2*x*y**3, x**2*3*y**2]])

    f = x**2*y
    syms = [x,y]
    assert hessian(f, syms) == Matrix([[2*y, 2*x], [2*x, 0]])

    f = x**2*y**3
    assert hessian(f, syms) == Matrix([[2*y**3, 6*x*y**2],[6*x*y**2, 6*x**2*y]])
Example #10
def newtons_method_oring(x, y):

    # Initialize log_likelihood & parameters
    Θ_1 = 0
    Θ_2 = 0  # The intercept term
    Δl = np.Infinity
    l = log_lh(x, y, Θ_1, Θ_2)
    # Convergence Conditions
    δ = .0000000001
    max_iterations = 1000
    i = 0
    while abs(Δl) > δ and i < max_iterations:
        i += 1
        g = gradient(x, y, Θ_1, Θ_2)
        hess = hessian(x, y, Θ_1, Θ_2)
        H_inv = np.linalg.inv(hess)
        # @ is syntactic sugar for np.dot(H_inv, g.T)
        Δ = H_inv @ g.T
        ΔΘ_1 = Δ[0][0]
        ΔΘ_2 = Δ[1][0]

        # Perform our update step
        Θ_1 += ΔΘ_1
        Θ_2 += ΔΘ_2

        # Update the log-likelihood after each update step
        l_new = log_lh(x, y, Θ_1, Θ_2)
        Δl = l - l_new
        l = l_new

        print(np.array([Θ_1, Θ_2]))
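log_lh, gradient and hessian here are project helpers for a one-feature logistic regression. A hedged sketch of what they plausibly compute (all names and shapes are assumptions), with signs chosen so that the update Θ += H_inv @ g.T above is a Newton-Raphson ascent step:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def log_lh(x, y, t1, t2):
    # Bernoulli log-likelihood with p = sigmoid(t1*x + t2)
    p = sigmoid(t1 * x + t2)
    return np.sum(y * np.log(p) + (1 - y) * np.log(1 - p))

def gradient(x, y, t1, t2):
    # 1x2 row vector, so that g.T is a column
    r = y - sigmoid(t1 * x + t2)
    return np.array([[np.sum(r * x), np.sum(r)]])

def hessian(x, y, t1, t2):
    # negated Hessian (observed information), positive definite,
    # which makes H_inv @ g.T an ascent direction
    s = sigmoid(t1 * x + t2)
    w = s * (1 - s)
    return np.array([[np.sum(w * x * x), np.sum(w * x)],
                     [np.sum(w * x),     np.sum(w)]])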
Example #11
File: lm.py Project: cszachary/CS-math
def lm(f_str, symbols_str):
    [f, symbols] = preprocessing(f_str, symbols_str)
    # # of variables
    D = len(symbols)
    hess = sp.hessian(f, symbols)
    mexpr = sp.Matrix([f])
    grad = mexpr.jacobian(symbols)
    grad = grad.T
    # initial value
    xk = sp.zeros(D, 1)
    fk = 0
    uk = 0.000000001
    epsilon = 0.00001
    for k in range(100):
        Xk_map = {}
        for i in range(D):
            Xk_map[symbols[i]] = xk[i, 0]
        fk = f.evalf(subs=Xk_map)
        gk = grad.evalf(subs=Xk_map)
        Gk = hess.evalf(subs=Xk_map)
        if gk.norm() < epsilon:
            break
        while True:
            eigvalue = sp.Matrix.eigenvals(Gk + uk * sp.eye(D))
            if allgtzero(eigvalue):
                break
            else:
                uk = uk * 4


        # sk = solve(Gk + uk * sp.eye(D), -gk)
        sk = (Gk + uk * sp.eye(D)).LUsolve(-gk)
        Xsk_map = {}
        for i in range(D):
            Xsk_map[symbols[i]] = xk[i, 0] + sk[i, 0]
        fxsk = f.evalf(subs=Xsk_map)
        delta_fk = fk - fxsk

        t1 = (gk.T * sk)[0, 0]
        t2 = (0.5 * sk.T * Gk * sk)[0, 0]
        qk = fk + t1 + t2
        delta_qk = fk - qk
        rk = delta_fk / delta_qk
        if rk < 0.25:
            uk = uk * 4
        elif rk > 0.75:
            uk = uk / 2
        else:
            uk = uk

        if rk <= 0:
            xk = xk
        else:
            xk = xk + sk

    print(f, symbols)
    for i in range(D):
        print(symbols[i], ' = ', xk[i])
    print('min f =', fk)
    return [xk, fk, k]
Example #12
def uncons_opt(obj, cons, Vars, x0):
    """nlinprog

    Parameters
    ----------
    obj :
    cons :

    Returns
    -------

    Notes
    ------
    """
    raise NotImplementedError

    assert(obj == 0)
    for c in cons:
        assert(isinstance(c, sym.LessThan))
        assert(c.rhs == 0)
        obj += c.lhs**2

    eval_f = ft.partial(eval_expr, sym.lambdify(Vars, obj))
    eval_grad_f = ft.partial(eval_grad_obj, sym.lambdify(Vars, grad(Vars, obj)))
    eval_hessian_f = ft.partial(eval_expr, sym.lambdify(Vars, sym.hessian(obj, Vars)))

    # Return codes in IpReturnCodes_inc.h
    res_x, mL, mU, Lambda, res_obj, status = pyipopt.fmin_unconstrained(
            eval_f,
            x0,
            fprime=eval_grad_f,
            fhess=eval_hessian_f,
            )

    return spec.OPTRES(res_obj, res_x, 'OK', res_obj <= 0)
Example #13
def lm(f_str, sym_str):
	[f, symbols] = pr(f_str, sym_str)
	# # of variables
	D = len(symbols)
	hess = sp.hessian(f, symbols)
	mexpr = sp.Matrix([f])
	grad = mexpr.jacobian(symbols)
	grad = grad.T
	# initial value
	xk = sp.zeros(D, 1)
	fk = 0
	uk = 0.000000001
	epsilon = 0.00001
	for k in range(100):
		Xk_map = {} 
		for i in range(D):
			Xk_map[symbols[i]] = xk[i, 0]
		fk = f.evalf(subs=Xk_map)
		gk = grad.evalf(subs=Xk_map)
		Gk = hess.evalf(subs=Xk_map)
		if gk.norm() < epsilon:
			break
		while  True:
			eigvalue = sp.Matrix.eigenvals(Gk + uk * sp.eye(D))
			if allzero(eigvalue):
				break
			else:
				uk = uk * 4
		# sk = solve(Gk + uk * sp.eye(D), -gk)
		sk = (Gk + uk * sp.eye(D)).LUsolve(-gk)
		Xsk_map = {}
		for i in range(D):
			Xsk_map[symbols[i]] = xk[i, 0] + sk[i, 0]
		fxsk = f.evalf(subs=Xsk_map)
		delta_fk = fk - fxsk
		
		t1 = (gk.T * sk)[0, 0]
		t2 = (0.5 * sk.T * Gk * sk)[0, 0]
		qk = fk + t1 + t2
		delta_qk = fk - qk
		rk = delta_fk / delta_qk
		if rk < 0.25:
			uk = uk * 4
		elif rk > 0.75:
			uk = uk / 2
		else:
			uk = uk
		
		if rk <= 0:
			xk = xk
		else:
			xk = xk + sk
		
	print(f, symbols)
	for i in range(D):
		print(symbols[i], ' = ', xk[i])
	print('min f =', fk)
	return [xk, fk, k]
Example #14
def dH(f, coord):
    n = len(coord)
    H = list(sp.hessian(f, coord))
    for i in range(n):
        for j in range(n):
            d = []
            for a in coord:
                d.append(sp.diff(H[i*n + j], a))
            H[i*n + j] = d
    return np.reshape(np.array(H), (n, n, n))
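dH therefore returns the third-derivative tensor T[i][j][k] = d^3 f / dx_i dx_j dx_k. A quick check with three coordinates, function chosen for illustration:

import numpy as np
import sympy as sp

x, y, z = sp.symbols('x y z')
T = dH(x**2 * y * z, [x, y, z])
print(T[0][0][1])   # d^3 f / dx dx dy = 2*z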
Example #15
File: snsympy.py Project: Ikki1/Kamada
def derivatives(F, X, ARGS):
    FGH = F, G, H = [sp.simplify(F) for F in
            [F, sp.Matrix([F]).jacobian(X).T, sp.hessian(F, X)]]
    
    md("**Function:**", '$$', F, '$$');  md('-----')
    md("**Gradient:**", '$$', G, '$$');  md('-----')
    md("**Hessian:**",  '$$', H, '$$\n')
    
    return [sp.lambdify(ARGS, F, dummify=False) for F in FGH]
Example #16
def kk_ver1(Pos, SpCons, Len, eps=0.001):
    'Partial optimization using a list of derivative functions'
    t_start = time()
    nodes, dim = Pos.shape
    const = (SpCons, Len)
    P = sp.IndexedBase('P')
    K = sp.IndexedBase('K')
    L = sp.IndexedBase('L')
    i, j, d = [
        sp.Idx(*spec) for spec in [('i', nodes), ('j', nodes), ('d', dim)]
    ]
    i_range, j_range, d_range = [(idx, idx.lower, idx.upper)
                                 for idx in [i, j, d]]

    # prepare the potential function
    dist = sp.sqrt(sp.Sum((P[i, d] - P[j, d])**2, d_range))
    Potential = 1 / 2 * K[i, j] * (dist - L[i, j])**2
    E = sp.Sum(Potential, i_range, j_range).doit()

    #list of functions
    E_jac, E_hess = [], []
    for m in range(nodes):
        variables = [P[m, d] for d in range(dim)]
        mth_jac, mth_hess = [
            partial(sp.lambdify((K, L, P), f, dummify=False), *const) for f in
            [sp.Matrix([E]).jacobian(variables),
             sp.hessian(E, variables)]
        ]
        E_jac.append(mth_jac)
        E_hess.append(mth_hess)
        print('generating...', int(m / nodes * 100), '%', "\r", end="")

    print('derivative functions are generated:', time() - t_start, 's')

    ##Optimisation
    delta_max = sp.oo
    loops = 0
    while (delta_max > eps):
        max_idx, delta_max = 0, 0
        for m in range(nodes):
            mth_jac = E_jac[m]
            delta = la.norm(mth_jac(Pos))
            if (delta_max < delta):
                delta_max = delta
                max_idx = m

        print(loops, 'th:', max_idx, ' delta=', delta_max)
        loops += 1
        jac = E_jac[max_idx]
        hess = E_hess[max_idx]
        while (la.norm(jac(Pos)) > eps):
            delta_x = la.solve(hess(Pos), jac(Pos).flatten())
            Pos[max_idx] -= delta_x

    print('Fitting Succeeded')
    print('Finish:', time() - t_start, 's')
    return Pos
Example #17
def second_order_taylorization(expr,shocks,covariances):    
    h = sympy.hessian(expr,shocks)
    resp = expr
    for i in range(covariances.shape[0]):
        for j in range(covariances.shape[1]):
            resp = resp + h[i,j] * covariances[i,j] / 2
    for s in shocks:
        resp = resp.subs(s,0)
    return(resp)
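A usage sketch: the result is the second-order approximation of E[f(eps)] for shocks eps with the given covariance matrix; the expression and numbers are made up:

import numpy as np
import sympy

e1, e2 = sympy.symbols('e1 e2')
expr = sympy.exp(e1) * (1 + e2)
cov = np.array([[0.04, 0.01],
                [0.01, 0.09]])
# expr at zero shocks plus (1/2) * sum_ij H[i,j] * cov[i,j]
print(second_order_taylorization(expr, [e1, e2], cov))   # 1.03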
Example #18
 def calculate_hessian(self):
     r"""Calculate the Hessian matrix :math:`\nabla^2 V` of the potential :math:`V(x)`
     with :math:`x \in \mathbb{R}^D`. For potentials which depend on only one variable,
     this equals the second derivative and :math:`D=1`. Note that this function is idempotent.
     """
     if self._hessian_s is None:
         # TODO: Add symbolic simplification
         self._hessian_s = sympy.hessian(self._potential_s[0, 0], self._variables)
         self._hessian_n = tuple(sympy.lambdify(self._variables, entry, "numpy") for entry in self._hessian_s)
Example #19
File: A2.py Project: choij/optimization
def rosenbrock():
    x, y = symbols('x y')
    f = symbols('f', cls=Function)
    f = 100 * (y - x**2)**2 + (1 - x)**2
    J11 = diff(f, x)
    J12 = diff(f, y)
    J = [J11, J12]
    H = hessian(f, [x, y])
    return f, J, H, x, y
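A usage sketch, assuming the snippet's names come from sympy (symbols, Function, diff, hessian):

f, J, H, x, y = rosenbrock()
print(J[0].subs({x: 1, y: 1}), J[1].subs({x: 1, y: 1}))   # 0 0: (1, 1) is critical
print(H.subs({x: 1, y: 1}).eigenvals())                   # both eigenvalues positive: a minimum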
Example #20
 def calculate_hessian(self):
     r"""Calculate the Hessian matrix :math:`\nabla^2 V` of the potential :math:`V(x)`
     with :math:`x \in \mathbb{R}^D`. For potentials which depend on only one variable,
     this equals the second derivative and :math:`D=1`. Note that this function is idempotent.
     """
     if self._hessian_s is None:
         # TODO: Add symbolic simplification
         self._hessian_s = sympy.hessian(self._potential_s[0,0], self._variables)
         self._hessian_n = tuple([ sympy.lambdify(self._variables, entry, "numpy") for entry in self._hessian_s ])
Example #21
def second_order_taylorization(expr, shocks, covariances):
    h = sympy.hessian(expr, shocks)
    resp = expr
    for i in range(covariances.shape[0]):
        for j in range(covariances.shape[1]):
            resp = resp + h[i, j] * covariances[i, j] / 2
    for s in shocks:
        resp = resp.subs(s, 0)
    return (resp)
Example #22
 def __init__(self, f, variables=('x', 'y')):
     self.f = f
     self.vars = symbols(' '.join(variables))
     self.invocations = 0
     self.grad_invocations_counter = 0
     self.hessian_invocations_counter = 0
     self._grad = [Derivative(f, var).doit() for var in self.vars]
     self._hessian = hessian(f, self.vars)
     self.require_count = False
Example #23
    def __init__(self, env, continious_cost):
        self.ccost = continious_cost
        self.env = env

        u = symv("u", env.action_size)
        x = symv('x', env.state_size)
        xc, uc = env.sym_discrete_xu2continious_xu(x, u)
        z = list(x) + list(u)

        # create the two cost functions
        w = continious_cost.sym_cf(xc, uc, 0) * env.dt
        self.c = sym.lambdify((tuple(x), tuple(u)), w, 'numpy')
        self.c_z = sym.lambdify((tuple(x), tuple(u)), derive_by_array(w, z), 'numpy')
        self.c_zz = sym.lambdify((tuple(x), tuple(u)), hessian(w, z), 'numpy')

        J = continious_cost.sym_c(x0=None, t0=None, xF=xc, tF=0)
        self.J = sym.lambdify(tuple(x), J, 'numpy')
        self.J_x = sym.lambdify(tuple(x), derive_by_array(J, x), 'numpy')
        self.J_xx = sym.lambdify(tuple(x), hessian(J, x), 'numpy')
Example #24
    def newton(self, max_ite=1000):
        # initialization
        setattr(self, "ite", 0)
        setattr(self, "x1", self.x1_init)
        setattr(self, "x2", self.x2_init)

        fx = diff(self.f, self.x)
        fy = diff(self.f, self.y)
        grad_f1 = Matrix([[fx], [fy]])
        grad_H2 = hessian(self.f, (self.x, self.y))
        x_tmp = self.x1_init
        y_tmp = self.x2_init

        # iterate
        # start = time.time()
        for _ in range(max_ite):
            grad_f1 = np.array(
                [[float(fx.evalf(subs={
                    self.x: x_tmp,
                    self.y: y_tmp
                }))], [float(fy.evalf(subs={
                    self.x: x_tmp,
                    self.y: y_tmp
                }))]])
            tmp = matrix2numpy(grad_H2.evalf(subs={
                self.x: x_tmp,
                self.y: y_tmp
            }),
                               dtype=float)
            ans_tmp = np.array([[x_tmp], [y_tmp]]) - np.dot(
                np.linalg.inv(tmp), grad_f1)
            acc_tmp = ((ans_tmp[0, 0] - x_tmp)**2 +
                       (ans_tmp[1, 0] - y_tmp)**2)**0.5
            self.ite += 1

            print("第{}次迭代后,坐标为({}, {})".format(self.ite, ans_tmp[0, 0],
                                               ans_tmp[1, 0]))

            x_tmp = ans_tmp[0, 0]
            y_tmp = ans_tmp[1, 0]
            f_tmp = self.f.evalf(subs={self.x: x_tmp, self.y: y_tmp})

            if acc_tmp <= self.stop_condition:
                self.x1 = ans_tmp[0, 0]
                self.x2 = ans_tmp[1, 0]
                break
        # end = time.time()
        # print((end-start)*1000)
        print("\n迭代结束点为({}, {})".format(self.x1, self.x2))
        print("最小值为{}".format(
            self.f.evalf(subs={
                self.x: self.x1,
                self.y: self.x2
            })))
Example #25
File: HW5_P2.py Project: e3u3/Robotics
def Q_approximation(r_const, k_const):
    sympy.var('x0:4')
    X = sympy.Matrix(4, 1, sympy.var('x0:4'))
    u = sympy.Matrix([sympy.var('u')])

    stage_cost = 1 - sympy.exp(k * sympy.cos(x0) + k * sympy.cos(x1) -
                               2 * k) + ((r**2) / 2 * u * u)[0, 0]
    mystage_cost = stage_cost.subs([(r, r_const), (k, k_const)])
    Q = sympy.hessian(mystage_cost, X)
    stage_cost0 = (0.5 * X.T * Q * X + r**2 * u**2 / 2)
    return Q, stage_cost0
Example #26
def init():
    alph = sym.Symbol('a')
    expr = sym.sympify(str(input('Input cost function\n')))
    var_list = list(expr.free_symbols)
    sgrad = sym.Matrix([sym.diff(expr, i) for i in var_list])
    shess = sym.hessian(expr, list(expr.free_symbols))
    values = {}
    for var in var_list:
        values[var] = float(
            input('Provide initial value of variable {} :'.format(var)))
    x = sym.Matrix(var_list)
    return [x, alph, expr, sgrad, shess, values]
Example #27
def question_1_d(x: float, y: float):
    sp.init_printing(use_unicode=True)
    x1, x2, a = sp.symbols('x1 x2 a', real=True)

    def evaluate(func, x):
        return func.evalf(subs={x1: x[0], x2: x[1]})

    f = (x1**2 + x2 - 11)**2 + (x1 + x2**2 - 7)**2
    gradient = sp.Matrix([f]).jacobian((x1, x2)).T
    hessian = sp.hessian(f, (x1, x2))

    tolerance = 1e-13

    sigma = 0.1
    beta = 0.1
    s = 1

    j = 0
    x = sp.Matrix([[x], [y]])
    fx = evaluate(f, x)
    grad = evaluate(gradient, x)

    while (grad.norm() > tolerance):

        # direction searching
        d = -grad

        # line searching
        rhs = -(sigma*s*d.T*grad)[0]  # right hand side
        alpha = s
        while fx - evaluate(f, x + alpha*d) < rhs:
            alpha *= beta
            rhs *= beta

        print(f'Iteration {j}')
        print(f'x\t\t: {x}')
        print(f'f(x)\t\t: {fx}')
        print(f'direction\t: {d}')
        print(f'alpha\t\t: {alpha}\n')
        print()

        # Updating
        x = x + alpha*d
        j += 1
        fx = evaluate(f, x)
        grad = evaluate(gradient, x)

    print(f'x\t: {x}')
    print(f'f(x)\t: {fx}')
Example #28
def conjugateGradient(f, x0, X):
    """
    Description : Returns the minimizer using the Conjugate Gradient Algorithm (as a Direct Method)
    
    Parameters:
               1. f   : symbolic representation of the function to be minimized
               2. x0  : list of independent variables for the function
               3. X   : symbolic list of the dependent variables in the function f
    
    Output:
               Prints the minimizer of the function f to terminal
    """
    def printFunction(f, x0):
        """
        Description : Prints the minimizer of the function on the screen
        
        Inputs: 
               f  = symbolic representation of the function to be minimized
               x0 = Initial guess of the minimizer
        """
        print "\n+++++++++++++++++++++++++++++CONJUGATE GRADIENT METHOD+++++++++++++++++++++++++++++"
        print "\nThe minimizer of the function\n\nf = %s \nis at \nx = %s" % (
            f, x0)
        print "+++++++++++++++++++++++++++++++++++++++++++++++++=============+++++++++++++++++++++++\n"
        return 0

    gradF = [f.diff(x) for x in X]
    g0 = gradValue(gradF, x0, X)
    if (all(abs(i) < epsilon for i in squeeze(asarray(g0, 'float')))):
        return printFunction(f, x0)
    Q = matrix2numpy(hessian(f, X))
    d0 = -g0
    while (True):
        alpha = -squeeze(asarray(g0 * d0.transpose())) / squeeze(
            asarray(d0 * Q * d0.transpose()))
        x0 = [float(i) for i in (x0 + squeeze(asarray(alpha * d0)))]
        g0 = gradValue(gradF, x0, X)
        if (all(abs(i) < epsilon for i in squeeze(asarray(g0, 'float')))):
            return printFunction(f, x0)
        beta = float(
            squeeze(
                asarray(g0 * Q * d0.transpose()) /
                squeeze(asarray(d0 * Q * d0.transpose()))))
        d0 = -g0 + beta * d0
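For a quadratic objective the loop above is the classic conjugate gradient recurrence. A self-contained numpy sketch of the same algebra, with the test matrix made up:

import numpy as np

Q = np.array([[4.0, 1.0], [1.0, 3.0]])   # SPD Hessian of f(x) = x'Qx/2 - b'x
b = np.array([1.0, 2.0])
x = np.zeros(2)
g = Q @ x - b                            # gradient of f
d = -g
for _ in range(len(b)):                  # exact in at most n steps
    alpha = -(g @ d) / (d @ Q @ d)
    x = x + alpha * d
    g = Q @ x - b
    if np.linalg.norm(g) < 1e-12:
        break
    beta = (g @ Q @ d) / (d @ Q @ d)
    d = -g + beta * d
print(x, np.linalg.solve(Q, b))          # both print the minimizer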
Example #29
def kk_ver3(Pos, SpCons, Len, eps=0.00001):
    'Exploits vertex anonymity to share a single set of derivative functions'
    start = time()
    nodes, dim = Pos.shape
    ni = nodes - 1
    X = sp.IndexedBase('X')  # the vertex being moved
    P = sp.IndexedBase('P')  # the fixed vertices
    Ki = sp.IndexedBase('Ki')  # spring constants involving the moving vertex
    Li = sp.IndexedBase('Li')  # natural lengths involving the moving vertex

    j, d = [sp.Idx(*spec) for spec in [('j', ni), ('d', dim)]]
    j_range, d_range = [(idx, idx.lower, idx.upper) for idx in [j, d]]

    # prepare the potential function
    dist = sp.sqrt(sp.Sum((X[d] - P[j, d])**2, d_range)).doit()
    Ei = sp.Sum(Ki[j] * (dist - Li[j])**2, j_range)

    # prepare the Jacobian and Hessian
    varX = [X[d] for d in range(dim)]
    Ei_jac, Ei_hess = [
        sp.lambdify((X, P, Ki, Li), sp.simplify(f), dummify=False)
        for f in [sp.Matrix([Ei]).jacobian(varX),
                  sp.hessian(Ei, varX)]
    ]
    print('generate function:', time() - start, 's')

    start = time()
    ##Optimisation
    delta_max = sp.oo
    xpkl = partial(_xpkl, P=Pos, K=SpCons, L=Len, n=nodes)
    while (delta_max > eps):
        # select the vertex most in need of improvement
        norms = np.array(
            list(map(lambda m: la.norm(Ei_jac(*xpkl(m))), range(nodes))))
        max_idx, delta_max = norms.argmax(), norms.max()

        # optimize it with Newton's method
        xpkl_m = xm, pm, km, lm = xpkl(max_idx)
        while (la.norm(Ei_jac(*xpkl_m)) > eps):
            delta_x = la.solve(Ei_hess(*xpkl_m), Ei_jac(*xpkl_m).flatten())
            xm -= delta_x

    print('Finish:', time() - start, 's')
    return Pos
Example #30
    def calculate_hessian(self):
        r"""Calculate the Hessian matrix :math:`\nabla^2 \lambda_i` of the potential's
        eigenvalues :math:`\Lambda(x)` with :math:`x \in \mathbb{R}^D`. For potentials
        which depend on only one variable, this equals the second derivative and :math:`D=1`.
        Note that this function is idempotent.
        """
        if self._hessian_s is None:
            self.calculate_eigenvalues()

            self._hessian_s = []
            # TODO: Add symbolic simplification
            for ew in self._eigenvalues_s:
                self._hessian_s.append( sympy.hessian(ew, self._variables) )

            self._hessian_n = []

            for hessian in self._hessian_s:
                self._hessian_n.append([ sympy.lambdify(self._variables, entry, "numpy")
                                         for entry in hessian ])
Example #31
def geodesic(h, x1, x2):
    """Compute geodesic equations for z = h(x1, x2)."""

    coords = [x1, x2]
    Dh = Matrix([h]).jacobian(coords)
    Hh = hessian(h, coords)
    assert Hh.is_symmetric()
    v = [x.diff() for x in coords]

    numer2 = Hh[0, 0] * v[0]**2 + 2 * Hh[0, 1] * v[0] * v[1] + Hh[1, 1] * v[1]**2
    denom = 1 + Dh[0, 0]**2 + Dh[0, 1]**2

    print(r'$$\begin{align*}')
    for i, x in enumerate(coords):
        numer1 = -Dh[0, i]
        geq = Eq(x.diff(), simplify(numer1 * numer2 / denom))
        print_sympyobj(geq)
        print(r'\\')
    print(r'\end{align*}$$')
Example #32
    def calculate_hessian(self):
        r"""Calculate the Hessian matrix :math:`\nabla^2 \lambda_i` of the potential's
        eigenvalues :math:`\Lambda(x)` with :math:`x \in \mathbb{R}^D`. For potentials
        which depend on only one variable, this equals the second derivative and :math:`D=1`.
        Note that this function is idempotent.
        """
        if self._hessian_s is None:
            self.calculate_eigenvalues()

            self._hessian_s = []
            # TODO: Add symbolic simplification
            for ew in self._eigenvalues_s:
                self._hessian_s.append(sympy.hessian(ew, self._variables))

            self._hessian_n = []

            for hessian in self._hessian_s:
                self._hessian_n.append([
                    sympy.lambdify(self._variables, entry, "numpy")
                    for entry in hessian
                ])
Example #33
def taylor2(sym_f, sym_x, sym_a):
    if len(sym_x) != len(sym_a):
        raise TypeError("taylor2 arguments must have the same length")

    m_sym_x = sp.Matrix(sym_x)
    m_sym_a = sp.Matrix(sym_a)
    
    sym_dst = m_sym_x - m_sym_a
    print("sym_dst:", sym_dst)
    
    A = m_subs(sp.Matrix(s_diff(sym_f, sym_x)), sym_x, sym_a)
    print("A:", A)

    B = m_subs(sp.hessian(sym_f, sym_x), sym_x, sym_a)
    print("B:", B)
    
    sym_appr = s_subs(sym_f, sym_x, sym_a)
    sym_appr += (A.T * sym_dst)[0,0]
    sym_appr += sp.sympify('0.5') * (sym_dst.T * B * sym_dst)[0,0]
    
    return sp.simplify(sym_appr)
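m_subs, s_diff and s_subs are the project's elementwise substitution and differentiation helpers. An equivalent self-contained second-order expansion in plain sympy, with the expansion point made up:

import sympy as sp

x, y = sp.symbols('x y')
f = sp.exp(x * y)
a = [0, 1]                                   # expansion point
dx = sp.Matrix([x - a[0], y - a[1]])
subs = {x: a[0], y: a[1]}
grad = sp.Matrix([f.diff(v) for v in (x, y)]).subs(subs)
B = sp.hessian(f, (x, y)).subs(subs)
taylor = f.subs(subs) + (grad.T * dx)[0, 0] \
         + sp.Rational(1, 2) * (dx.T * B * dx)[0, 0]
print(sp.expand(taylor))   # x**2/2 + x*y + 1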
Example #34
def deriveFunction(symbolVec, costSymbolic, args):
    # Derives the desired numeric functions from the given symbolic function,
    # such as the function itself, its gradient, its hessian, etc.

    funcs = {}
    if 'func' in args:
        cost = sp.lambdify(symbolVec, costSymbolic(symbolVec), modules=['numpy'])
        funcs['func'] = cost
    if 'grad' in args:
        gradCost = sp.lambdify(symbolVec, [sp.diff(costSymbolic(symbolVec), var) \
                                            for var in symbolVec], 'numpy')
        funcs['grad'] = gradCost
    if 'hess' in args:
        hessianCost = sp.lambdify(symbolVec, \
                                    sp.hessian(costSymbolic(symbolVec), symbolVec), 'numpy')
        funcs['hess'] = hessianCost
    if 'jaco' in args:
        jacobian = sp.lambdify(symbolVec, costSymbolic(symbolVec).jacobian(symbolVec), \
                                modules=['numpy'])
        funcs['jaco'] = jacobian

    return funcs
Example #35
def conjugateGradient(f,x0,X):
    """
    Description : Returns the minimizer using the Conjugate Gradient Algorithm (as a Direct Method)
    
    Parameters:
               1. f   : symbolic representation of the function to be minimized
               2. x0  : list of independent variables for the function
               3. X   : symbolic list of the dependent variables in the function f
    
    Output:
               Prints the minimizer of the function f to terminal
    """
    def printFunction(f,x0):
        """
        Description : Prints the minimizer of the function on the screen
        
        Inputs: 
               f  = symbolic representation of the function to be minimized
               x0 = Initial guess of the minimizer
        """
        print "\n+++++++++++++++++++++++++++++CONJUGATE GRADIENT METHOD+++++++++++++++++++++++++++++"
        print "\nThe minimizer of the function\n\nf = %s \nis at \nx = %s" %(f,x0)
        print "+++++++++++++++++++++++++++++++++++++++++++++++++=============+++++++++++++++++++++++\n"
        return 0
    
    gradF = [f.diff(x) for x in X]
    g0 = gradValue(gradF,x0,X)
    if(all(abs(i) < epsilon for i in squeeze(asarray(g0,'float')))): return printFunction(f,x0)
    Q = matrix2numpy(hessian(f,X))
    d0 = -g0
    while(True):
        alpha = -squeeze(asarray(g0*d0.transpose()))/squeeze(asarray(d0*Q*d0.transpose()))
        x0 = [float(i) for i in (x0 + squeeze(asarray(alpha *d0)))]
        g0 = gradValue(gradF,x0,X)
        if(all(abs(i) < epsilon for i in squeeze(asarray(g0,'float')))): return printFunction(f,x0)
        beta = float(squeeze(asarray(g0*Q*d0.transpose())/squeeze(asarray(d0*Q*d0.transpose()))))
        d0 = -g0 +beta*d0
Example #36
File: kk_func.py Project: Ikki1/Kamada
# The snippet begins mid-statement; p and q are presumably the symbolic
# position vectors of the spring's two endpoints, e.g.:
p, q = [sp.Matrix(sp.symbols('%s1:3' % v))
        for v in 'p q'.split()]


##md(r'Locations of the vertices $\boldsymbol{p}$ and $\boldsymbol{q}$: $', p, ', ', q, '$')

# The actual length of the spring is the distance between points p and q
length = (p - q).norm()
##md('**The actual length of the spring**: $', length, '$')

# The potential energy as given by Hooke's Law and its derivatives
potential = k_ij * (length - l_ij) ** 2 / 2

potentials = [
    sp.Matrix([potential]),
    sp.Matrix([potential]).jacobian(p).T,
    sp.hessian(potential, p)]

# ###
# def display_potentials(potentials):
#     for i, p in zip(range(len(potentials)), potentials):
#         md('$$\mathbf {Potential}', "'" * i, ': ', p, '$$')
#         md('-----')

# ##display_potentials(potentials)

# cse_result = cse_substitution, eliminated = cse(potentials)

# ##display_potentials(eliminated)

# def expand_cse_substitution(cse_substitution):
#     substitution = []
Example #37
def _kamada_kawai_intro_(dim=2):
    markdown(r'''
# Graph drawing with a mechanical model

When drawing a graph, how the vertices are placed has a large influence on visual comprehension.
This article uses the **kamada-kawai** method, which assigns virtual forces to the graph's vertices and edges
and finds a layout by searching for a stable state of low mechanical energy.
''')

    P = sp.IndexedBase('P')
    K = sp.IndexedBase('k')
    L = sp.IndexedBase('l')

    n = sp.Symbol('n', integer=True)

    i, j, d = [sp.Idx(*spec) for spec in [('i', n), ('j', n), ('d', dim)]]
    i_range, j_range, d_range = [(idx, idx.lower, idx.upper)
                                 for idx in [i, j, d]]

    #potential functionの用意
    dist = sp.sqrt(sp.Sum((P[i, d] - P[j, d])**2, d_range)).doit()
    Potential = K[i, j] * (dist - L[i, j])**2 / 2
    E = sp.Sum(Potential, i_range, j_range) / 2

    P_id, k_ij, l_ij, d_ij = ['P_{i,d}', 'k_{i,j}', 'l_{i,j}', 'd_{i,j}']

    markdown(
        r'''
## Definition of the mechanical energy

First, assuming all vertices of the graph are connected by springs, the mechanical energy of the whole system is
$$E = {E}$$
where ${P_id}$ is the $d$-th component of the coordinates of vertex $i$, and ${k_ij},{l_ij}$ are the spring constant and the natural length between vertices $i$ and $j$, respectively.

With ${d_ij}$ the length of the shortest path between vertices $i$ and $j$, ${k_ij},{l_ij}$ are defined by
$${k_ij} = K / {d_ij}^2 (i \neq j) \ \  0(i = j)$$
$${l_ij} = L \times {d_ij}$$
(where $K,L$ are constants).

${d_ij}$ can be computed with the **Warshall-Floyd** algorithm.
''', **locals())

    nd = n * dim
    _E = sp.Function('E')

    x = sp.IndexedBase('x')
    m = sp.Symbol('m', integer=True)
    i0 = sp.Idx('i', (1, n))
    i0_range = (i0, i0.lower, i0.upper)

    var0 = [x[d] for d in range(dim)]
    var0_m = sp.Matrix([var0])
    var0_Pm = sp.Matrix([[P[m, d] for d in range(dim)]])

    dist0 = sp.sqrt(sp.Sum((P[i0, d] - x[d])**2, d_range)).doit()
    E0 = sp.Sum(K[i0, 0] * (dist0 - L[i0, 0])**2 / 2, i0_range)
    E0_jac = sp.simplify(sp.Matrix([E0]).jacobian(var0))
    E0_hess = sp.simplify(sp.hessian(E0, var0))

    delta_x = sp.IndexedBase("\Delta x")
    delta_x_vec = sp.Matrix([[delta_x[d] for d in range(dim)]])
    norm = sp.sqrt(sp.Sum(sp.diff(_E(P[i, d]), P[i, d])**2, d_range).doit())

    markdown(
        r'''
## Minimizing the energy

As an example, consider the case of {n} vertices in {dim} dimensions.
At a point of minimal mechanical energy, $gradE = \vec 0$ holds. That is, one would need to solve a system of ${nd}$ nonlinear simultaneous equations in ${nd}$ variables, but solving this analytically is difficult.
Instead, an approximate solution is found as follows.

1: First, focus on one particular vertex and fix the positions of the other vertices.

2: Then minimize the mechanical energy $E$ with respect to the chosen vertex's coordinates by the Newton-Raphson method.

3: Repeat 1 and 2, changing the vertex in focus.

4: Stop when $\|gradE\|$ is sufficiently small, and take the coordinate values at that time as the solution.

The concrete procedure is described below.

## Deriving the approximate solution

Let the chosen vertex be m with coordinates $P_m = {var0_m}$; that is, ${var0_Pm} = {var0_m}$.

The Newton-Raphson iteration is then, writing $J_m$ for the first derivative and $H_m$ for the second derivative of E with respect to ${var0_m}$,
$$H_m {delta_x_vec.T} = -J_m$$
This is a system of {dim} linear equations and is easily solved for the displacement $\Delta P_i = {delta_x_vec}$, after which the coordinates are updated by $P_i = P_i + \Delta P_i$.
This is repeated until the displacement becomes sufficiently small.

For example, for $m=0$ the iteration becomes
$${E0_hess}{delta_x_vec.T} = -{E0_jac.T} $$

Once the chosen vertex has been optimized, another vertex is selected and the optimization above is repeated.
When $max_i {norm}$ becomes sufficiently small, the updates stop and the coordinates at that time are taken as the solution minimizing the mechanical energy $E$.
''', **locals())
Example #38
import sympy as sym
import math

e = sym.E
u, v = sym.symbols('u v')
E = e**u + e**(2*v) + e**(u*v) + u**2 - 2*u*v + 2*v**2 - 3*u - 2*v
para = [0, 0]
for i in range(5):
    print(para)
    subs = {u: para[0], v: para[1]}
    update = -1 * sym.Matrix([[sym.diff(E, u), sym.diff(E, v)]]) * sym.hessian(E, [u, v]).inv()
    update = sym.simplify(update)
    print(update)
    update = update.evalf(subs=subs)
    para[0] += update[0]
    para[1] += update[1]
print(E.evalf(subs={u: para[0], v: para[1]}))
Example #39
def equilibrium(dbf, comps, phases, conditions, **kwargs):
    """
    Calculate the equilibrium state of a system containing the specified
    components and phases, under the specified conditions.
    Model parameters are taken from 'dbf'.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : list
        Names of components to consider in the calculation.
    phases : list or dict
        Names of phases to consider in the calculation.
    conditions : dict or (list of dict)
        StateVariables and their corresponding value.
    verbose : bool, optional (Default: True)
        Show progress of calculations.
    grid_opts : dict, optional
        Keyword arguments to pass to the initial grid routine.

    Returns
    -------
    Structured equilibrium calculation.

    Examples
    --------
    None yet.
    """
    active_phases = unpack_phases(phases) or sorted(dbf.phases.keys())
    comps = sorted(comps)
    indep_vars = ['T', 'P']
    grid_opts = kwargs.pop('grid_opts', dict())
    verbose = kwargs.pop('verbose', True)
    phase_records = dict()
    callable_dict = kwargs.pop('callables', dict())
    grad_callable_dict = kwargs.pop('grad_callables', dict())
    points_dict = dict()
    maximum_internal_dof = 0
    # Construct models for each phase; prioritize user models
    models = unpack_kwarg(kwargs.pop('model', Model), default_arg=Model)
    if verbose:
        print('Components:', ' '.join(comps))
        print('Phases:', end=' ')
    for name in active_phases:
        mod = models[name]
        if isinstance(mod, type):
            models[name] = mod = mod(dbf, comps, name)
        variables = sorted(mod.energy.atoms(v.StateVariable).union({key for key in conditions.keys() if key in [v.T, v.P]}), key=str)
        site_fracs = sorted(mod.energy.atoms(v.SiteFraction), key=str)
        maximum_internal_dof = max(maximum_internal_dof, len(site_fracs))
        # Extra factor '1e-100...' is to work around an annoying broadcasting bug for zero gradient entries
        models[name].models['_broadcaster'] = 1e-100 * Mul(*variables) ** 3
        out = models[name].energy
        if name not in callable_dict:
            undefs = list(out.atoms(Symbol) - out.atoms(v.StateVariable))
            for undef in undefs:
                out = out.xreplace({undef: float(0)})
            # callable_dict takes variables in a different order due to calculate() peculiarities
            callable_dict[name] = make_callable(out,
                                                sorted((key for key in conditions.keys() if key in [v.T, v.P]),
                                                       key=str) + site_fracs)
        if name not in grad_callable_dict:
            grad_func = make_callable(Matrix([out]).jacobian(variables), variables)
        else:
            grad_func = grad_callable_dict[name]
        # Adjust gradient by the approximate chemical potentials
        plane_vars = sorted(models[name].energy.atoms(v.SiteFraction), key=str)
        hyperplane = Add(*[v.MU(i)*mole_fraction(dbf.phases[name], comps, i)
                           for i in comps if i != 'VA'])
        # Workaround an annoying bug with zero gradient entries
        # This forces numerically zero entries to broadcast correctly
        hyperplane += 1e-100 * Mul(*([v.MU(i) for i in comps if i != 'VA'] + plane_vars + [v.T, v.P])) ** 3

        plane_grad = make_callable(Matrix([hyperplane]).jacobian(variables),
                                   [v.MU(i) for i in comps if i != 'VA'] + plane_vars + [v.T, v.P])
        plane_hess = make_callable(hessian(hyperplane, variables),
                                   [v.MU(i) for i in comps if i != 'VA'] + plane_vars + [v.T, v.P])
        phase_records[name.upper()] = PhaseRecord(variables=variables,
                                                  grad=grad_func,
                                                  plane_grad=plane_grad,
                                                  plane_hess=plane_hess)
        if verbose:
            print(name, end=' ')
    if verbose:
        print('[done]', end='\n')

    conds = OrderedDict((key, unpack_condition(value)) for key, value in sorted(conditions.items(), key=str))
    str_conds = OrderedDict((str(key), value) for key, value in conds.items())
    indep_vals = list([float(x) for x in np.atleast_1d(val)]
                      for key, val in str_conds.items() if key in indep_vars)
    components = [x for x in sorted(comps) if not x.startswith('VA')]
    # 'calculate' accepts conditions through its keyword arguments
    grid_opts.update({key: value for key, value in str_conds.items() if key in indep_vars})
    if 'pdens' not in grid_opts:
        grid_opts['pdens'] = 10

    coord_dict = str_conds.copy()
    coord_dict['vertex'] = np.arange(len(components))
    grid_shape = np.meshgrid(*coord_dict.values(),
                             indexing='ij', sparse=False)[0].shape
    coord_dict['component'] = components
    if verbose:
        print('Computing initial grid', end=' ')

    grid = calculate(dbf, comps, active_phases, output='GM',
                     model=models, callables=callable_dict, fake_points=True, **grid_opts)

    if verbose:
        print('[{0} points, {1}]'.format(len(grid.points), sizeof_fmt(grid.nbytes)), end='\n')

    properties = xray.Dataset({'NP': (list(str_conds.keys()) + ['vertex'],
                                      np.empty(grid_shape)),
                               'GM': (list(str_conds.keys()),
                                      np.empty(grid_shape[:-1])),
                               'MU': (list(str_conds.keys()) + ['component'],
                                      np.empty(grid_shape)),
                               'points': (list(str_conds.keys()) + ['vertex'],
                                          np.empty(grid_shape, dtype=np.int))
                               },
                              coords=coord_dict,
                              attrs={'iterations': 1},
                              )
    # Store the potentials from the previous iteration
    current_potentials = properties.MU.copy()

    for iteration in range(MAX_ITERATIONS):
        if verbose:
            print('Computing convex hull [iteration {}]'.format(properties.attrs['iterations']))
        # lower_convex_hull will modify properties
        lower_convex_hull(grid, properties)
        progress = np.abs(current_potentials - properties.MU).max().values
        if verbose:
            print('progress', progress)
        if progress < MIN_PROGRESS:
            if verbose:
                print('Convergence achieved')
            break
        current_potentials[...] = properties.MU.values
        if verbose:
            print('Refining convex hull')
        # Insert extra dimensions for non-T,P conditions so GM broadcasts correctly
        energy_broadcast_shape = grid.GM.values.shape[:len(indep_vals)] + \
            (1,) * (len(str_conds) - len(indep_vals)) + (grid.GM.values.shape[-1],)
        driving_forces = np.einsum('...i,...i',
                                   properties.MU.values[..., np.newaxis, :],
                                   grid.X.values[np.index_exp[...] +
                                                 (np.newaxis,) * (len(str_conds) - len(indep_vals)) +
                                                 np.index_exp[:, :]]) - \
            grid.GM.values.view().reshape(energy_broadcast_shape)

        for name in active_phases:
            dof = len(models[name].energy.atoms(v.SiteFraction))
            current_phase_indices = (grid.Phase.values == name).reshape(energy_broadcast_shape[:-1] + (-1,))
            # Broadcast to capture all conditions
            current_phase_indices = np.broadcast_arrays(current_phase_indices,
                                                        np.empty(driving_forces.shape))[0]
            # This reshape is safe as long as phases have the same number of points at all indep. conditions
            current_phase_driving_forces = driving_forces[current_phase_indices].reshape(
                current_phase_indices.shape[:-1] + (-1,))
            # Note: This works as long as all points are in the same phase order for all T, P
            current_site_fractions = grid.Y.values[..., current_phase_indices[(0,) * len(str_conds)], :]
            if np.sum(current_site_fractions[(0,) * len(indep_vals)][..., :dof]) == dof:
                # All site fractions are 1, aka zero internal degrees of freedom
                # Impossible to refine these points, so skip this phase
                points_dict[name] = current_site_fractions[(0,) * len(indep_vals)][..., :dof]
                continue
            # Find the N points with largest driving force for a given set of conditions
            # Remember that driving force has a sign, so we want the "most positive" values
            # N is the number of components, in this context
            # N points define a 'best simplex' for every set of conditions
            # We also need to restrict ourselves to one phase at a time
            trial_indices = np.argpartition(current_phase_driving_forces,
                                            -len(components), axis=-1)[..., -len(components):]
            trial_indices = trial_indices.ravel()
            statevar_indices = np.unravel_index(np.arange(np.multiply.reduce(properties.GM.values.shape + (len(components),))),
                                                properties.GM.values.shape + (len(components),))[:len(indep_vals)]
            points = current_site_fractions[np.index_exp[statevar_indices + (trial_indices,)]]
            points.shape = properties.points.shape[:-1] + (-1, maximum_internal_dof)
            # The Y arrays have been padded, so we should slice off the padding
            points = points[..., :dof]
            # Workaround for derivative issues at endmembers
            points[points == 0.] = MIN_SITE_FRACTION
            if len(points) == 0:
                if name in points_dict:
                    del points_dict[name]
                # No nearly stable points: skip this phase
                continue

            num_vars = len(phase_records[name].variables)
            plane_grad = phase_records[name].plane_grad
            plane_hess = phase_records[name].plane_hess
            statevar_grid = np.meshgrid(*itertools.chain(indep_vals), sparse=True, indexing='xy')
            # TODO: A more sophisticated treatment of constraints
            num_constraints = len(indep_vals) + len(dbf.phases[name].sublattices)
            constraint_jac = np.zeros((num_constraints, num_vars))
            # Independent variables are always fixed (in this limited implementation)
            for idx in range(len(indep_vals)):
                constraint_jac[idx, idx] = 1
            # This is for site fraction balance constraints
            var_idx = len(indep_vals)
            for idx in range(len(dbf.phases[name].sublattices)):
                active_in_subl = set(dbf.phases[name].constituents[idx]).intersection(comps)
                constraint_jac[len(indep_vals) + idx,
                               var_idx:var_idx + len(active_in_subl)] = 1
                var_idx += len(active_in_subl)

            grad = phase_records[name].grad(*itertools.chain(statevar_grid, points.T))
            if grad.dtype == 'object':
                # Workaround a bug in zero gradient entries
                grad_zeros = np.zeros(points.T.shape[1:], dtype=np.float)
                for i in np.arange(grad.shape[0]):
                    if isinstance(grad[i], int):
                        grad[i] = grad_zeros
                grad = np.array(grad.tolist(), dtype=np.float)
            bcasts = np.broadcast_arrays(*itertools.chain(properties.MU.values.T, points.T))
            cast_grad = -plane_grad(*itertools.chain(bcasts, [0], [0]))
            cast_grad = cast_grad.T + grad.T
            grad = cast_grad
            grad.shape = grad.shape[:-1]  # Remove extraneous dimension
            # This Hessian is an approximation updated using the BFGS method
            # See Nocedal and Wright, ch.3, p. 198
            # Initialize as identity matrix
            hess = broadcast_to(np.eye(num_vars), grad.shape + (grad.shape[-1],)).copy()
            newton_iteration = 0
            while newton_iteration < MAX_NEWTON_ITERATIONS:
                e_matrix = np.linalg.inv(hess)
                dy_unconstrained = -np.einsum('...ij,...j->...i', e_matrix, grad)
                proj_matrix = np.dot(e_matrix, constraint_jac.T)
                inv_matrix = np.rollaxis(np.dot(constraint_jac, proj_matrix), 0, -1)
                inv_term = np.linalg.inv(inv_matrix)
                first_term = np.einsum('...ij,...jk->...ik', proj_matrix, inv_term)
                # Normally a term for the residual here
                # We only choose starting points which obey the constraints, so r = 0
                cons_summation = np.einsum('...i,...ji->...j', dy_unconstrained, constraint_jac)
                cons_correction = np.einsum('...ij,...j->...i', first_term, cons_summation)
                dy_constrained = dy_unconstrained - cons_correction
                # TODO: Support for adaptive changing independent variable steps
                new_direction = dy_constrained[..., len(indep_vals):]
                # Backtracking line search
                new_points = points + INITIAL_STEP_SIZE * new_direction
                alpha = np.full(new_points.shape[:-1], INITIAL_STEP_SIZE, dtype=np.float)
                negative_points = np.any(new_points < 0., axis=-1)
                while np.any(negative_points):
                    alpha[negative_points] *= 0.1
                    new_points = points + alpha[..., np.newaxis] * new_direction
                    negative_points = np.any(new_points < 0., axis=-1)
                # If we made "near" zero progress on any points, don't update the Hessian until
                # we've rebuilt the convex hull
                # Nocedal and Wright recommend against skipping Hessian updates
                # They recommend using a damped update approach, pp. 538-539 of their book
                # TODO: Check the projected gradient norm, not the step length
                if np.any(np.max(np.abs(alpha[..., np.newaxis] * new_direction), axis=-1) < MIN_STEP_LENGTH):
                    break
                # Workaround for derivative issues at endmembers
                new_points[new_points == 0.] = 1e-16
                # BFGS update to Hessian
                new_grad = phase_records[name].grad(*itertools.chain(statevar_grid, new_points.T))
                if new_grad.dtype == 'object':
                    # Workaround a bug in zero gradient entries
                    grad_zeros = np.zeros(new_points.T.shape[1:], dtype=np.float)
                    for i in np.arange(new_grad.shape[0]):
                        if isinstance(new_grad[i], int):
                            new_grad[i] = grad_zeros
                    new_grad = np.array(new_grad.tolist(), dtype=np.float)
                bcasts = np.broadcast_arrays(*itertools.chain(properties.MU.values.T, new_points.T))
                cast_grad = -plane_grad(*itertools.chain(bcasts, [0], [0]))
                cast_grad = cast_grad.T + new_grad.T
                new_grad = cast_grad
                new_grad.shape = new_grad.shape[:-1]  # Remove extraneous dimension
                # Notation used here consistent with Nocedal and Wright
                s_k = np.empty(points.shape[:-1] + (points.shape[-1] + len(indep_vals),))
                # Zero out independent variable changes for now
                s_k[..., :len(indep_vals)] = 0
                s_k[..., len(indep_vals):] = new_points - points
                y_k = new_grad - grad
                s_s_term = np.einsum('...j,...k->...jk', s_k, s_k)
                s_b_s_term = np.einsum('...i,...ij,...j', s_k, hess, s_k)
                y_y_y_s_term = np.einsum('...j,...k->...jk', y_k, y_k) / \
                    np.einsum('...i,...i', y_k, s_k)[..., np.newaxis, np.newaxis]
                update = np.einsum('...ij,...jk,...kl->...il', hess, s_s_term, hess) / \
                    s_b_s_term[..., np.newaxis, np.newaxis] + y_y_y_s_term
                hess = hess - update
                cast_hess = -plane_hess(*itertools.chain(bcasts, [0], [0])).T + hess
                hess = -cast_hess #TODO: Why does this fix things?
                # TODO: Verify that the chosen step lengths reduce the energy
                points = new_points
                grad = new_grad
                newton_iteration += 1
            new_points = new_points.reshape(new_points.shape[:len(indep_vals)] + (-1, new_points.shape[-1]))
            new_points = np.concatenate((current_site_fractions[..., :dof], new_points), axis=-2)
            points_dict[name] = new_points

        if verbose:
            print('Rebuilding grid', end=' ')
        grid = calculate(dbf, comps, active_phases, output='GM',
                         model=models, callables=callable_dict,
                         fake_points=True, points=points_dict, **grid_opts)
        if verbose:
            print('[{0} points, {1}]'.format(len(grid.points), sizeof_fmt(grid.nbytes)), end='\n')
        properties.attrs['iterations'] += 1

    # One last call to ensure 'properties' and 'grid' are consistent with one another
    lower_convex_hull(grid, properties)
    ravelled_X_view = grid['X'].values.view().reshape(-1, grid['X'].values.shape[-1])
    ravelled_Y_view = grid['Y'].values.view().reshape(-1, grid['Y'].values.shape[-1])
    ravelled_Phase_view = grid['Phase'].values.view().reshape(-1)
    # Copy final point values from the grid and drop the index array
    # For some reason direct construction doesn't work. We have to create empty and then assign.
    properties['X'] = xray.DataArray(np.empty_like(ravelled_X_view[properties['points'].values]),
                                     dims=properties['points'].dims + ('component',))
    properties['X'].values[...] = ravelled_X_view[properties['points'].values]
    properties['Y'] = xray.DataArray(np.empty_like(ravelled_Y_view[properties['points'].values]),
                                     dims=properties['points'].dims + ('internal_dof',))
    properties['Y'].values[...] = ravelled_Y_view[properties['points'].values]
    # TODO: What about invariant reactions? We should perform a final driving force calculation here.
    # We can handle that in the same post-processing step where we identify single-phase regions.
    properties['Phase'] = xray.DataArray(np.empty_like(ravelled_Phase_view[properties['points'].values]),
                                         dims=properties['points'].dims)
    properties['Phase'].values[...] = ravelled_Phase_view[properties['points'].values]
    del properties['points']
    return properties
Example #40
File: example.py Project: estel1/it6
ax = fig.add_subplot(121, projection='3d')
ax.plot_wireframe(X, Y, Z)

cc = fig.add_subplot(122)
cc.contour(X, Y, Z)

# import the symbolic computation libraries
from sympy import Function, hessian, pprint
from sympy.abc import x, y

# define the function symbolically
z = 3.5 * x*x - 6*x*y - y*y
# print the Hessian matrix and its eigenvalues
# (remark: it is useful to find the eigenvalues analytically
# and then check them with the computer)
pprint(hessian(z, (x, y)))
pprint(hessian(z, (x, y)).eigenvals())

# the eigenvalues are -5 and 10, so alpha < 2/10 = 0.2
# the value is worth playing with, to get a feel for
# how the learning rate affects the speed of convergence
alpha = 0.01
# 20 search iterations
iterations = 20
# we want to display the trajectory on screen,
# so keep a two-dimensional array [iterations, 2]
solution = np.zeros((iterations, 2))
x0 = np.array((1, 1)).reshape(2, 1)
# store the initial guess (a column vector) as the first row
solution[0, :] = x0.reshape(1, 2)
print("x0: " + str(solution[0, :]))
Example #41
 def hess_sympy(self):
     """Calculate hessian matrix using sympy."""
     symbols = [ sympy.symbols('x')]
     for p in self.pars:
         symbols.append(sympy.symbols(p.name))
     return sympy.hessian(sympy.simplify(self.expr), symbols[1:])
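A hedged sketch of a minimal host class for this method so it can be exercised standalone; the class and the shape of self.pars are assumptions, not the original project's API:

import sympy
from collections import namedtuple

Par = namedtuple('Par', 'name')

class SymModel:
    def __init__(self, expr_str, par_names):
        self.expr = sympy.sympify(expr_str)
        self.pars = [Par(n) for n in par_names]

    def hess_sympy(self):
        """Calculate hessian matrix using sympy."""
        symbols = [sympy.symbols('x')]
        for p in self.pars:
            symbols.append(sympy.symbols(p.name))
        return sympy.hessian(sympy.simplify(self.expr), symbols[1:])

m = SymModel('a*exp(b*x)', ['a', 'b'])
print(m.hess_sympy())   # 2x2 Hessian w.r.t. the parameters a and b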
Example #42
import numpy as np
import sympy
from numpy.core.multiarray import ndarray
from scipy.sparse import csgraph
from scipy.sparse.csgraph import laplacian

# MAX_VALUE = 10
# MIN_VALUE = 1
# K = 10
delta = np.ones((10,2))
print("delta:", delta)

# sympy.hessian expects a scalar expression and a list of symbols,
# not a numpy array; a minimal working call looks like this
x, y = sympy.symbols('x y')
a = sympy.hessian(x**2 * y + y**3, [x, y])

print("a:", a)
Example #43
 def hess_sympy(self):
     """Calculate hessian matrix using sympy."""
     symbols = [sympy.symbols('x')]
     for p in self.pars:
         symbols.append(sympy.symbols(p.name))
     return sympy.hessian(sympy.simplify(self.expr), symbols[1:])
Example #44
import sympy

x1, x2 = sympy.symbols('x1 x2')
f = 100 * (x2 - x1**2)**2 + (1 - x1)**2

df_dx1 = sympy.diff(f, x1)
df_dx2 = sympy.diff(f, x2)
H = sympy.hessian(f, (x1, x2))

xs = sympy.solve([df_dx1, df_dx2], [x1, x2])

H_xs = H.subs([(x1, xs[0][0]), (x2, xs[0][1])])
lambda_xs = H_xs.eigenvals()

count = 0
for i in lambda_xs.keys():
    if i.evalf() <= 0:
        count += 1

if count == 0:
    print('Local minima')
elif count == len(lambda_xs.keys()):
    print('Local maxima')
else:
    print('Saddle point')
Example #45
def main():
    x, y = symbols('x y', real=True)
    f = (2 + cos(y)) * (a * cos(x) + b * sin(x)) + c * sin(y)
    H = hessian(f, (x, y))
    print(latex(H, fold_func_brackets=True,
                mat_str='pmatrix', mat_delim=''))
Example #46
# Sasha Nikiforov
# [NND] excersize E9.3

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

ylist = np.linspace(-2.0, 2.0)
xlist = np.linspace(-2.0, 2.0)

X, Y = np.meshgrid(xlist, ylist)
Z = X*X + 2*Y*Y

fig = plt.figure()
ax = fig.add_subplot(121, projection='3d')
ax.plot_wireframe(X, Y, Z)

cc = fig.add_subplot(122)
cc.contour(X, Y, Z)

plt.grid()
#plt.show()

# i - find minimum along the line
from sympy import Function, hessian, pprint
from sympy.abc import x, y 

#f = Function('f')(x, y)
z = x*x + 2*y*y
pprint(hessian(z, (x, y)))
Example #47
#Symbolic solutions

#print("Symbolic: ")
#print(sp.solvers.solve(chi2_sym.jacobian(ys), ys))

#Numerical solutions

print("Numerical: ")

#Calculate using Newton
result = opt.minimize(chi2, [0, 0, 0.01], method='TNC', bounds=bounds[1])
print("Newton's method")
print(result.fun)
print(result.x.tolist())

#Calculate using jacobian
print("Newton's with Jacobian matrix")
jacobian = lambda x: lambdify(ys, chi2_sym.jacobian(ys), 'numpy')(*x)
hessian = lambda x: lambdify(ys, sp.hessian(chi2_sym, ys), 'numpy')(*x)
result = opt.minimize(chi2, [0, 0, 0.01],
                      method='TNC',
                      jac=jacobian,
                      #hess=hessian,
                      bounds=bounds[1])
print(result.fun)
print(result.x.tolist())



Example #48
#%% Define Function
f = x**4+y**2-x*y # function 2 from Stanford
#f = 4*x + 2*y - x**2 -3*y**2

f

df_dy = sy.diff(f,y)
df_dx = sy.diff(f,x)

df_dx
#%% Find critical points
cr = sy.nonlinsolve([df_dx, df_dy], [x, y])
print('critical points',cr)
cr
#%% build hessian
e = sy.hessian(f,[x,y])
e

#%% Find eigenvalues for each of the critical points
for c in cr :
    xv = c[0]
    yv = c[1]
    print('Critical point : \n\tx : {} \n\ty : {}'.format(xv.evalf(),yv.evalf()))
    eigs = list(e.subs({x:xv,y:yv}).eigenvals().keys())
    if eigs[0] > 0 and eigs[1] > 0 :
        print('Concave up')
    elif eigs[0] < 0  and eigs[1] < 0 :
        print('Concave down')
    else :
        print('Saddle Point')
    print('Eigen Values : ',eigs)
Example #49
File: helpers.py Project: nstarman/gala
    def test_against_sympy(self):
        # compare Gala gradient and hessian to sympy values

        pot = self.potential
        Phi, v, p = pot.to_sympy()

        # Derive sympy gradient and hessian functions to evaluate:
        from scipy.special import gamma, gammainc

        def lowergamma(a, x):  # noqa
            # Differences between scipy and sympy lower gamma
            return gammainc(a, x) * gamma(a)

        modules = [
            'numpy', {
                'atan': np.arctan,
                'lowergamma': lowergamma,
                'gamma': gamma
            }
        ]

        e_func = sy.lambdify(list(p.values()) + list(v.values()),
                             Phi,
                             modules=modules)

        grad = sy.derive_by_array(Phi, list(v.values()))
        grad_func = sy.lambdify(list(p.values()) + list(v.values()),
                                grad,
                                modules=modules)

        Hess = sy.hessian(Phi, list(v.values()))
        Hess_func = sy.lambdify(list(p.values()) + list(v.values()),
                                Hess,
                                modules=modules)

        # Make a dict of potential parameter values without units:
        par_vals = {}
        for k, v in pot.parameters.items():
            par_vals[k] = v.value

        N = 64  # MAGIC NUMBER:
        trial_x = self.rnd.uniform(-10., 10., size=(pot.ndim, N))
        x_dict = {k: v for k, v in zip(['x', 'y', 'z'], trial_x)}

        f_gala = pot.energy(trial_x).value
        f_sympy = e_func(G=pot.G, **par_vals, **x_dict)
        e_close = np.allclose(f_gala, f_sympy)

        G_gala = pot.gradient(trial_x).value
        G_sympy = grad_func(G=pot.G, **par_vals, **x_dict)
        g_close = np.allclose(G_gala, G_sympy)

        H_gala = pot.hessian(trial_x).value
        H_sympy = Hess_func(G=pot.G, **par_vals, **x_dict)
        h_close = np.allclose(H_gala, H_sympy)

        if not all([e_close, g_close, h_close]):
            print(f'{pot}: energy {e_close}, gradient {g_close}, '
                  f'hessian {h_close}')

        assert all([e_close, g_close, h_close])