Example #1
	def __init__(self,name,syms,expression):

		# info
		self.name = name
		self.sizes = (syms[0].numel(),expression.numel(),sum([s.numel() for s in syms[1:]]))

		# use a single parameter for the expression (because julia sucks sometimes)
		self.param_sym = oc.MX.sym("P",self.sizes[2])
		self.param_names = [s.name() for s in syms[1:]]
		expression_ = cs.substitute(expression,cs.vcat(syms[1:]),self.param_sym)

		# expression evaluation
		self.eval = cs.Function('eval',[syms[0],self.param_sym],[expression_]).expand()

		# collect the sx version of the symbols
		sx_syms = self.eval.sx_in()
		self.main_sym = sx_syms[0]

		# expression jacobian
		jacobian = cs.jacobian(expression_,syms[0])
		self.eval_jac = cs.Function('eval_jac',[syms[0],self.param_sym],[jacobian]).expand()

		# hessian of each of the elements of the expression
		split_eval = [cs.Function('eval',[syms[0],self.param_sym],[expression_[i]]).expand() for i in range(expression_.shape[0]) ]
		hessian = [cs.hessian(split_eval[i](*sx_syms),sx_syms[0])[0]  for i in range(expression_.shape[0])]
		self.eval_hes = [cs.Function('eval_hes'+str(i),sx_syms,[hessian[i]]).expand() for i in range(expression.shape[0])]



		# location of the compiled library
		self.lib_path = None
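A minimal standalone sketch of the per-element Hessian pattern used above, with hypothetical symbols rather than the class internals:

import casadi as cs

x = cs.SX.sym("x", 3)
p = cs.SX.sym("p", 2)
expr = cs.vertcat(p[0] * cs.dot(x, x), p[1] * x[0] * x[1])  # two scalar outputs

# cs.hessian returns (Hessian, gradient) of a scalar expression
hessians = [cs.hessian(expr[i], x)[0] for i in range(expr.shape[0])]
eval_hes = [cs.Function("eval_hes" + str(i), [x, p], [hessians[i]])
            for i in range(expr.shape[0])]

print(eval_hes[0](cs.DM([1, 2, 3]), cs.DM([0.5, 2.0])))  # 2*p[0]*identity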
Example #2
    def __construct_sensitivities(self):
        """ Construct NLP sensitivities
        """

        # convenience
        w = self.__w
        p = self.__p

        # cost function
        self.__f_fun = ca.Function('f_fun', [w, p], [self.__f])
        self.__jacf_fun = ca.Function('jacf_fun', [w, p],
                                      [ca.jacobian(self.__f, self.__w)])

        # constraints
        self.__g_fun = ca.Function('g_fun', [w, p], [self.__g])
        self.__jacg_fun = ca.Function('jacg_fun', [w, p],
                                      [ca.jacobian(self.__g, self.__w)])
        self.__gzeros = np.zeros((self.__g.shape[0], 1))

        # exact hessian
        lam_g = ca.MX.sym('lam_g', self.__g.shape)
        lag = self.__f + ct.mtimes(lam_g.T, self.__g)
        self.__jlag_fun = ca.Function('jLag', [w, p, lam_g],
                                      [ca.jacobian(lag, w)])

        if self.__options['hessian_approximation'] == 'exact':
            self.__H_fun = ca.Function('H_fun', [w, p, lam_g],
                                       [ca.hessian(lag, w)[0]])
        else:
            self.__H_fun = self.__options['hessian_approximation']
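The exact-Hessian branch above boils down to ca.hessian of the Lagrangian f + lam_g'*g with respect to the primal variables. A minimal standalone sketch with made-up problem data:

import casadi as ca

w = ca.MX.sym('w', 2)
p = ca.MX.sym('p', 1)
f = (w[0] - p)**2 + w[1]**4
g = ca.vertcat(w[0] + w[1] - 1)

lam_g = ca.MX.sym('lam_g', g.shape)
lag = f + ca.mtimes(lam_g.T, g)

# ca.hessian returns (Hessian, gradient); only the Hessian is kept here
H_fun = ca.Function('H_fun', [w, p, lam_g], [ca.hessian(lag, w)[0]])
print(H_fun([1.0, 0.5], 0.2, 1.0))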
Example #3
def smooth_Lagrange_poly(x, y):
    t = ca.SX.sym('t')
    d = len(x)  # number of parameters
    tau = ca.SX.sym('tau', d)  # parameter as minimisation variable
    poly = 0

    for j in range(d):  # for all data points ...
        L = tau[j]
        for r in range(d):
            if r != j:
                L *= (t - x[r]) / (x[j] - x[r])
        poly += L
    L_fun = ca.Function('L_fun', [t, tau], [poly])
    ddL, _ = ca.hessian(poly, t)
    ddL_fun = ca.Function('ddL_fun', [t, tau], [ddL])
    # ddL_fun = L_fun.hessian(0)          # second order derivative to
    # [ddL,_,_]  = ddL_fun([t,tau])

    # cost in tau: fit the data points and penalise curvature
    res = 0.1 * sum([(L_fun(x[k], tau) - y[k])**2 for k in range(d)])[0]
    res += sum([ddL_fun(x[k], tau)[0]**2 * 1e4 for k in range(d)])[0]

    Cost = ca.Function('cost', [tau], [res])
    nlp = {'x': tau, 'f': res}
    solver = ca.nlpsol("solver", "ipopt", nlp)
    sol = solver(**{})
    tau_opt = sol['x']  # optimal parameter for polynomial
    return L_fun, tau_opt
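A hypothetical usage of smooth_Lagrange_poly, assuming casadi is imported as ca as in the snippet:

x_data = [0.0, 1.0, 2.0, 3.0]
y_data = [0.0, 0.8, 0.9, 0.1]

L_fun, tau_opt = smooth_Lagrange_poly(x_data, y_data)
print(L_fun(1.5, tau_opt))  # smoothed polynomial evaluated at t = 1.5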
Example #4
    def __init__(self, ocpVars, lbw, ubw, ocpParams, ocpCosts, ocpConstr, NX, NU, M, w0, Q, R, solvOpts={}):
        self.var = ocpVars
        self.lbw = lbw
        self.ubw = ubw
        self.params = ocpParams
        self.costs = sum(ocpCosts)
        self.g = ca.vertcat(*[c[0] for c in ocpConstr])
        self.lbg = ca.vertcat(*[c[1] for c in ocpConstr])
        self.ubg = ca.vertcat(*[c[2] for c in ocpConstr])
        self.w0 = w0
        self.lagrMulConstr = np.zeros((self.g.numel(),))
        self.lagrMulOptVars = np.zeros((self.var.cat.numel(),))

        self.NX = NX
        self.NU = NU
        self.M = M

        # symbolic variables
        w = ca.vertcat(self.var['x'][:], self.var['u'][:])  # primal decision variables
        # constraint lagrange multipliers (lambda + mu, i.e. dual decision variables)
        lagrMult = ca.SX.sym('lagrMult', self.g.size())
        wGuess = ca.SX.sym('wGuess', w.shape)  # guess / linearization point

        # single weight matrix for all decision variables
        W = np.diag(np.concatenate([np.tile(Q, M+1), np.tile(R, M)]))

        L = self.costs + lagrMult.T @ self.g
        B = ca.Function('B', [w], [ca.hessian(L,w)[0]])(wGuess)

        wErr = w - wGuess
        # reference as QP parameters
        wRef = ca.vertcat(self.params['x_ref'][:], self.params['u_ref'][:])

        #B = W  # gauss-newton hessian approximation
        J = W @ (wGuess - wRef)

        fqp = 1./2. * wErr.T @ B @ wErr + J.T @ wErr
        gqp = linearize(self.g, w, wGuess)
        pqp = ca.vertcat(self.params.cat, wGuess, lagrMult)

        # assemble QP
        if not solvOpts:
            solvOpts = {'jit': False, 'print_time': 0, 'printLevel': 'low',
                        'sparse': True, 'enableEqualities': True}
            #solvOpts = {'jit' : True, 'print_time' : 0, 'printLevel' : 'high', 'sparse' : True}

        qp = {'x': self.var, 'f': fqp, 'g': gqp, 'p': pqp}
        self.solver = ca.qpsol('S', 'qpoases', qp, solvOpts)

        self.solverResults = []
        self.durations = []
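The helper `linearize` is not defined in this snippet; presumably it returns a first-order Taylor expansion of the constraints around the linearization point. A sketch of such a helper, under that assumption:

import casadi as ca

def linearize(expr, var, var_lin):
    # first-order Taylor expansion of expr(var) around var = var_lin
    jac = ca.jacobian(expr, var)
    return ca.substitute(expr, var, var_lin) + ca.mtimes(
        ca.substitute(jac, var, var_lin), var - var_lin)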
Example #5
    def calculate_optimal_control(self):
        dd_h_dudu, d_h_du = hessian(self.problem.H, self.model.u)
        if is_equal(dd_h_dudu, DM.zeros(self.model.n_u, self.model.n_u)):
            # TODO: Implement the case where the controls appear linearly in the Hamiltonian ("Bang-Bang" control)
            raise Exception(
                'The Hamiltonian "H" is not strictly convex with respect to the control "u". '
                + 'The obtained hessian d^2 H/du^2 = 0')
        # if not ddH_dudu.is_constant():
        #     raise NotImplementedError('The Hessian of the Hamiltonian with respect to "u" is not constant,
        #                                this case has not been implemented')

        u_opt = -mtimes(inv(dd_h_dudu), substitute(d_h_du, self.model.u, 0))

        for i in range(self.model.n_u):
            if not self.problem.u_min[i] == -inf:
                u_opt[i] = fmax(u_opt[i], self.problem.u_min[i])

            if not self.problem.u_max[i] == inf:
                u_opt[i] = fmin(u_opt[i], self.problem.u_max[i])
        return u_opt
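The closed-form expression above assumes the Hamiltonian is quadratic (and strictly convex) in u. A minimal standalone sketch with hypothetical symbols:

from casadi import SX, hessian, substitute, mtimes, inv

x = SX.sym('x')
u = SX.sym('u', 2)
lam = SX.sym('lam')
H = x**2 + 0.5 * mtimes(u.T, u) + lam * (x + u[0])  # Hamiltonian, quadratic in u

dd_h_dudu, d_h_du = hessian(H, u)
u_opt = -mtimes(inv(dd_h_dudu), substitute(d_h_du, u, SX.zeros(2, 1)))
print(u_opt)  # [-lam, 0]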
Example #6
    def nllh_casf(self, grad=True, hess=False):
        """
        Creates a casadi function computing the negative log likelihood (without const terms) of the data
        dependent on hyperparameters v.
        :param grad: Whether the function should compute the gradient, too
        :param hess: Whether the function should compute the hessian, too
        :return: A casadi function taking a value of v as input and returning the neg log likelihood, gradient,
                 and Hessian if desired
        """
        vshape = np.atleast_2d(self.v).shape
        v = cas.MX.sym("v", vshape[0], vshape[1])
        phi_x = self.phi_cas(self.x, v)

        sinv = self.sinv0 + self.beta * cas.mtimes(phi_x.T, phi_x)
        mean = cas.solve(
            sinv,
            cas.mtimes(self.sinv0, self.mean0) +
            self.beta * cas.mtimes(phi_x.T, self.y))

        y_pred = cas.mtimes(mean.T, phi_x.T).T
        sigma = 1 / self.beta + cas.sum2(phi_x * cas.solve(sinv, phi_x.T).T)

        y_true = self.y
        y_diff = y_true - y_pred
        llht = cas.sum2(y_diff * y_diff) / sigma
        llh = cas.sum1(llht)

        if hess:
            H, llh_grad = cas.hessian(llh, v)
        elif grad:
            llh_grad = cas.gradient(llh, v)

        res = [llh]
        if grad:
            res += [llh_grad]
        if hess:
            res += [H]

        f = cas.Function("f_mu", [v], res)
        return f
Example #7
    def __init__(self,name,syms,expression,type='auto'):

        # info
        self.name = name
        self.sizes = (syms[0].numel(),expression.numel(),sum([s.numel() for s in syms[1:]]))
        self.main_sym = syms[0]

        # use a single parameter for the expression (because julia sucks sometimes)
        self.param_sym = oc.MX.sym("P",self.sizes[2])
        self.param_names = [s.name() for s in syms[1:]]
        expression_ = cs.substitute(expression,cs.vcat(syms[1:]),self.param_sym)

        # expression evaluation
        self.eval = cs.Function('eval',[self.main_sym,self.param_sym],[expression_])

        # expression jacobian
        jacobian = cs.jacobian(expression_,self.main_sym)
        self.jcb_nnz = jacobian.nnz()
        self.jcb_sparsity = jacobian.sparsity().get_triplet()
        self.eval_jac = cs.Function('eval_jac',[self.main_sym,self.param_sym],[jacobian])

        # hessian of each of the elements of the expression
        hessian = [cs.hessian(expression_[i],self.main_sym)[0] for i in range(expression_.shape[0])]
        self.hes_nnz = [hessian[i].nnz() for i in range(expression_.shape[0])]
        self.hes_sparsity = [hessian[i].sparsity().get_triplet() for i in range(expression_.shape[0])]
        self.eval_hes = [cs.Function('eval_hes'+str(i),[self.main_sym,self.param_sym],[hessian[i]]) for i in range(expression.shape[0])]

        # check type
        self.type = 'Linear'
        for n in self.hes_nnz:
            if n > 0:
                self.type = 'Quadratic'
                break

        if self.type == 'Quadratic' and type not in ['auto','Auto','Quadratic','quadratic']:
            raise NameError('LinQuadForJulia: the function is declared '+type+' but it turns out to be Quadratic.')
        if self.type == 'Linear' and type not in ['auto','Auto','Linear','linear']:
            raise NameError('LinQuadForJulia: the function is declared '+type+' but it turns out to be Linear.')
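A standalone sketch of the linear/quadratic classification via the Hessian's structural nonzeros, using a hypothetical expression:

import casadi as cs

x = cs.MX.sym('x', 3)
expr = cs.vertcat(2 * x[0] + x[2], x[1]**2)

hes_nnz = [cs.hessian(expr[i], x)[0].nnz() for i in range(expr.shape[0])]
kind = 'Quadratic' if any(n > 0 for n in hes_nnz) else 'Linear'
print(kind)  # Quadratic, since the second entry has a nonzero Hessian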
Example #8
# applies to matrices and sparsity patterns
# y = ca.SX.sym('y',10,1)
# print(y.shape)

# .size1() => number of rows
# .size2() => number of columns
# .numel() => number of elements == .size1() * .size2()
# .sparsity() => sparsity pattern
'''
    Part 7: Derivative computation
'''
# Derivatives can be computed in forward or reverse mode, yielding Jacobian-times-vector and Jacobian-transposed-times-vector products respectively

#1. jacobian
A = ca.SX.sym('A', 3, 2)
x = ca.SX.sym('x', 2)
print('A:', A, ' x: ', x, 'Ax: ', ca.mtimes(A, x))
print('J:', ca.jacobian(ca.mtimes(A, x), x))

print(ca.dot(A, A))
print(ca.gradient(ca.dot(A, A), A))

[H, g] = ca.hessian(ca.dot(x, x), x)  # hessian() returns both the Hessian matrix and the gradient
print('H:', H)
print('g:', g)

v = ca.SX.sym('v', 2)
f = ca.mtimes(A, x)

print(ca.jtimes(f, x, v))  # jtimes = jacobian function * vector
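A short continuation of the snippet above: the reverse-mode (Jacobian-transposed-times-vector) product mentioned in the comment can also be obtained from jtimes, by passing the transpose flag and a seed of the output dimension.

w = ca.SX.sym('w', 3)            # seed in the output space (same length as f)
print(ca.jtimes(f, x, w, True))  # reverse: J(x).T @ w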
Example #9
def generate_c_code_external_cost(model, is_terminal):

    casadi_version = CasadiMeta.version()
    casadi_opts = dict(mex=False, casadi_int="int", casadi_real="double")

    if casadi_version not in (ALLOWED_CASADI_VERSIONS):
        casadi_version_warning(casadi_version)

    x = model.x
    p = model.p

    if isinstance(x, MX):
        symbol = MX.sym
    else:
        symbol = SX.sym

    if is_terminal:
        suffix_name = "_cost_ext_cost_e_fun"
        suffix_name_hess = "_cost_ext_cost_e_fun_jac_hess"
        suffix_name_jac = "_cost_ext_cost_e_fun_jac"
        u = symbol("u", 0, 0)
        ext_cost = model.cost_expr_ext_cost_e

    else:
        suffix_name = "_cost_ext_cost_fun"
        suffix_name_hess = "_cost_ext_cost_fun_jac_hess"
        suffix_name_jac = "_cost_ext_cost_fun_jac"
        u = model.u
        ext_cost = model.cost_expr_ext_cost


    # set up functions to be exported
    fun_name = model.name + suffix_name
    fun_name_hess = model.name + suffix_name_hess
    fun_name_jac = model.name + suffix_name_jac

    # generate expression for full gradient and Hessian
    full_hess, grad = hessian(ext_cost, vertcat(u, x))

    ext_cost_fun = Function(fun_name, [x, u, p], [ext_cost])
    ext_cost_fun_jac_hess = Function(
        fun_name_hess, [x, u, p], [ext_cost, grad, full_hess]
    )
    ext_cost_fun_jac = Function(
        fun_name_jac, [x, u, p], [ext_cost, grad]
    )

    # generate C code
    if not os.path.exists("c_generated_code"):
        os.mkdir("c_generated_code")

    os.chdir("c_generated_code")
    gen_dir = model.name + '_cost'
    if not os.path.exists(gen_dir):
        os.mkdir(gen_dir)
    gen_dir_location = "./" + gen_dir
    os.chdir(gen_dir_location)

    ext_cost_fun.generate(fun_name, casadi_opts)
    ext_cost_fun_jac_hess.generate(fun_name_hess, casadi_opts)
    ext_cost_fun_jac.generate(fun_name_jac, casadi_opts)

    os.chdir("../..")
    return
Example #10
    def __init__(self,
                 x,
                 u,
                 lc_,
                 lcN_,
                 g_,
                 gN_,
                 fc_,
                 T,
                 M,
                 N,
                 tau,
                 print_level=1,
                 pt=True):
        """
        Define an optimal control formulation

        Parameters
        ----------
        x    : CasADi MX
            CasADi symbolic variables representing the states
        u    : CasADi MX
            CasADi symbolic variables representing the inputs
        lc_  : CasADi expression 
            lc: R^{nx} x R^{nu} -> R (continuous-time Lagrange term)
        lcN_ : CasADi expression 
            lcN: R^{nx} -> R (Mayer term)
        g_   : CasADi expression 
            g: R^{n_x} x R^{n_u} -> R^{ng} (constraint function)
        gN_  : CasADi expression 
            gN: R^{nx} -> R^{ngN} (constraint function at t=T)
        fc_  : CasADi expression 
            fc: R^{nx} x R^{nu} -> R^{nx} (continuous time dynamics)
        T    : float
            prediction horizon
        N    : int
            number of shooting intervals
        M    : int
            number of initial stages with exact (hard) inequality constraints; stages M to N-1 are tightened with barrier terms
        tau  : float
            tightening factor
        print_level : int
            print level
        pt : bool
            partial tightening vs standard SQP (default = True)
        """

        self.pt = pt
        NX = x.shape[0]
        NU = u.shape[0]
        if not isinstance(g_, list):
            NG = g_.shape[0]
        else:
            NG = 0

        if not isinstance(gN_, list):
            NGN = gN_.shape[0]
        else:
            NGN = 0

        dims = OcpDims(NX, NU, NG, NGN, N, M)
        self.dims = dims

        self.tau = tau
        self.print_level = print_level

        # define CasADi functions
        lc = ca.Function('lc', [x, u], [lc_])
        lcN = ca.Function('lcN', [x], [lcN_])
        g = ca.Function('g', [x, u], [g_])
        gN = ca.Function('gN', [x], [gN_])
        fc = ca.Function('fc', [x, u], [fc_])

        self.lc = lc
        self.lcN = lcN
        self.g = g

        self.gN = gN
        self.fc = fc

        self.dims = dims
        NX = dims.nx
        NU = dims.nu
        NG = dims.ng
        NGN = dims.ngN
        N = dims.N
        M = dims.M

        Td = T / N

        # create integrator
        integrator = Integrator(x, u, fc_, Td)
        self.integrator = integrator

        # build OCP
        w = []
        w0 = []
        lbw = []
        ubw = []
        c = []
        lbc = []
        ubc = []
        Xk = ca.MX.sym('X0', NX, 1)
        w += [Xk]
        c += [Xk]
        lbc += [np.zeros((NX, 1))]
        ubc += [np.zeros((NX, 1))]
        lbw += [-np.inf * np.ones((NX, 1))]
        ubw += [+np.inf * np.ones((NX, 1))]
        w0 += [np.zeros((NX, 1))]
        f = 0

        # formulate the NLP
        for k in range(M):

            # new NLP variable for the control
            Uk = ca.MX.sym('U_' + str(k), NU, 1)

            # update variable list
            w += [Uk]
            lbw += [-np.inf * np.ones((NU, 1))]
            ubw += [np.inf * np.ones((NU, 1))]
            w0 += [np.zeros((NU, 1))]

            # add cost contribution
            f = f + Td * lc(Xk, Uk)

            # add constraints
            c += [g(Xk, Uk)]
            lbc += [-np.inf * np.ones((NG, 1))]
            ubc += [np.zeros((NG, 1))]

            # integrate till the end of the interval
            Xk_end = integrator.xplus(Xk, Uk)

            # new NLP variable for state at end of interval
            Xk = ca.MX.sym('X_' + str(k + 1), NX, 1)
            w += [Xk]
            lbw += [-np.inf * np.ones((NX, 1))]
            ubw += [np.inf * np.ones((NX, 1))]
            w0 += [np.zeros((NX, 1))]

            # add equality constraint
            c += [Xk_end - Xk]
            lbc += [np.zeros((NX, 1))]
            ubc += [np.zeros((NX, 1))]

        for k in range(M, N):

            # new NLP variable for the control
            Uk = ca.MX.sym('U_' + str(k), NU, 1)

            # compute barrier term
            barr_term = 0
            for i in range(NG):
                barr_term = barr_term - tau * np.log(-g(Xk, Uk)[i])

            f = f + Td * lc(Xk, Uk) + barr_term

            w += [Uk]
            lbw += [-np.inf * np.ones((NU, 1))]
            ubw += [np.inf * np.ones((NU, 1))]
            w0 += [np.zeros((NU, 1))]

            # integrate till the end of the interval
            Xk_end = integrator.xplus(Xk, Uk)

            # new NLP variable for state at end of interval
            Xk = ca.MX.sym('X_' + str(k + 1), NX, 1)
            w += [Xk]
            lbw += [-np.inf * np.ones((NX, 1))]
            ubw += [np.inf * np.ones((NX, 1))]
            w0 += [np.zeros((NX, 1))]

            # add equality constraint
            c += [Xk_end - Xk]
            lbc += [np.zeros((NX, 1))]
            ubc += [np.zeros((NX, 1))]

        if M <= N:
            # compute barrier term
            barr_term = 0
            for i in range(NGN):
                barr_term = barr_term + -tau * np.log(-gN(Xk)[i])

            f = f + lcN(Xk) + barr_term
        else:
            f = f + lcN(Xk)

            # add constraints
            c += [gN(Xk)]
            lbc += [np.zeros((NGN, 1))]
            ubc += [np.zeros((NGN, 1))]

        c = ca.vertcat(*c)
        w = ca.vertcat(*w)

        # convert lists to numpy arrays
        lbw_a = np.vstack(lbw)

        self._lbw = np.vstack(lbw)
        self._ubw = np.vstack(ubw)

        self._lbc = np.vstack(lbc)
        self._ubc = np.vstack(ubc)

        self._w0 = np.vstack(w0)

        # create an NLP solver
        prob = {'f': f, 'x': w, 'g': c}
        # opts = {'ipopt': {'print_level': 2}}
        opts = {}
        self.nlp_solver = ca.nlpsol('solver', 'ipopt', prob, opts)

        #----------------------------------------------------------------------
        #                       partially tightened RTI
        #----------------------------------------------------------------------

        # define CasADi functions for linearization

        # dynamics
        jac_x_f = ca.Function('jac_x_f', [integrator.x, integrator.u], \
            [ca.jacobian(integrator.xplus_expr, integrator.x)])
        self.jac_x_f = jac_x_f

        jac_u_f = ca.Function('jac_u_f', [integrator.x, integrator.u], \
            [ca.jacobian(integrator.xplus_expr, integrator.u)])
        self.jac_u_f = jac_u_f

        # cost
        jac_x_l = ca.Function('jac_x_l', [x, u], \
            [ca.jacobian(Td*lc_, x)])
        self.jac_x_l = jac_x_l

        jac_u_l = ca.Function('jac_u_l', [x, u], \
            [ca.jacobian(Td*lc_, u)])
        self.jac_u_l = jac_u_l

        jac_xx_l = ca.Function('jac_xx_l', [x, u], \
            [ca.hessian(Td*lc_, x)[0]])
        self.jac_xx_l = jac_xx_l

        jac_uu_l = ca.Function('jac_uu_l', [x, u], \
            [ca.hessian(Td*lc_, u)[0]])
        self.jac_uu_l = jac_uu_l

        jac_xu_l = ca.Function('jac_xu_l', [x, u], \
            [ca.jacobian(ca.jacobian(Td*lc_, x), u)])
        self.jac_xu_l = jac_xu_l

        jac_xx_lN = ca.Function('jac_xx_lN', [x], \
            [ca.hessian(lcN_, x)[0]])
        self.jac_xx_lN = jac_xx_lN

        jac_x_lN = ca.Function('jac_x_lN', [x], \
            [ca.jacobian(lcN_, x)])
        self.jac_x_lN = jac_x_lN

        # constraints
        jac_x_g = ca.Function('jac_x_g', [x, u], \
            [ca.jacobian(g_, x)])
        self.jac_x_g = jac_x_g

        jac_u_g = ca.Function('jac_u_g', [x, u], \
            [ca.jacobian(g_, u)])
        self.jac_u_g = jac_u_g

        jac_x_gN = ca.Function('jac_x_gN', [x], \
            [ca.jacobian(gN_, x)])
        self.jac_x_gN = jac_x_gN

        # these are the primal-dual iterates of the partially tightened RTI
        self.x = []
        self.u = []
        self.lam = []
        self.t = []
        self.nu = []

        t_init = 1.00
        nu_init = 1.00

        if self.pt:
            for i in range(M):
                self.x.append(np.zeros((NX, 1)))
                self.u.append(np.zeros((NU, 1)))
                self.lam.append(np.zeros((NX, 1)))
                self.t.append(np.zeros((NG, 1)))
                self.nu.append(np.zeros((NG, 1)))

            for i in range(M, N):
                self.x.append(np.zeros((NX, 1)))
                self.u.append(np.zeros((NU, 1)))
                self.lam.append(np.zeros((NX, 1)))
                self.t.append(t_init * np.ones((NG, 1)))
                self.nu.append(nu_init * np.ones((NG, 1)))

            self.x.append(np.zeros((NX, 1)))
            self.lam.append(np.zeros((NX, 1)))
            self.t.append(t_init * np.ones((NGN, 1)))
            self.nu.append(nu_init * np.ones((NGN, 1)))
        else:
            for i in range(N):
                self.x.append(np.zeros((NX, 1)))
                self.u.append(np.zeros((NU, 1)))
                self.lam.append(np.zeros((NX, 1)))
                self.t.append(np.zeros((NG, 1)))
                self.nu.append(np.zeros((NG, 1)))
            self.x.append(np.zeros((NX, 1)))
            self.lam.append(np.zeros((NX, 1)))
            self.t.append(np.zeros((NGN, 1)))
            self.nu.append(np.zeros((NGN, 1)))

        # these are the variables associated with the linearized problem
        # - matrices
        self.A = []
        self.B = []
        self.C = []
        self.D = []
        self.Hxx = []
        self.Huu = []
        self.Hxu = []
        self.Hxx_t = []
        self.Huu_t = []
        self.Hxu_t = []

        for i in range(N):
            self.A.append(np.zeros((NX, NX)))
            self.B.append(np.zeros((NX, NU)))
            self.C.append(np.zeros((NG, NX)))
            self.D.append(np.zeros((NG, NU)))
            self.Hxx.append(np.zeros((NX, NX)))
            self.Huu.append(np.zeros((NU, NU)))
            self.Hxu.append(np.zeros((NU, NX)))
            self.Hxx_t.append(np.zeros((NX, NX)))
            self.Huu_t.append(np.zeros((NU, NU)))
            self.Hxu_t.append(np.zeros((NU, NX)))

        self.C.append(np.zeros((NGN, NX)))
        self.D.append(np.zeros((NGN, NU)))
        self.Hxx.append(np.zeros((NX, NX)))
        self.Hxx_t.append(np.zeros((NX, NX)))

        # - vectors (residuals)
        self.r_lam = []
        self.r_x = []
        self.r_x_t = []
        self.r_u = []
        self.r_u_t = []
        self.r_nu = []
        self.e = []

        for i in range(N):
            self.r_lam.append(np.zeros((NX, 1)))
            self.r_x.append(np.zeros((NX, 1)))
            self.r_x_t.append(np.zeros((NX, 1)))
            self.r_u.append(np.zeros((NU, 1)))
            self.r_u_t.append(np.zeros((NU, 1)))
            self.r_nu.append(np.zeros((NG, 1)))
            self.e.append(np.zeros((NG, 1)))

        self.r_lam.append(np.zeros((NX, 1)))
        self.r_x.append(np.zeros((NX, 1)))
        self.r_x_t.append(np.zeros((NX, 1)))
        self.r_u.append(np.zeros((NU, 1)))
        self.r_nu.append(np.zeros((NG, 1)))
        self.e.append(np.zeros((NG, 1)))

        # - vectors (QP residuals)
        self.r_lam_qp = []
        self.r_x_qp = []
        self.r_u_qp = []
        self.r_nu_qp = []
        self.e_qp = []

        for i in range(N):
            self.r_lam_qp.append(np.zeros((NX, 1)))
            self.r_x_qp.append(np.zeros((NX, 1)))
            self.r_u_qp.append(np.zeros((NU, 1)))
            self.r_nu_qp.append(np.zeros((NG, 1)))
            self.e_qp.append(np.zeros((NG, 1)))

        self.r_lam_qp.append(np.zeros((NX, 1)))
        self.r_x_qp.append(np.zeros((NX, 1)))
        self.r_u_qp.append(np.zeros((NU, 1)))
        self.r_nu_qp.append(np.zeros((NG, 1)))
        self.e_qp.append(np.zeros((NG, 1)))

        # these are the variables associated with the Riccati recursion
        self.P = []
        self.p = []

        self.r_u_t_back = []
        self.r_lam_back = []

        for i in range(N + 1):
            self.p.append(np.zeros((NX, 1)))
            self.P.append(np.zeros((NX, NX)))

        self.du = []
        self.dx = []
        self.dlam = []
        self.dnu = []
        self.dt = []

        for i in range(N):
            self.du.append(np.zeros((NU, 1)))
            self.dx.append(np.zeros((NX, 1)))
            self.dt.append(np.zeros((NG, 1)))
            self.dlam.append(np.zeros((NX, 1)))
            self.dnu.append(np.zeros((NG, 1)))

        self.dlam.append(np.zeros((NX, 1)))
        self.dx.append(np.zeros((NX, 1)))
        self.dt.append(np.zeros((NGN, 1)))
        self.dnu.append(np.zeros((NGN, 1)))

        self.x0 = np.zeros((NX, 1))

        if M > 0:
            # construct reduced QP
            dW = []
            dLam = []
            dX = []
            dU = []
            dNu = []
            dT = []
            dW0 = []
            p = []
            Lam_lin = []
            X_lin = []
            U_lin = []
            Nu_lin = []
            T_lin = []

            # first parameter is initial state
            X_par = ca.MX.sym('X_par', NX, 1)

            # define variables
            for i in range(M):
                dLamk = ca.MX.sym('dLam_' + str(i), NX, 1)
                dLam += [dLamk]
                dW += [dLamk]

                Lamk_lin = ca.MX.sym('Lam_' + str(i) + '_lin', NX, 1)
                Lam_lin += [Lamk_lin]

                dW0 += [np.zeros((NX, 1))]

                dXk = ca.MX.sym('dX_' + str(i), NX, 1)
                dX += [dXk]
                dW += [dXk]

                Xk_lin = ca.MX.sym('X_' + str(i) + '_lin', NX, 1)
                X_lin += [Xk_lin]

                dW0 += [np.zeros((NX, 1))]

                dUk = ca.MX.sym('dU_' + str(i), NU, 1)
                dU += [dUk]
                dW += [dUk]

                Uk_lin = ca.MX.sym('U_' + str(i) + '_lin', NU, 1)
                U_lin += [Uk_lin]

                dW0 += [np.zeros((NU, 1))]

                dNuk = ca.MX.sym('dNu_' + str(i), NG, 1)
                dNu += [dNuk]
                dW += [dNuk]

                Nuk_lin = ca.MX.sym('dNu_' + str(i) + '_lin', NG, 1)
                Nu_lin += [Nuk_lin]
                dW0 += [np.ones((NG, 1))]

                dTk = ca.MX.sym('dT_' + str(i), NG, 1)
                dT += [dTk]
                dW += [dTk]

                Tk_lin = ca.MX.sym('T_' + str(i) + '_lin', NG, 1)
                T_lin += [Tk_lin]
                dW0 += [np.ones((NG, 1))]

            i = M
            dLamk = ca.MX.sym('dLam_' + str(i), NX, 1)
            dLam += [dLamk]
            dW += [dLamk]

            Lamk_lin = ca.MX.sym('Lam_' + str(i) + '_lin', NX, 1)
            Lam_lin += [Lamk_lin]

            dW0 += [np.zeros((NX, 1))]

            dXk = ca.MX.sym('dX_' + str(i), NX, 1)
            dX += [dXk]
            dW += [dXk]

            Xk_lin = ca.MX.sym('X_' + str(i) + '_lin', NX, 1)
            X_lin += [Xk_lin]

            dW0 += [np.zeros((NX, 1))]

            if NGN > 0:
                dNuk = ca.MX.sym('dNu_' + str(i), NGN, 1)
                dNu += [dNuk]
                dW += [dNuk]

                Nuk_lin = ca.MX.sym('dNu_' + str(i) + '_lin', NGN, 1)
                Nu_lin += [Nuk_lin]
                dW0 += [np.ones((NGN, 1))]

                dTk = ca.MX.sym('dT_' + str(i), NGN, 1)
                dT += [dTk]
                dW += [dTk]

                Tk_lin = ca.MX.sym('T_' + str(i) + '_lin', NGN, 1)
                T_lin += [Tk_lin]
                dW0 += [np.ones((NGN, 1))]

            # form parameter vector
            p += [X_par]
            p += Lam_lin[:]
            p += X_lin[:]
            p += U_lin[:]
            p += Nu_lin[:]
            p += T_lin[:]

            # last parameters correspond to p_M and P_M
            P_M_flat = ca.MX.sym('P', NX * NX, 1)
            p += [P_M_flat]
            p_M = ca.MX.sym('p_M', NX, 1)
            p += [p_M]

            # empty QP
            c_qp = []
            lbc_qp = []
            ubc_qp = []
            f_qp = 0

            # formulate the reduced QP
            k = 0
            dXk = dX[k]

            r_lam_0 = -X_lin[0] + X_par
            c_qp += [-dXk + r_lam_0]
            lbc_qp += [np.zeros((NX, 1))]
            ubc_qp += [np.zeros((NX, 1))]

            dUk = dU[k]
            dXk = dX[k]

            Lamk_lin = Lam_lin[k]
            Lamk_lin_next = Lam_lin[k + 1]
            Xk_lin = X_lin[k]
            Uk_lin = U_lin[k]
            Nuk_lin = Nu_lin[k]
            Tk_lin = T_lin[k]

            # compute residuals
            nabla_x_l = ca.transpose(jac_x_l(Xk_lin, Uk_lin))
            nabla_u_l = ca.transpose(jac_u_l(Xk_lin, Uk_lin))
            nabla_x_f = ca.transpose(jac_x_f(Xk_lin, Uk_lin))
            nabla_u_f = ca.transpose(jac_u_f(Xk_lin, Uk_lin))
            nabla_x_g = ca.transpose(jac_x_g(Xk_lin, Uk_lin))
            nabla_u_g = ca.transpose(jac_u_g(Xk_lin, Uk_lin))

            r_x_k = nabla_x_l + ca.mtimes(nabla_x_f, Lamk_lin_next) + \
                - Lamk_lin + ca.mtimes(nabla_x_g, Nuk_lin)

            r_u_k = nabla_u_l + ca.mtimes(nabla_u_f, Lamk_lin_next) + \
                ca.mtimes(nabla_u_g, Nuk_lin)

            r_nu_k = g(Xk_lin, Uk_lin) + Tk_lin

            # compute Hessian approximation
            Huu = jac_uu_l(Xk_lin, Uk_lin)
            Hxx = jac_xx_l(Xk_lin, Uk_lin)
            Hxu = jac_xu_l(Xk_lin, Uk_lin)

            Hk = ca.vertcat(ca.horzcat(Hxx, Hxu, r_x_k), \
                    ca.horzcat(Hxu.T, Huu, r_u_k), \
                    ca.horzcat(ca.transpose(r_x_k), ca.transpose(r_u_k), 1))

            # add cost contribution
            f_qp = f_qp + ca.mtimes(ca.vertcat(dXk, dUk, 1).T, ca.mtimes(Hk, \
                    ca.vertcat(ca.vertcat(dXk, dUk, 1))))

            # add inequality constraints
            c_qp += [ca.mtimes(ca.transpose(nabla_x_g), dXk) \
                + ca.mtimes(ca.transpose(nabla_u_g), dUk) + r_nu_k]

            lbc_qp += [-np.inf * np.ones((NG, 1))]
            ubc_qp += [np.zeros((NG, 1))]

            for k in range(1, M):

                dUk = dU[k]
                dUk_prev = dU[k - 1]
                dXk = dX[k]
                dXk_prev = dX[k - 1]

                Lamk_lin = Lam_lin[k]
                Lamk_lin_next = Lam_lin[k + 1]
                Xk_lin = X_lin[k]
                Xk_lin_prev = X_lin[k - 1]
                Uk_lin = U_lin[k]
                Uk_lin_prev = U_lin[k - 1]
                Nuk_lin = Nu_lin[k]
                Tk_lin = T_lin[k]

                # compute residuals
                nabla_x_l = ca.transpose(jac_x_l(Xk_lin, Uk_lin))
                nabla_u_l = ca.transpose(jac_u_l(Xk_lin, Uk_lin))
                nabla_x_f = ca.transpose(jac_x_f(Xk_lin, Uk_lin))
                nabla_u_f = ca.transpose(jac_u_f(Xk_lin, Uk_lin))
                nabla_x_g = ca.transpose(jac_x_g(Xk_lin, Uk_lin))
                nabla_u_g = ca.transpose(jac_u_g(Xk_lin, Uk_lin))
                nabla_x_f_prev = ca.transpose(jac_x_f(Xk_lin_prev,
                                                      Uk_lin_prev))
                nabla_u_f_prev = ca.transpose(jac_u_f(Xk_lin_prev,
                                                      Uk_lin_prev))

                r_lam_k = -Xk_lin + integrator.eval(Xk_lin_prev, Uk_lin_prev)

                r_x_k = nabla_x_l + ca.mtimes(nabla_x_f, Lamk_lin_next) \
                    - Lamk_lin + ca.mtimes(nabla_x_g, Nuk_lin)

                r_u_k = nabla_u_l + ca.mtimes(nabla_u_f, Lamk_lin_next) \
                    + ca.mtimes(nabla_u_g, Nuk_lin)

                r_nu_k = g(Xk_lin, Uk_lin) + Tk_lin

                # compute Hessian approximation
                Huu = jac_uu_l(Xk_lin, Uk_lin)
                Hxx = jac_xx_l(Xk_lin, Uk_lin)
                Hxu = jac_xu_l(Xk_lin, Uk_lin)

                Hk = ca.vertcat(ca.horzcat(Hxx, Hxu, r_x_k), \
                        ca.horzcat(Hxu.T, Huu, r_u_k), \
                        ca.horzcat(ca.transpose(r_x_k), ca.transpose(r_u_k), 1))

                # add cost contribution
                f_qp = f_qp + ca.mtimes(ca.vertcat(dXk, dUk, 1).T, ca.mtimes(Hk, \
                        ca.vertcat(ca.vertcat(dXk, dUk, 1))))

                # add equality constraints
                c_qp += [-dXk + ca.mtimes(ca.transpose(nabla_x_f_prev), dXk_prev) \
                    + ca.mtimes(ca.transpose(nabla_u_f_prev), dUk_prev) + r_lam_k]

                lbc_qp += [np.zeros((NX, 1))]
                ubc_qp += [np.zeros((NX, 1))]

                # add inequality constraints
                c_qp += [ca.mtimes(ca.transpose(nabla_x_g), dXk) \
                    + ca.mtimes(ca.transpose(nabla_u_g), dUk) + r_nu_k]

                lbc_qp += [-np.inf * np.ones((NG, 1))]
                ubc_qp += [np.zeros((NG, 1))]

            k = M
            Xk_lin_prev = X_lin[k - 1]
            Uk_lin_prev = U_lin[k - 1]
            dXk = dX[k]
            dXk_prev = dX[k - 1]
            dUk_prev = dU[k - 1]
            Lamk_lin = Lam_lin[k]
            Xk_lin = X_lin[k]

            if M < N:
                dUk = dU[k - 1]

            nabla_x_f_prev = ca.transpose(jac_x_f(Xk_lin_prev, Uk_lin_prev))
            nabla_u_f_prev = ca.transpose(jac_u_f(Xk_lin_prev, Uk_lin_prev))

            # compute residuals
            if M == N:
                nabla_x_l = ca.transpose(jac_x_lN(Xk_lin))
            else:
                nabla_x_l = ca.transpose(jac_x_l(Xk_lin, Uk_lin))

            r_lam_k = -Xk_lin + integrator.eval(Xk_lin_prev, Uk_lin_prev)

            # compute Hessian approximation
            PM = ca.reshape(p[-2], NX, NX)
            pM = p[-1]

            HM = ca.vertcat(ca.horzcat(PM, pM), ca.horzcat(pM.T, 1))

            # add cost contribution
            f_qp = f_qp + ca.mtimes(ca.vertcat(dXk, 1).T, ca.mtimes(HM, \
                ca.vertcat(dXk, 1)))

            # add equality constraints
            c_qp += [-dXk + ca.mtimes(ca.transpose(nabla_x_f_prev), dXk_prev) \
                + ca.mtimes(ca.transpose(nabla_u_f_prev), dUk_prev) + r_lam_k]

            lbc_qp += [np.zeros((NX, 1))]
            ubc_qp += [np.zeros((NX, 1))]

            if M == N and NGN > 0:
                Tk_lin = T_lin[k]
                nabla_x_g = ca.transpose(jac_x_gN(Xk_lin))

                r_nu_k = gN(Xk_lin) + Tk_lin
                c_qp += [ca.mtimes(ca.transpose(nabla_x_g), dXk) + r_nu_k]

                lbc_qp += [-np.inf * np.ones((NGN, 1))]
                ubc_qp += [np.zeros((NGN, 1))]

            c_qp = ca.vertcat(*c_qp)
            dW = ca.vertcat(*dW)
            p = ca.vertcat(*p)

            # convert lists to numpy arrays
            self._lbc_qp = np.vstack(lbc_qp)
            self._ubc_qp = np.vstack(ubc_qp)

            self._w0 = np.vstack(w0)

            # create an NLP solver
            prob = {'f': f_qp, 'x': dW, 'g': c_qp, 'p': p}
            # opts = {'ipopt': {'print_level': 2}}
            opts = {}
            self.qp_solver = ca.nlpsol('solver', 'ipopt', prob, opts)
Example #11
theta = model.x[1]

ocp.model.cost_expr_ext_cost = tanh(theta)**2 + .5 * (model.x[0]**2 +
                                                      W_u * model.u**2)
ocp.model.cost_expr_ext_cost_e = tanh(theta)**2 + .5 * model.x[0]**2

custom_hess_u = W_u

J = horzcat(SX.eye(2), SX(2, 2))

print(DM(J.sparsity()))

# diagonal matrix with second order terms of outer loss function.
D = SX.sym('D', Sparsity.diag(2))
D[0, 0] = 1
[hess_tan, grad_tan] = hessian(tanh(theta)**2, theta)
D[1, 1] = if_else(theta == 0, hess_tan, grad_tan / theta)

custom_hess_x = J.T @ D @ J

zeros = SX(1, nx)
cost_expr_ext_cost_custom_hess = blockcat(custom_hess_u, zeros, zeros.T,
                                          custom_hess_x)
cost_expr_ext_cost_custom_hess_e = custom_hess_x

ocp.model.cost_expr_ext_cost_custom_hess = cost_expr_ext_cost_custom_hess
ocp.model.cost_expr_ext_cost_custom_hess_e = cost_expr_ext_cost_custom_hess_e

# set constraints
Fmax = 35
ocp.constraints.lbu = np.array([-Fmax])
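The if_else line above appears to build a positive curvature surrogate for the outer loss tanh(theta)**2 (gradient/theta away from theta = 0, exact second derivative at theta = 0). A small standalone check, assuming the same star-import style as the surrounding script:

th = SX.sym('th')
h_exact, g_exact = hessian(tanh(th)**2, th)
d11 = if_else(th == 0, h_exact, g_exact / th)  # convexified curvature term
chk = Function('chk', [th], [h_exact, d11])
print(chk(0.3))  # exact second derivative vs. convexified term at th = 0.3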
Example #12
    def __construct_sensitivity_funcs(self):
        """ Construct functions for NLP sensitivity evaluations
        """

        # system variables
        x, u = self.__vars['x'], self.__vars['u']
        nx = x.shape[0]
        wk = ca.vertcat(x, u)
        if 'us' in self.__vars:
            us = self.__vars['us']
            uhat = ca.vertcat(u, us)
            wk = ca.vertcat(wk, us)
        else:
            uhat = u

        # dynamics sensitivities
        x_next = self.__F(x0=x, p=u)['xf']  # symbolic integrator evaluation

        self.__jac_Fx = ca.Function('jac_Fx', [x, u],
                                    [ca.jacobian(x_next, x)]).map(self.__N)
        self.__jac_Fu = ca.Function('jac_Fu', [x, u],
                                    [ca.jacobian(x_next, uhat)]).map(self.__N)

        self.__hess_F = []
        for i in range(nx):
            self.__hess_F.append(
                ca.Function('hess_F_' + str(i), [x, u],
                            [ca.hessian(x_next[i], wk)[0]]))

        # cost sensitivities
        obj = self.__cost(x, u)  # symbolic cost evaluation

        self.__jac_cost = ca.Function('q', [x, u],
                                      [ca.jacobian(obj, wk)]).map(self.__N)
        hess_cost = ca.Function('hess_cost', [x, u], [ca.hessian(obj, wk)[0]])
        self.__hess_cost = hess_cost.map(self.__N)

        # phase fixing sensitivities
        alpha = ca.MX.sym('alpha')
        x0star = ca.MX.sym('x0star', self.__nx, 1)
        phase_fix = self.__phase_fix_fun(alpha, x0star, x)
        self.__jac_phase_fix = ca.Function('jac_phase_fix', [alpha, x0star, x],
                                           [ca.jacobian(phase_fix, wk)])
        self.__hess_phase_fix = ca.Function('hess_phase_fix',
                                            [alpha, x0star, x],
                                            [ca.hessian(phase_fix, wk)[0]])

        # constraints sensitivities
        if self.__h is not None:
            if 'us' in self.__vars:
                constr = self.__h(x, u, us)  # symbolic constraint evaluation
                self.__jac_h = ca.Function('jac_h', [x, u, us],
                                           [ca.jacobian(constr, wk)]).map(
                                               self.__N)

            else:
                constr = self.__h(x, u)
                self.__jac_h = ca.Function(
                    'jac_h', [x, u], [ca.jacobian(constr, wk)]).map(self.__N)

        if self.__gnl is not None:
            constr = self.__gnl(x, u, us)
            self.__jac_g = ca.Function('jac_g', [x, u, us],
                                       [ca.jacobian(constr, wk)]).map(self.__N)
            self.__hess_g = []
            for i in range(self.__ns):
                self.__hess_g.append(
                    ca.Function('hess_g_' + str(i), [x, u, us],
                                [ca.hessian(constr[i], wk)[0]]))

        # hessian of the lagrangian
        hess_lag = hess_cost(x, u)
        lam_g = ct.MX.sym('lam_g', nx)
        hess_lag += sum([lam_g[i] * self.__hess_F[i](x, u) for i in range(nx)])
        lag_args = [x, u, lam_g]

        if 'us' in self.__vars:
            lam_s = ct.MX.sym('lam_s', self.__ns)
            hess_lag += sum([
                lam_s[i] * self.__hess_g[i](x, u, us) for i in range(self.__ns)
            ])
            lag_args += [us, lam_s]
            hess_lag += ca.hessian(
                0.5 * self.__reg_slack * ct.mtimes(us.T, us), wk)[0]

        self.__hess_lag = ca.Function('hess_lag_fun', [*lag_args],
                                      [hess_lag]).map(self.__N)

        return None
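The .map(N) calls above evaluate each sensitivity function at all N shooting nodes in a single call. A standalone sketch of the Hessian-plus-map pattern with hypothetical dimensions:

import casadi as ca

x = ca.MX.sym('x', 2)
u = ca.MX.sym('u', 1)
wk = ca.vertcat(x, u)
obj = x[0]**2 + 0.1 * x[1]**4 + u**2

hess_cost = ca.Function('hess_cost', [x, u], [ca.hessian(obj, wk)[0]])
hess_map = hess_cost.map(4)  # evaluate at 4 (x, u) samples at once
print(hess_map(ca.DM.rand(2, 4), ca.DM.rand(1, 4)))  # 3x12: four 3x3 blocks side by side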
Example #13
    def generate_solver(self, f, f0, g, lby = [], uby = [], lbg = [], ubg = [], p0 = [], \
            y0 = [], lam0 = [], qpoases_root=None, casadi_root=None, eigen_root=None, approximate_hessian=None, optlevel='-O2'):

        g_shape = g.shape

        if g_shape[1] != 1:
            raise Exception(
                'g must have shape (<>,1), you have {}.'.format(g_shape))

        ni = g_shape[0]
        self.ni = ni
        nv = self.nv
        np = self.np

        FSQP_INF = 1E12
        if lby == []:
            lby = -FSQP_INF * nmp.ones((nv, 1))

        if uby == []:
            uby = FSQP_INF * nmp.ones((nv, 1))

        if lbg == []:
            lbg = nmp.zeros((ni, 1))

        if ubg == []:
            ubg = nmp.zeros((ni, 1))

        if p0 == []:
            p0 = nmp.zeros((np, 1))

        if y0 == []:
            y0 = nmp.zeros((nv, 1))

        if lam0 == []:
            lam0 = nmp.zeros((2 * (ni + nv), 1))

        if not isinstance(lby, nmp.ndarray):
            raise Exception(
                'lby must be of type nmp.array, you have {}'.format(type(lby)))

        if not isinstance(uby, nmp.ndarray):
            raise Exception(
                'uby must be of type nmp.array, you have {}'.format(type(uby)))

        if not isinstance(lbg, nmp.ndarray):
            raise Exception(
                'lbg must be of type nmp.array, you have {}'.format(type(lbg)))

        if not isinstance(ubg, nmp.ndarray):
            raise Exception(
                'ubg must be of type nmp.array, you have {}'.format(type(ubg)))

        lby_shape = lby.shape

        if lby_shape[0] != nv or lby_shape[1] != 1:
            raise Exception('lby must have shape (nv,1) = ({},1), you have ({},{})'\
                .format(nv, lby_shape[0], lby_shape[1]))

        uby_shape = uby.shape

        if uby_shape[0] != nv or uby_shape[1] != 1:
            raise Exception('uby must have shape (nv,1) = ({},1), you have ({},{})'\
                .format(nv, uby_shape[0], uby_shape[1]))

        lbg_shape = lbg.shape

        if lbg_shape[0] != ni or lbg_shape[1] != 1:
            raise Exception('lbg must have shape (ni,1) = ({},1), you have ({},{})'\
                .format(ni, lbg_shape[0], lbg_shape[1]))

        ubg_shape = ubg.shape

        if ubg_shape[0] != ni or ubg_shape[1] != 1:
            raise Exception('ubg must have shape (ni,1) = ({},1), you have ({},{})'\
                .format(ni, ubg_shape[0], ubg_shape[1]))

        p0_shape = p0.shape

        if p0_shape[0] != np or p0_shape[1] != 1:
            raise Exception('p0 must have shape (np,1) = ({},1), you have ({},{})'\
                .format(np, p0_shape[0], p0_shape[1]))

        y0_shape = y0.shape

        if y0_shape[0] != nv or y0_shape[1] != 1:
            raise Exception('y0 must have shape (nv,1) = ({},1), you have ({},{})'\
                .format(nv, y0_shape[0], y0_shape[1]))
        opts = dict(with_header=True)

        lam0_shape = lam0.shape

        if lam0_shape[0] != 2 * (ni + nv) or lam0_shape[1] != 1:
            raise Exception('lam0 must have shape (2*(ni+nv),1) = ({},1), you have ({},{})'\
                .format(2 * (ni + nv), lam0_shape[0], lam0_shape[1]))

        y = self.y
        p = self.p
        lam = ca.SX.sym('lam', 2 * (ni + nv), 1)
        # multiplier structure: [ubg, lbg, ub, lb]

        # copy Eigen headers to solver folder
        cmd = 'mkdir -p {}'.format(self.opts['solver_name'])
        status = os.system(cmd)
        if status != 0:
            raise Exception('{} failed'.format(cmd))

        os.chdir(self.opts['solver_name'])

        ca_f = ca.Function('ca_f', [y, p], [f])
        ca_f.generate('ca_f', opts)
        print('compiling generated code for f...')
        cmd = 'gcc -fPIC -shared {} ca_f.c -o libca_f.so'.format(optlevel)
        status = os.system(cmd)
        if status != 0:
            raise Exception('Command {} failed'.format(cmd))
        cmd = 'gcc -fPIC -shared {} ca_f.c -o ca_f.so'.format(optlevel)
        status = os.system(cmd)
        if status != 0:
            raise Exception('Command {} failed'.format(cmd))

        ca_f0 = ca.Function('ca_f0', [y, p], [f0])
        ca_f0.generate('ca_f0', opts)
        print('compiling generated code for f0...')
        cmd = 'gcc -fPIC -shared {} ca_f0.c -o libca_f0.so'.format(optlevel)
        status = os.system(cmd)
        if status != 0:
            raise Exception('Command {} failed'.format(cmd))
        cmd = 'gcc -fPIC -shared {} ca_f0.c -o ca_f0.so'.format(optlevel)
        status = os.system(cmd)
        if status != 0:
            raise Exception('Command {} failed'.format(cmd))

        ca_dfdy = ca.Function('ca_dfdy', [y, p], [ca.jacobian(f, y)])
        ca_dfdy.generate('ca_dfdy', opts)
        print('compiling generated code for dfdy...')
        cmd = 'gcc -fPIC -shared {} ca_dfdy.c -o libca_dfdy.so'.format(
            optlevel)
        status = os.system(cmd)
        if status != 0:
            raise Exception('Command {} failed'.format(cmd))
        cmd = 'gcc -fPIC -shared {} ca_dfdy.c -o ca_dfdy.so'.format(optlevel)
        status = os.system(cmd)
        if status != 0:
            raise Exception('Command {} failed'.format(cmd))

        ca_g = ca.Function('ca_g', [y, p], [g])
        ca_g.generate('ca_g', opts)
        print('compiling generated code for g...')
        cmd = 'gcc -fPIC -shared {} ca_g.c -o libca_g.so'.format(optlevel)
        status = os.system(cmd)
        if status != 0:
            raise Exception('Command {} failed'.format(cmd))
        cmd = 'gcc -fPIC -shared {} ca_g.c -o ca_g.so'.format(optlevel)
        status = os.system(cmd)
        if status != 0:
            raise Exception('Command {} failed'.format(cmd))
        # ca_g.save("ca_g.casadi")

        print('compiling generated code for dgdy...')
        ca_dgdy = ca.Function('ca_dgdy', [y, p], [ca.jacobian(g, y)])
        ca_dgdy.generate('ca_dgdy', opts)
        cmd = 'gcc -fPIC -shared {} ca_dgdy.c -o libca_dgdy.so'.format(
            optlevel)
        status = os.system(cmd)
        if status != 0:
            raise Exception('Command {} failed'.format(cmd))
        cmd = 'gcc -fPIC -shared {} ca_dgdy.c -o ca_dgdy.so'.format(optlevel)
        status = os.system(cmd)
        if status != 0:
            raise Exception('Command {} failed'.format(cmd))

        # Lagrangian
        L = f + ca.dot(lam[2 * nv:2 * nv + ni], g) - ca.dot(
            lam[2 * nv + ni:2 * nv + 2 * ni], g) + ca.dot(
                lam[0:nv], y) - ca.dot(lam[nv:2 * nv], y)
        #TODO(andrea): bounds first here!

        print('compiling generated code for dLdyy...')
        ca_dLdyy = ca.Function('ca_dLdyy', [y, lam, p], [ca.hessian(L, y)[0]])
        ca_dLdyy.generate('ca_dLdyy', opts)
        cmd = 'gcc -fPIC -shared {} ca_dLdyy.c -o libca_dLdyy.so'.format(
            optlevel)
        status = os.system(cmd)
        if status != 0:
            raise Exception('Command {} failed'.format(cmd))
        cmd = 'gcc -fPIC -shared {} ca_dLdyy.c -o ca_dLdyy.so'.format(optlevel)
        status = os.system(cmd)
        if status != 0:
            raise Exception('Command {} failed'.format(cmd))

        if approximate_hessian is not None:
            if approximate_hessian.shape != (nv, nv):
                raise Exception(
                    'Hessian approximation has wrong dimension! You have {} instead of {}'
                    .format(approximate_hessian.shape, (nv, nv)))

            print('compiling generated code for approximate Hessian M...')
            ca_M = ca.Function('ca_M', [y, p], [approximate_hessian])
            ca_M.generate('ca_M', opts)
            cmd = 'gcc -fPIC -shared -O3 ca_M.c -o libca_M.so'
            status = os.system(cmd)
            if status != 0:
                raise Exception('Command {} failed'.format(cmd))
            cmd = 'gcc -fPIC -shared -O3 ca_M.c -o ca_M.so'
            status = os.system(cmd)
            if status != 0:
                raise Exception('Command {} failed'.format(cmd))

            use_approximate_hessian = True
        else:
            use_approximate_hessian = False

        print('rendering templated C++ code...')
        env = Environment(loader=FileSystemLoader(
            os.path.dirname(os.path.abspath(__file__))))
        tmpl = env.get_template("templates/feasibleSQP.in.cpp")
        code = tmpl.render(solver_opts=self.opts,
                           NI=self.ni,
                           NV=self.nv,
                           NP=self.np,
                           use_approximate_hessian=use_approximate_hessian)
        with open('{}.cpp'.format(self.opts['solver_name']), "w+") as f:
            f.write(code)

        tmpl = env.get_template("templates/feasibleSQP.in.hpp")
        code = tmpl.render(solver_opts = self.opts, NV = nv, NI = ni, NP = np,\
            lby = lby, uby = uby, lbg = lbg, ubg = ubg, p0 = p0, y0 = y0, lam0 = lam0, use_approximate_hessian=use_approximate_hessian)

        with open('{}.hpp'.format(self.opts['solver_name']), "w+") as f:
            f.write(code)

        tmpl = env.get_template("templates/main.in.cpp")
        code = tmpl.render(solver_opts=self.opts,
                           use_approximate_hessian=use_approximate_hessian)
        with open('main.cpp', "w+") as f:
            f.write(code)

        print('rendering templated Makefile...')
        tmpl = env.get_template("templates/Makefile.in")
        build_params = dict()
        fsqp_root = os.path.dirname(os.path.abspath(__file__)) + '/../'
        if casadi_root is None:
            casadi_root = fsqp_root + 'external/casadi'
        if qpoases_root is None:
            qpoases_root = fsqp_root + 'external/qpOASES'
        if eigen_root is None:
            eigen_root = fsqp_root + 'external/eigen-git-mirror/Eigen'
        build_params['qpoases_root'] = qpoases_root
        build_params['casadi_root'] = casadi_root
        build_params['eigen_root'] = eigen_root
        build_params['solver_name'] = self.opts['solver_name']
        code = tmpl.render(build_params=build_params,
                           use_approximate_hessian=use_approximate_hessian)
        with open('Makefile', "w+") as f:
            f.write(code)

        cmd = 'make {}_shared'.format(self.opts['solver_name'])
        status = os.system(cmd)

        if status != 0:
            raise Exception('Command {} failed'.format(cmd))

        print('successfully generated solver!')
        os.chdir('..')

        # generate script to set LD_LIBRARY_PATH
        fsqp_root = os.path.dirname(os.path.abspath(__file__)) + '/../'
        with open(fsqp_root + '/feasible_sqp/paths.json', 'r') as f:
            library_paths = json.load(f)

        paths = ''
        cwd = os.getcwd()
        paths = paths + ':' + cwd + '/' + self.opts['solver_name']
        for key in library_paths:
            paths = paths + ':' + library_paths[key]

        with open('set_LD_LIBRARY_PATH.sh', 'w') as f:
            f.write('#!/bin/bash\nexport LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{}'.
                    format(paths))

        self.init()

        return
Example #14
def objective(x,y,p,N,params):
    
    nPrimal = x.numel()                 # number of primal variables
    nDual = y['lam_g'].numel()          # number of dual variables
    nParam = p.numel()                  # number of parameters
    
    #Loading initial states and controls
    data = spio.loadmat('CstrDistXinit.mat', squeeze_me = True)
    Xinit = data['Xinit']
    xf = Xinit[0:84]
    u_opt = Xinit[84:]

    #Model parameters
    NT = params['dist']['NT']             #Stages in column
    Uf = 0.3                          #Feed rate to CSTR F_0
    _,state,xdot,inputs = ColCSTR_model(Uf,params)
    sf = Function('sf',[state,inputs],[xdot])

    params['model']['sf'] = sf
    params['model']['xdot_val_rf_ss'] = xf
    params['model']['x'] = x
    params['model']['u_opt'] = u_opt
    
    nx = params['prob']['nx']
    nu = params['prob']['nu']
    nk = params['prob']['nk']
    tf = params['prob']['tf']
    ns = params['prob']['ns']
    h = params['prob']['h']

    #Preparing collocation matrices
    _, C, D, d = collocationSetup()
    params['prob']['d'] = d
    colloc = {'C':C,'D':D, 'h':h}
    params['colloc'] = colloc
    
    #NLP variable vector
    V = MX()                              #Decision variables (control + state)
    obj = 0                                                 #Objective function
    cons = MX()                                          #Nonlinear Constraints
    
    delta_time = 1
    alpha = 1
    beta = 1
    gamma = 1

    params['weight']['delta_time'] = delta_time
    params['weight']['alpha'] = alpha
    params['weight']['beta'] = beta
    params['weight']['gamma'] = gamma
    
    #Initial states and Controls
    data_init = spio.loadmat('CstrDistXinit.mat', squeeze_me = True)
    xf = data_init['Xinit'][0:84]
    u_opt = data_init['Xinit'][84:89]
    
    #"Lift" Initial conditions
    X0 = MX.sym('X0', nx)
    V = vertcat(V,X0)                     #Decision variables
    cons = vertcat(cons, X0 - x[0:nx,0]) #Nonlinear constraints
    cons_x0 = X0 - x[0:nx,0]

    #Formulating the NLP
    Xk = X0
    
    data = spio.loadmat('Qmax.mat', squeeze_me = True)
    params['Qmax'] = data['Qmax']
    ssoftc = 0
    
    for i in range(0,N):
       obj, cons, V, Xk, params, ssoftc = itPredHorizon_pf(Xk, V, cons, obj, params, i, ssoftc)

    V = vertcat(V[:])
    cons = vertcat(cons[:])

    #Objective function and constraint functions
    f = Function('f', [V], [obj], ['V'], ['objective'])
    c = Function('c', [V], [cons], ['V'], ['constraint'])
    cx0 = Function('cx0',[X0], [cons_x0], ['X0'], ['constraint'])

    #Constructing Lagrangian
    lag_expr = obj + mtimes(transpose(y['lam_g']),cons)
    g = Function('g',[V],[jacobian(obj,V),obj])
    lagr = Function('lagr', [V], [lag_expr], ['V'], ['lag_expr'])
    [H,gg] = hessian(lag_expr,V)
    H = Function('H',[V],[H,gg])
    [Hobj,gobj] = hessian(obj,V)
    Hobj = Function('Hobj', [V], [Hobj,gobj])
    J = Function('J',[V],[jacobian(cons,V),cons])
    Jp = Function('Jp',[X0],[jacobian(cons_x0,X0),cons_x0])

    #Evaluating functions at current point
    f = f(x)
    g = g(x)
    g = g[0]
    g = transpose(g)
    H = H(x)
    H = H[0]
    Lxp = H[0:nPrimal,0:nParam]
    J = J(x)
    J = J[0]
    Jtemp = DM.zeros((nDual,nParam))
    cp = Jp(x[0:nParam])
    cp = cp[0]
    Jtemp[0:nParam, 0:nParam] = cp.full()
    cp = Jtemp.sparse()
    cst = c(x)

    #Evaluation of objective function used for Gershgorin bound
    Hobj = Hobj(x)
    Hobj = Hobj[0].sparse()
    f = f.full()
    g = g.sparse()
    Lxp = Lxp.sparse()
    cst = cst.full()
   
    #Equality constraint
    Jeq = J
    dpe = cp

    return f, g, H, Lxp, cst, J, cp, Jeq, dpe, Hobj
Example #15
def generate_c_code_external_cost(model, stage_type, opts):

    casadi_version = CasadiMeta.version()
    casadi_opts = dict(mex=False, casadi_int="int", casadi_real="double")

    if casadi_version not in (ALLOWED_CASADI_VERSIONS):
        casadi_version_warning(casadi_version)

    x = model.x
    p = model.p

    if isinstance(x, MX):
        symbol = MX.sym
    else:
        symbol = SX.sym

    if stage_type == 'terminal':
        suffix_name = "_cost_ext_cost_e_fun"
        suffix_name_hess = "_cost_ext_cost_e_fun_jac_hess"
        suffix_name_jac = "_cost_ext_cost_e_fun_jac"
        u = symbol("u", 0, 0)
        ext_cost = model.cost_expr_ext_cost_e
        custom_hess = model.cost_expr_ext_cost_custom_hess_e

    elif stage_type == 'path':
        suffix_name = "_cost_ext_cost_fun"
        suffix_name_hess = "_cost_ext_cost_fun_jac_hess"
        suffix_name_jac = "_cost_ext_cost_fun_jac"
        u = model.u
        ext_cost = model.cost_expr_ext_cost
        custom_hess = model.cost_expr_ext_cost_custom_hess

    elif stage_type == 'initial':
        suffix_name = "_cost_ext_cost_0_fun"
        suffix_name_hess = "_cost_ext_cost_0_fun_jac_hess"
        suffix_name_jac = "_cost_ext_cost_0_fun_jac"
        u = model.u
        ext_cost = model.cost_expr_ext_cost_0
        custom_hess = model.cost_expr_ext_cost_custom_hess_0

    # set up functions to be exported
    fun_name = model.name + suffix_name
    fun_name_hess = model.name + suffix_name_hess
    fun_name_jac = model.name + suffix_name_jac

    # generate expression for full gradient and Hessian
    full_hess, grad = hessian(ext_cost, vertcat(u, x))

    if custom_hess is not None:
        full_hess = custom_hess

    ext_cost_fun = Function(fun_name, [x, u, p], [ext_cost])
    ext_cost_fun_jac_hess = Function(fun_name_hess, [x, u, p],
                                     [ext_cost, grad, full_hess])
    ext_cost_fun_jac = Function(fun_name_jac, [x, u, p], [ext_cost, grad])

    # generate C code
    code_export_dir = opts["code_export_directory"]
    if not os.path.exists(code_export_dir):
        os.makedirs(code_export_dir)

    cwd = os.getcwd()
    os.chdir(code_export_dir)
    gen_dir = model.name + '_cost'
    if not os.path.exists(gen_dir):
        os.mkdir(gen_dir)
    gen_dir_location = "./" + gen_dir
    os.chdir(gen_dir_location)

    ext_cost_fun.generate(fun_name, casadi_opts)
    ext_cost_fun_jac_hess.generate(fun_name_hess, casadi_opts)
    ext_cost_fun_jac.generate(fun_name_jac, casadi_opts)

    os.chdir(cwd)
    return
Example #16
 def spy_hessian(self, opti):
     import matplotlib.pylab as plt
     lag = opti.f + dot(opti.lam_g, opti.g)
     H = hessian(lag, opti.x)[0].sparsity()
     plt.spy(H)
     plt.title("Lagrange Hessian: " + H.dim(True))
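A hypothetical standalone usage of the same logic with a small Opti problem (opti.f, opti.g, opti.x and opti.lam_g are the symbolic objective, constraints, decision vector and duals exposed by casadi.Opti):

import matplotlib.pylab as plt
from casadi import Opti, dot, hessian

opti = Opti()
z = opti.variable(3)
opti.minimize(z[0]**2 + z[1]**4 + z[0] * z[2])
opti.subject_to(z[0] + z[1] >= 1)

lag = opti.f + dot(opti.lam_g, opti.g)
H = hessian(lag, opti.x)[0].sparsity()
plt.spy(H)
plt.title("Lagrange Hessian: " + H.dim(True))
plt.show()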